From 5ac18747007b682774ce98f293ef9d8920976bbb Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 14 May 2024 17:55:57 +0300 Subject: [PATCH 001/106] Bridge: add subcommand to relay messages range (#4383) related to https://github.com/paritytech/parity-bridges-common/issues/2962 Usage example: ```sh RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ ./target/release/substrate-relay relay-messages-range bridge-hub-rococo-to-bridge-hub-westend \ --source-host localhost \ --source-port 8943 \ --source-version-mode Auto \ --source-signer //Eve \ --source-transactions-mortality 4 \ --target-host localhost \ --target-port 8945 \ --target-version-mode Auto \ --target-signer //Eve \ --target-transactions-mortality 4 \ --lane 00000002 \ --at-source-block 34 \ --messages-start 1 \ --messages-end 1 INFO bridge Connecting to BridgeHubRococo node at ws://localhost:8943 INFO bridge Connecting to BridgeHubWestend node at ws://localhost:8945 TRACE bridge Refined weight of BridgeHubRococo->BridgeHubWestend message [0, 0, 0, 2]/1: at-source: Weight(ref_time: 0, proof_size: 0), at-target: Weight(ref_time: 452953993, proof_size: 0) TRACE bridge Sent transaction to BridgeHubWestend node: 0x38552f4db6bc78baecb52ebd2f7d103b1c919c16b83129dc083bf01b7281955b TRACE bridge BridgeHubWestend transaction 0x38552f4db6bc78baecb52ebd2f7d103b1c919c16b83129dc083bf01b7281955b has been included in block: (0x29a20bdca8726df0b32af9067290b7fc0a886908da3a30f3db60a6ea52be4604, 0) TRACE bridge BridgeHubWestend transaction 0x38552f4db6bc78baecb52ebd2f7d103b1c919c16b83129dc083bf01b7281955b has been finalized at block: 0x29a20bdca8726df0b32af9067290b7fc0a886908da3a30f3db60a6ea52be4604 ``` --- .../src/cli/relay_messages.rs | 70 ++++++++++++++++++- .../lib-substrate-relay/src/messages_lane.rs | 45 +++++++++++- bridges/relays/messages/src/lib.rs | 2 + .../messages/src/message_race_delivery.rs | 65 ++++++++++++++++- 4 files changed, 179 insertions(+), 3 deletions(-) diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs index b672bd4f9b8..e5b07b24158 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs @@ -26,9 +26,12 @@ use async_trait::async_trait; use sp_core::Pair; use structopt::StructOpt; +use bp_messages::MessageNonce; +use bp_runtime::HeaderIdProvider; use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, BalanceOf, ChainWithRuntimeVersion, ChainWithTransactions, + AccountIdOf, AccountKeyPairOf, BalanceOf, Chain, ChainWithRuntimeVersion, ChainWithTransactions, }; +use relay_utils::UniqueSaturatedInto; /// Messages relaying params. #[derive(StructOpt)] @@ -48,6 +51,35 @@ pub struct RelayMessagesParams { prometheus_params: PrometheusParams, } +/// Messages range relaying params. +#[derive(StructOpt)] +pub struct RelayMessagesRangeParams { + /// Number of the source chain header that we will use to prepare a messages proof. + /// This header must be previously proved to the target chain. + #[structopt(long)] + at_source_block: u128, + /// Hex-encoded lane id that should be served by the relay. Defaults to `00000000`. + #[structopt(long, default_value = "00000000")] + lane: HexLaneId, + /// Nonce (inclusive) of the first message to relay. + #[structopt(long)] + messages_start: MessageNonce, + /// Nonce (inclusive) of the last message to relay. 
+ #[structopt(long)] + messages_end: MessageNonce, + /// Whether the outbound lane state proof should be included into the transaction. + #[structopt(long)] + outbound_state_proof_required: bool, + #[structopt(flatten)] + source: SourceConnectionParams, + #[structopt(flatten)] + source_sign: SourceSigningParams, + #[structopt(flatten)] + target: TargetConnectionParams, + #[structopt(flatten)] + target_sign: TargetSigningParams, +} + /// Trait used for relaying messages between 2 chains. #[async_trait] pub trait MessagesRelayer: MessagesCliBridge @@ -86,4 +118,40 @@ where .await .map_err(|e| anyhow::format_err!("{}", e)) } + + /// Relay a consecutive range of messages. + async fn relay_messages_range(data: RelayMessagesRangeParams) -> anyhow::Result<()> { + let source_client = data.source.into_client::<Self::Source>().await?; + let target_client = data.target.into_client::<Self::Target>().await?; + let source_sign = data.source_sign.to_keypair::<Self::Source>()?; + let source_transactions_mortality = data.source_sign.transactions_mortality()?; + let target_sign = data.target_sign.to_keypair::<Self::Target>()?; + let target_transactions_mortality = data.target_sign.transactions_mortality()?; + + let at_source_block = source_client + .header_by_number(data.at_source_block.unique_saturated_into()) + .await + .map_err(|e| { + log::trace!( + target: "bridge", + "Failed to read {} header with number {}: {e:?}", + Self::Source::NAME, + data.at_source_block, + ); + anyhow::format_err!("The command has failed") + })? + .id(); + + crate::messages_lane::relay_messages_range::<Self::MessagesLane>( + source_client, + target_client, + TransactionParams { signer: source_sign, mortality: source_transactions_mortality }, + TransactionParams { signer: target_sign, mortality: target_transactions_mortality }, + at_source_block, + data.lane.into(), + data.messages_start..=data.messages_end, + data.outbound_state_proof_required, + ) + .await + } }
diff --git a/bridges/relays/lib-substrate-relay/src/messages_lane.rs b/bridges/relays/lib-substrate-relay/src/messages_lane.rs index 58e9ded312d..a34b165289b 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_lane.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_lane.rs @@ -46,7 +46,7 @@ use relay_utils::{ }; use sp_core::Pair; use sp_runtime::traits::Zero; -use std::{fmt::Debug, marker::PhantomData}; +use std::{fmt::Debug, marker::PhantomData, ops::RangeInclusive}; /// Substrate -> Substrate messages synchronization pipeline. pub trait SubstrateMessageLane: 'static + Clone + Debug + Send + Sync { @@ -275,6 +275,49 @@ where .map_err(Into::into) } +/// Deliver range of Substrate-to-Substrate messages. No checks are made to ensure that the +/// transaction will succeed. +pub async fn relay_messages_range<P: SubstrateMessageLane>( + source_client: Client<P::SourceChain>, + target_client: Client<P::TargetChain>, + source_transaction_params: TransactionParams<AccountKeyPairOf<P::SourceChain>>, + target_transaction_params: TransactionParams<AccountKeyPairOf<P::TargetChain>>, + at_source_block: HeaderIdOf<P::SourceChain>, + lane_id: LaneId, + range: RangeInclusive<MessageNonce>, + outbound_state_proof_required: bool, +) -> anyhow::Result<()> +where + AccountIdOf<P::SourceChain>: From<<AccountKeyPairOf<P::SourceChain> as Pair>::Public>, + AccountIdOf<P::TargetChain>: From<<AccountKeyPairOf<P::TargetChain> as Pair>::Public>, + BalanceOf<P::SourceChain>: TryFrom<BalanceOf<P::TargetChain>>, +{ + let relayer_id_at_source: AccountIdOf<P::SourceChain> = + source_transaction_params.signer.public().into(); + messages_relay::relay_messages_range( + SubstrateMessagesSource::<P>::new( + source_client.clone(), + target_client.clone(), + lane_id, + source_transaction_params, + None, + ), + SubstrateMessagesTarget::<P>
::new( + target_client, + source_client, + lane_id, + relayer_id_at_source, + target_transaction_params, + None, + ), + at_source_block, + range, + outbound_state_proof_required, + ) + .await + .map_err(|_| anyhow::format_err!("The command has failed")) +} + /// Different ways of building `receive_messages_proof` calls. pub trait ReceiveMessagesProofCallBuilder<P: SubstrateMessageLane> { /// Given messages proof, build call of `receive_messages_proof` function of bridge
diff --git a/bridges/relays/messages/src/lib.rs b/bridges/relays/messages/src/lib.rs index 9c62cee5ee3..7c18b6b148f 100644 --- a/bridges/relays/messages/src/lib.rs +++ b/bridges/relays/messages/src/lib.rs @@ -35,3 +35,5 @@ mod message_race_limits; mod message_race_loop; mod message_race_receiving; mod message_race_strategy; + +pub use message_race_delivery::relay_messages_range;
diff --git a/bridges/relays/messages/src/message_race_delivery.rs b/bridges/relays/messages/src/message_race_delivery.rs index f18c43cc7f0..cbb89baabcc 100644 --- a/bridges/relays/messages/src/message_race_delivery.rs +++ b/bridges/relays/messages/src/message_race_delivery.rs @@ -19,7 +19,7 @@ use async_trait::async_trait; use futures::stream::FusedStream; use bp_messages::{MessageNonce, UnrewardedRelayersState, Weight}; -use relay_utils::FailedClient; +use relay_utils::{FailedClient, TrackedTransactionStatus, TransactionTracker}; use crate::{ message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, @@ -77,6 +77,69 @@ pub async fn run<P: MessageLane>( .await } +/// Relay range of messages. +pub async fn relay_messages_range<P: MessageLane>( + source_client: impl MessageLaneSourceClient<P>, + target_client: impl MessageLaneTargetClient<P>, + at: SourceHeaderIdOf<P>, + range: RangeInclusive<MessageNonce>, + outbound_state_proof_required: bool, +) -> Result<(), ()> { + // compute cumulative dispatch weight of all messages in given range + let dispatch_weight = source_client + .generated_message_details(at.clone(), range.clone()) + .await + .map_err(|e| { + log::error!( + target: "bridge", + "Failed to get generated message details at {:?} for messages {:?}: {:?}", + at, + range, + e, + ); + })? + .values() + .fold(Weight::zero(), |total, details| total.saturating_add(details.dispatch_weight)); + // prepare messages proof + let (at, range, proof) = source_client + .prove_messages( + at.clone(), + range.clone(), + MessageProofParameters { outbound_state_proof_required, dispatch_weight }, + ) + .await + .map_err(|e| { + log::error!( + target: "bridge", + "Failed to generate messages proof at {:?} for messages {:?}: {:?}", + at, + range, + e, + ); + })?; + // submit messages proof to the target node + let tx_tracker = target_client + .submit_messages_proof(None, at, range.clone(), proof) + .await + .map_err(|e| { + log::error!( + target: "bridge", + "Failed to submit messages proof for messages {:?}: {:?}", + range, + e, + ); + })? + .tx_tracker; + + match tx_tracker.wait().await { + TrackedTransactionStatus::Finalized(_) => Ok(()), + TrackedTransactionStatus::Lost => { + log::error!("Transaction with messages {:?} is considered lost", range,); + Err(()) + }, + } +} + /// Message delivery race. struct MessageDeliveryRace<P: MessageLane>(std::marker::PhantomData<P>
); -- GitLab From 712a750a995976a66a712c15376c0cfadea6914a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 14 May 2024 16:03:25 +0000 Subject: [PATCH 002/106] Bump nix from 0.27.1 to 0.28.0 (#4438) Bumps [nix](https://github.com/nix-rust/nix) from 0.27.1 to 0.28.0.

Changelog

Sourced from nix's changelog.

[0.28.0] - 2024-02-24

Added

  • Added mkdtemp wrapper (#1297)
  • Add associated constants UTIME_OMIT UTIME_NOW for TimeSpec (#1879)
  • Added EventFd type. (#1945)
  • Added impl From<Signal> for SigSet, impl std::ops::BitOr for SigSet, impl std::ops::BitOr for Signal and impl std::ops::BitOr<Signal> for SigSet (#1959)
  • Added TlsGetRecordType control message type and corresponding enum for linux (#2065)
  • Added Ipv6HopLimit to ::nix::sys::socket::ControlMessage for Linux, MacOS, FreeBSD, DragonflyBSD, Android, iOS and Haiku. (#2074)
  • Added Icmp and IcmpV6 to SockProtocol (#2103)
  • Added rfork support for FreeBSD in unistd (#2121)
  • Added MapFlags::map_hugetlb_with_size_log2 method for Linux targets (#2125)
  • Added mmap_anonymous function (#2127)
  • Added mips32r6 and mips64r6 support for signal, ioctl and ptrace (#2138)
  • Added F_GETPATH FcntlFlags entry on Apple/NetBSD/DragonflyBSD for ::nix::fcntl. (#2142)
  • Added F_KINFO FcntlFlags entry on FreeBSD for ::nix::fcntl. (#2152)
  • Added F_GETPATH_NOFIRMLINK and F_BARRIERFSYNC FcntlFlags entry on Apple for ::nix::fcntl. (#2155)
  • Added newtype Flock to automatically unlock a held flock upon drop. Added Flockable trait to represent valid types for Flock. (#2170)
  • Added SetSockOpt impls to enable Linux Kernel TLS on a TCP socket and to import TLS parameters. (#2175)
  • Added the ::nix::sys::socket::SocketTimestamp enum for configuring the TsClock (a.k.a SO_TS_CLOCK) sockopt and FreeBSD's ScmRealtime and ScmMonotonic as new options in ::nix::sys::socket::ControlMessageOwned (#2187)
  • Added new fanotify API: wrappers for fanotify_init and fanotify_mark (#2194)

... (truncated)
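For a concrete sense of what 0.28.0 adds, here is a minimal, untested sketch exercising one of the additions listed above, the `mkdtemp` wrapper (#1297). The module path, feature flag and exact signature are assumptions taken from nix's documentation, not anything verified against this workspace:

```rust
// Sketch only: assumes nix 0.28 with its "fs" feature enabled and the
// documented signature `nix::unistd::mkdtemp(&P) -> Result<PathBuf>`.
use nix::unistd::mkdtemp;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // The trailing "XXXXXX" placeholder is replaced by the OS with a unique
    // suffix; the directory is created with mode 0700.
    let dir = mkdtemp("/tmp/nix-0-28-demo.XXXXXX")?;
    println!("created temporary directory at {}", dir.display());
    Ok(())
}
```

Nothing in the diff below relies on these new APIs; the workspace change is only the version pins plus the transitive `cfg_aliases` build dependency visible in the Cargo.lock hunk.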

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=nix&package-manager=cargo&previous-version=0.27.1&new-version=0.28.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 19 ++++++++++--------- cumulus/polkadot-parachain/Cargo.toml | 2 +- polkadot/Cargo.toml | 2 +- polkadot/node/core/pvf/common/Cargo.toml | 2 +- .../node/core/pvf/execute-worker/Cargo.toml | 2 +- .../node/core/pvf/prepare-worker/Cargo.toml | 2 +- substrate/bin/node/cli/Cargo.toml | 2 +- substrate/test-utils/cli/Cargo.toml | 2 +- 8 files changed, 17 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7711d51d0df..2b2576d1adf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8876,12 +8876,13 @@ dependencies = [ [[package]] name = "nix" -version = "0.27.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" +checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ "bitflags 2.4.0", "cfg-if", + "cfg_aliases", "libc", ] @@ -12535,7 +12536,7 @@ version = "6.0.0" dependencies = [ "assert_cmd", "color-eyre", - "nix 0.27.1", + "nix 0.28.0", "polkadot-cli", "polkadot-core-primitives", "polkadot-node-core-pvf", @@ -13219,7 +13220,7 @@ dependencies = [ "futures", "landlock", "libc", - "nix 0.27.1", + "nix 0.28.0", "parity-scale-codec", "polkadot-parachain-primitives", "polkadot-primitives", @@ -13244,7 +13245,7 @@ dependencies = [ "cfg-if", "cpu-time", "libc", - "nix 0.27.1", + "nix 0.28.0", "parity-scale-codec", "polkadot-node-core-pvf-common", "polkadot-parachain-primitives", @@ -13260,7 +13261,7 @@ dependencies = [ "cfg-if", "criterion", "libc", - "nix 0.27.1", + "nix 0.28.0", "parity-scale-codec", "polkadot-node-core-pvf-common", "polkadot-primitives", @@ -13553,7 +13554,7 @@ dependencies = [ "hex-literal", "jsonrpsee", "log", - "nix 0.27.1", + "nix 0.28.0", "pallet-transaction-payment", "pallet-transaction-payment-rpc", "pallet-transaction-payment-rpc-runtime-api", @@ -20120,7 +20121,7 @@ dependencies = [ "kitchensink-runtime", "log", "mmr-gadget", - "nix 0.27.1", + "nix 0.28.0", "node-primitives", "node-rpc", "node-testing", @@ -20520,7 +20521,7 @@ version = "0.1.0" dependencies = [ "assert_cmd", "futures", - "nix 0.27.1", + "nix 0.28.0", "node-primitives", "regex", "sc-cli", diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index f21a5baf973..3d0aa94e8de 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -118,7 +118,7 @@ substrate-build-script-utils = { path = "../../substrate/utils/build-script-util [dev-dependencies] assert_cmd = "2.0" -nix = { version = "0.27.1", features = ["signal"] } +nix = { version = "0.28.0", features = ["signal"] } tempfile = "3.8.0" tokio = { version = "1.32.0", features = ["macros", "parking_lot", "time"] } wait-timeout = "0.2" diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml index 7b5679e1084..3aeec8d5961 100644 --- a/polkadot/Cargo.toml +++ b/polkadot/Cargo.toml @@ -43,7 +43,7 @@ tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_suppo [dev-dependencies] assert_cmd = "2.0.4" -nix = { version = "0.27.1", features = ["signal"] } +nix = { version = "0.28.0", features = ["signal"] } tempfile = "3.2.0" tokio = "1.37" substrate-rpc-client = { path = "../substrate/utils/frame/rpc/client" } diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml index e1ce6e79cb9..adf353fe2e4 100644 --- 
a/polkadot/node/core/pvf/common/Cargo.toml +++ b/polkadot/node/core/pvf/common/Cargo.toml @@ -14,7 +14,7 @@ cpu-time = "1.0.0" futures = "0.3.30" gum = { package = "tracing-gum", path = "../../../gum" } libc = "0.2.152" -nix = { version = "0.27.1", features = ["resource", "sched"] } +nix = { version = "0.28.0", features = ["resource", "sched"] } thiserror = { workspace = true } parity-scale-codec = { version = "3.6.1", default-features = false, features = [ diff --git a/polkadot/node/core/pvf/execute-worker/Cargo.toml b/polkadot/node/core/pvf/execute-worker/Cargo.toml index 04a620573b2..3480264d1da 100644 --- a/polkadot/node/core/pvf/execute-worker/Cargo.toml +++ b/polkadot/node/core/pvf/execute-worker/Cargo.toml @@ -13,7 +13,7 @@ workspace = true cpu-time = "1.0.0" gum = { package = "tracing-gum", path = "../../../gum" } cfg-if = "1.0" -nix = { version = "0.27.1", features = ["process", "resource", "sched"] } +nix = { version = "0.28.0", features = ["process", "resource", "sched"] } libc = "0.2.152" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml index 9ecf1c8af50..12628565e3a 100644 --- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml +++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml @@ -18,7 +18,7 @@ rayon = "1.5.1" tracking-allocator = { package = "staging-tracking-allocator", path = "../../../tracking-allocator" } tikv-jemalloc-ctl = { version = "0.5.0", optional = true } tikv-jemallocator = { version = "0.5.0", optional = true } -nix = { version = "0.27.1", features = ["process", "resource", "sched"] } +nix = { version = "0.28.0", features = ["process", "resource", "sched"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 323afa56696..a77e197cf6f 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -132,7 +132,7 @@ sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" } futures = "0.3.30" tempfile = "3.1.0" assert_cmd = "2.0.2" -nix = { version = "0.27.1", features = ["signal"] } +nix = { version = "0.28.0", features = ["signal"] } regex = "1.6.0" platforms = "3.0" soketto = "0.7.1" diff --git a/substrate/test-utils/cli/Cargo.toml b/substrate/test-utils/cli/Cargo.toml index c4f87671000..87c595c66f3 100644 --- a/substrate/test-utils/cli/Cargo.toml +++ b/substrate/test-utils/cli/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] substrate-rpc-client = { path = "../../utils/frame/rpc/client" } sp-rpc = { path = "../../primitives/rpc" } assert_cmd = "2.0.10" -nix = { version = "0.27.1", features = ["signal"] } +nix = { version = "0.28.0", features = ["signal"] } regex = "1.7.3" tokio = { version = "1.22.0", features = ["full"] } node-primitives = { path = "../../bin/node/primitives" } -- GitLab From 12ab31508cf8f03e6cc233722e535c383dc97c41 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Tue, 14 May 2024 21:35:04 +0100 Subject: [PATCH 003/106] make polkadot-parachain chain-spec extension more relaxed (#4452) A small step towards https://forum.polkadot.network/t/polkadot-parachain-omni-node-gathering-ideas-and-feedback/7823 and #4352. Many parachains use `camelCase` and/or `PascalCase`ing for their chain spec extension. 
Sometimes the only reason polkadot-parachain cannot sync them is because it cannot parse the chain spec extension. This PR relaxes the requirement for the extension to be camel case. --------- Co-authored-by: Branislav Kontur --- .../polkadot-parachain/src/chain_spec/mod.rs | 22 ++++++++++++++++++- templates/parachain/node/src/chain_spec.rs | 3 ++- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/cumulus/polkadot-parachain/src/chain_spec/mod.rs b/cumulus/polkadot-parachain/src/chain_spec/mod.rs index bbda334e4c6..136a19e3166 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/mod.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/mod.rs @@ -37,11 +37,12 @@ const SAFE_XCM_VERSION: u32 = xcm::prelude::XCM_VERSION; /// Generic extensions for Parachain ChainSpecs. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension)] -#[serde(deny_unknown_fields)] pub struct Extensions { /// The relay chain of the Parachain. + #[serde(alias = "relayChain", alias = "RelayChain")] pub relay_chain: String, /// The id of the Parachain. + #[serde(alias = "paraId", alias = "ParaId")] pub para_id: u32, } @@ -78,3 +79,22 @@ where pub fn get_collator_keys_from_seed(seed: &str) -> ::Public { get_from_seed::(seed) } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn can_decode_extension_camel_and_snake_case() { + let camel_case = r#"{"relayChain":"relay","paraId":1}"#; + let snake_case = r#"{"relay_chain":"relay","para_id":1}"#; + let pascal_case = r#"{"RelayChain":"relay","ParaId":1}"#; + + let camel_case_extension: Extensions = serde_json::from_str(camel_case).unwrap(); + let snake_case_extension: Extensions = serde_json::from_str(snake_case).unwrap(); + let pascal_case_extension: Extensions = serde_json::from_str(pascal_case).unwrap(); + + assert_eq!(camel_case_extension, snake_case_extension); + assert_eq!(snake_case_extension, pascal_case_extension); + } +} diff --git a/templates/parachain/node/src/chain_spec.rs b/templates/parachain/node/src/chain_spec.rs index 16c91865cdb..51710f1199c 100644 --- a/templates/parachain/node/src/chain_spec.rs +++ b/templates/parachain/node/src/chain_spec.rs @@ -22,11 +22,12 @@ pub fn get_from_seed(seed: &str) -> ::Pu /// The extensions for the [`ChainSpec`]. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension)] -#[serde(deny_unknown_fields)] pub struct Extensions { /// The relay chain of the Parachain. + #[serde(alias = "relayChain", alias = "RelayChain")] pub relay_chain: String, /// The id of the Parachain. + #[serde(alias = "paraId", alias = "ParaId")] pub para_id: u32, } -- GitLab From 9c69bb9850ff228f063d344f5f4beae5f2a1c793 Mon Sep 17 00:00:00 2001 From: shamil-gadelshin Date: Wed, 15 May 2024 15:52:26 +0700 Subject: [PATCH 004/106] Change forks pruning algorithm. (#3962) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR changes the fork calculation and pruning algorithm to enable future block header pruning. It's required because the previous algorithm relied on the block header persistence. It follows the [related discussion](https://github.com/paritytech/polkadot-sdk/issues/1570) The previous code contained this comment describing the situation: ``` /// Note a block height finalized, displacing all leaves with number less than the finalized /// block's. 
/// /// Although it would be more technically correct to also prune out leaves at the /// same number as the finalized block, but with different hashes, the current behavior /// is simpler and our assumptions about how finalization works means that those leaves /// will be pruned soon afterwards anyway. pub fn finalize_height(&mut self, number: N) -> FinalizationOutcome { ``` The previous algorithm relied on the existing block headers to prune forks later and to enable block header pruning we need to clear all obsolete forks right after the block finalization to not depend on the related block headers in the future. --------- Co-authored-by: Bastian Kรถcher --- prdoc/pr_3962.prdoc | 12 ++ substrate/client/api/src/in_mem.rs | 14 -- substrate/client/api/src/leaves.rs | 108 +++---------- substrate/client/consensus/babe/src/lib.rs | 5 +- substrate/client/consensus/babe/src/tests.rs | 4 +- substrate/client/db/src/lib.rs | 72 +++------ .../merkle-mountain-range/src/offchain_mmr.rs | 13 +- .../rpc-spec-v2/src/chain_head/tests.rs | 47 +++--- substrate/client/service/src/client/client.rs | 8 +- .../client/service/test/src/client/mod.rs | 18 ++- .../primitives/blockchain/src/backend.rs | 142 +++++++++++------- .../blockchain/src/header_metadata.rs | 2 +- 12 files changed, 196 insertions(+), 249 deletions(-) create mode 100644 prdoc/pr_3962.prdoc diff --git a/prdoc/pr_3962.prdoc b/prdoc/pr_3962.prdoc new file mode 100644 index 00000000000..7ef59d38ce5 --- /dev/null +++ b/prdoc/pr_3962.prdoc @@ -0,0 +1,12 @@ +title: Change fork calculation algorithm. + +doc: + - audience: Node Dev + description: | + This PR changes the fork calculation and pruning algorithm to enable future block header pruning. + During the finalization of the block we prune known stale forks, so forks are pruned faster. + +crates: + - name: sc-client-api + - name: sc-client-db + - name: sp-blockchain diff --git a/substrate/client/api/src/in_mem.rs b/substrate/client/api/src/in_mem.rs index b933ed1f17e..ba89aede914 100644 --- a/substrate/client/api/src/in_mem.rs +++ b/substrate/client/api/src/in_mem.rs @@ -419,20 +419,6 @@ impl blockchain::Backend for Blockchain { Ok(self.storage.read().leaves.hashes()) } - fn displaced_leaves_after_finalizing( - &self, - block_number: NumberFor, - ) -> sp_blockchain::Result> { - Ok(self - .storage - .read() - .leaves - .displaced_by_finalize_height(block_number) - .leaves() - .cloned() - .collect::>()) - } - fn children(&self, _parent_hash: Block::Hash) -> sp_blockchain::Result> { unimplemented!() } diff --git a/substrate/client/api/src/leaves.rs b/substrate/client/api/src/leaves.rs index a8a988771e2..e129de8bf3f 100644 --- a/substrate/client/api/src/leaves.rs +++ b/substrate/client/api/src/leaves.rs @@ -49,7 +49,7 @@ pub struct FinalizationOutcome { removed: BTreeMap, Vec>, } -impl FinalizationOutcome { +impl FinalizationOutcome { /// Merge with another. This should only be used for displaced items that /// are produced within one transaction of each other. pub fn merge(&mut self, mut other: Self) { @@ -63,6 +63,16 @@ impl FinalizationOutcome { pub fn leaves(&self) -> impl Iterator { self.removed.values().flatten() } + + /// Constructor + pub fn new(new_displaced: impl Iterator) -> Self { + let mut removed = BTreeMap::, Vec>::new(); + for (hash, number) in new_displaced { + removed.entry(Reverse(number)).or_default().push(hash); + } + + FinalizationOutcome { removed } + } } /// list of leaf hashes ordered by number (descending). 
@@ -151,39 +161,12 @@ where Some(RemoveOutcome { inserted, removed: LeafSetItem { hash, number } }) } - /// Note a block height finalized, displacing all leaves with number less than the finalized - /// block's. - /// - /// Although it would be more technically correct to also prune out leaves at the - /// same number as the finalized block, but with different hashes, the current behavior - /// is simpler and our assumptions about how finalization works means that those leaves - /// will be pruned soon afterwards anyway. - pub fn finalize_height(&mut self, number: N) -> FinalizationOutcome { - let boundary = if number == N::zero() { - return FinalizationOutcome { removed: BTreeMap::new() } - } else { - number - N::one() - }; - - let below_boundary = self.storage.split_off(&Reverse(boundary)); - FinalizationOutcome { removed: below_boundary } - } - - /// The same as [`Self::finalize_height`], but it only simulates the operation. - /// - /// This means that no changes are done. - /// - /// Returns the leaves that would be displaced by finalizing the given block. - pub fn displaced_by_finalize_height(&self, number: N) -> FinalizationOutcome { - let boundary = if number == N::zero() { - return FinalizationOutcome { removed: BTreeMap::new() } - } else { - number - N::one() - }; - - let below_boundary = self.storage.range(&Reverse(boundary)..); - FinalizationOutcome { - removed: below_boundary.map(|(k, v)| (k.clone(), v.clone())).collect(), + /// Remove all leaves displaced by the last block finalization. + pub fn remove_displaced_leaves(&mut self, displaced_leaves: &FinalizationOutcome) { + for (number, hashes) in &displaced_leaves.removed { + for hash in hashes.iter() { + self.remove_leaf(number, hash); + } } } @@ -420,32 +403,6 @@ mod tests { assert!(set.contains(11, 11_2)); } - #[test] - fn finalization_works() { - let mut set = LeafSet::new(); - set.import(9_1u32, 9u32, 0u32); - set.import(10_1, 10, 9_1); - set.import(10_2, 10, 9_1); - set.import(11_1, 11, 10_1); - set.import(11_2, 11, 10_1); - set.import(12_1, 12, 11_2); - - let outcome = set.finalize_height(11); - assert_eq!(set.count(), 2); - assert!(set.contains(11, 11_1)); - assert!(set.contains(12, 12_1)); - assert_eq!( - outcome.removed, - [(Reverse(10), vec![10_2])].into_iter().collect::>(), - ); - - set.undo().undo_finalization(outcome); - assert_eq!(set.count(), 3); - assert!(set.contains(11, 11_1)); - assert!(set.contains(12, 12_1)); - assert!(set.contains(10, 10_2)); - } - #[test] fn flush_to_disk() { const PREFIX: &[u8] = b"abcdefg"; @@ -479,35 +436,4 @@ mod tests { assert!(set.contains(10, 1_2)); assert!(!set.contains(10, 1_3)); } - - #[test] - fn finalization_consistent_with_disk() { - const PREFIX: &[u8] = b"prefix"; - let db = Arc::new(sp_database::MemDb::default()); - - let mut set = LeafSet::new(); - set.import(10_1u32, 10u32, 0u32); - set.import(11_1, 11, 10_2); - set.import(11_2, 11, 10_2); - set.import(12_1, 12, 11_123); - - assert!(set.contains(10, 10_1)); - - let mut tx = Transaction::new(); - set.prepare_transaction(&mut tx, 0, PREFIX); - db.commit(tx).unwrap(); - - let _ = set.finalize_height(11); - let mut tx = Transaction::new(); - set.prepare_transaction(&mut tx, 0, PREFIX); - db.commit(tx).unwrap(); - - assert!(set.contains(11, 11_1)); - assert!(set.contains(11, 11_2)); - assert!(set.contains(12, 12_1)); - assert!(!set.contains(10, 10_1)); - - let set2 = LeafSet::read_from_db(&*db, 0, PREFIX).unwrap(); - assert_eq!(set, set2); - } } diff --git a/substrate/client/consensus/babe/src/lib.rs 
b/substrate/client/consensus/babe/src/lib.rs index d10bdd8c7e4..a1ca6fd23b5 100644 --- a/substrate/client/consensus/babe/src/lib.rs +++ b/substrate/client/consensus/babe/src/lib.rs @@ -562,9 +562,10 @@ fn aux_storage_cleanup + HeaderBackend, Block: B // Cleans data for stale forks. let stale_forks = match client.expand_forks(¬ification.stale_heads) { Ok(stale_forks) => stale_forks, - Err((stale_forks, e)) => { + Err(e) => { warn!(target: LOG_TARGET, "{:?}", e); - stale_forks + + Default::default() }, }; hashes.extend(stale_forks.iter()); diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs index 38c9e1ff6ac..716067ae400 100644 --- a/substrate/client/consensus/babe/src/tests.rs +++ b/substrate/client/consensus/babe/src/tests.rs @@ -1094,8 +1094,8 @@ async fn obsolete_blocks_aux_data_cleanup() { assert!(aux_data_check(&fork1_hashes[2..3], false)); // Present: A4 assert!(aux_data_check(&fork1_hashes[3..], true)); - // Present C4, C5 - assert!(aux_data_check(&fork3_hashes, true)); + // Wiped C4, C5 + assert!(aux_data_check(&fork3_hashes, false)); } #[tokio::test] diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 0faa90dfc4f..36f9aea817c 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -68,8 +68,8 @@ use sc_client_api::{ use sc_state_db::{IsPruned, LastCanonicalized, StateDb}; use sp_arithmetic::traits::Saturating; use sp_blockchain::{ - Backend as _, CachedHeaderMetadata, Error as ClientError, HeaderBackend, HeaderMetadata, - HeaderMetadataCache, Result as ClientResult, + Backend as _, CachedHeaderMetadata, DisplacedLeavesAfterFinalization, Error as ClientError, + HeaderBackend, HeaderMetadata, HeaderMetadataCache, Result as ClientResult, }; use sp_core::{ offchain::OffchainOverlayedChange, @@ -747,19 +747,6 @@ impl sc_client_api::blockchain::Backend for BlockchainDb, - ) -> ClientResult> { - Ok(self - .leaves - .read() - .displaced_by_finalize_height(block_number) - .leaves() - .cloned() - .collect::>()) - } - fn children(&self, parent_hash: Block::Hash) -> ClientResult> { children::read_children(&*self.db, columns::META, meta_keys::CHILDREN_PREFIX, parent_hash) } @@ -1813,14 +1800,13 @@ impl Backend { apply_state_commit(transaction, commit); } - let new_displaced = self.blockchain.leaves.write().finalize_height(f_num); - self.prune_blocks( - transaction, - f_num, - f_hash, - &new_displaced, - current_transaction_justifications, - )?; + let new_displaced = self.blockchain.displaced_leaves_after_finalizing(f_hash, f_num)?; + let finalization_outcome = + FinalizationOutcome::new(new_displaced.displaced_leaves.clone().into_iter()); + + self.blockchain.leaves.write().remove_displaced_leaves(&finalization_outcome); + + self.prune_blocks(transaction, f_num, &new_displaced, current_transaction_justifications)?; Ok(()) } @@ -1829,8 +1815,7 @@ impl Backend { &self, transaction: &mut Transaction, finalized_number: NumberFor, - finalized_hash: Block::Hash, - displaced: &FinalizationOutcome>, + displaced: &DisplacedLeavesAfterFinalization, current_transaction_justifications: &mut HashMap, ) -> ClientResult<()> { match self.blocks_pruning { @@ -1858,10 +1843,10 @@ impl Backend { self.prune_block(transaction, BlockId::::number(number))?; } - self.prune_displaced_branches(transaction, finalized_hash, displaced)?; + self.prune_displaced_branches(transaction, displaced)?; }, BlocksPruning::KeepFinalized => { - self.prune_displaced_branches(transaction, finalized_hash, displaced)?; + 
self.prune_displaced_branches(transaction, displaced)?; }, } Ok(()) @@ -1870,21 +1855,13 @@ impl Backend { fn prune_displaced_branches( &self, transaction: &mut Transaction, - finalized: Block::Hash, - displaced: &FinalizationOutcome>, + displaced: &DisplacedLeavesAfterFinalization, ) -> ClientResult<()> { // Discard all blocks from displaced branches - for h in displaced.leaves() { - match sp_blockchain::tree_route(&self.blockchain, *h, finalized) { - Ok(tree_route) => - for r in tree_route.retracted() { - self.blockchain.insert_persisted_body_if_pinned(r.hash)?; - self.prune_block(transaction, BlockId::::hash(r.hash))?; - }, - Err(sp_blockchain::Error::UnknownBlock(_)) => { - // Sometimes routes can't be calculated. E.g. after warp sync. - }, - Err(e) => Err(e)?, + for (_, tree_route) in displaced.tree_routes.iter() { + for r in tree_route.retracted() { + self.blockchain.insert_persisted_body_if_pinned(r.hash)?; + self.prune_block(transaction, BlockId::::hash(r.hash))?; } } Ok(()) @@ -3190,6 +3167,9 @@ pub(crate) mod tests { #[test] fn test_leaves_pruned_on_finality() { + // / 1b - 2b - 3b + // 0 - 1a - 2a + // \ 1c let backend: Backend = Backend::new_test(10, 10); let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); @@ -3201,18 +3181,16 @@ pub(crate) mod tests { let block2_a = insert_header(&backend, 2, block1_a, None, Default::default()); let block2_b = insert_header(&backend, 2, block1_b, None, Default::default()); - let block2_c = insert_header(&backend, 2, block1_b, None, [1; 32].into()); - assert_eq!( - backend.blockchain().leaves().unwrap(), - vec![block2_a, block2_b, block2_c, block1_c] - ); + let block3_b = insert_header(&backend, 3, block2_b, None, [3; 32].into()); + + assert_eq!(backend.blockchain().leaves().unwrap(), vec![block3_b, block2_a, block1_c]); backend.finalize_block(block1_a, None).unwrap(); backend.finalize_block(block2_a, None).unwrap(); - // leaves at same height stay. Leaves at lower heights pruned. - assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a, block2_b, block2_c]); + // All leaves are pruned that are known to not belong to canonical branch + assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]); } #[test] diff --git a/substrate/client/merkle-mountain-range/src/offchain_mmr.rs b/substrate/client/merkle-mountain-range/src/offchain_mmr.rs index 3c3f0beb6c6..94593f9c2c7 100644 --- a/substrate/client/merkle-mountain-range/src/offchain_mmr.rs +++ b/substrate/client/merkle-mountain-range/src/offchain_mmr.rs @@ -33,7 +33,7 @@ use sp_runtime::{ traits::{Block, Header, NumberFor, One}, Saturating, }; -use std::{collections::VecDeque, sync::Arc}; +use std::{collections::VecDeque, default::Default, sync::Arc}; /// `OffchainMMR` exposes MMR offchain canonicalization and pruning logic. pub struct OffchainMmr, C> { @@ -273,12 +273,11 @@ where self.write_gadget_state_or_log(); // Remove offchain MMR nodes for stale forks. 
- let stale_forks = self.client.expand_forks(¬ification.stale_heads).unwrap_or_else( - |(stale_forks, e)| { - warn!(target: LOG_TARGET, "{:?}", e); - stale_forks - }, - ); + let stale_forks = self.client.expand_forks(¬ification.stale_heads).unwrap_or_else(|e| { + warn!(target: LOG_TARGET, "{:?}", e); + + Default::default() + }); for hash in stale_forks.iter() { self.prune_branch(hash); } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 363d11235dd..b195e05b664 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -2487,6 +2487,7 @@ async fn follow_report_multiple_pruned_block() { client.finalize_block(block_3_hash, None).unwrap(); // Finalizing block 3 directly will also result in block 1 and 2 being finalized. + // It will also mark block 2 and block 3 from the fork as pruned. let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::Finalized(Finalized { finalized_block_hashes: vec![ @@ -2494,7 +2495,7 @@ async fn follow_report_multiple_pruned_block() { format!("{:?}", block_2_hash), format!("{:?}", block_3_hash), ], - pruned_block_hashes: vec![], + pruned_block_hashes: vec![format!("{:?}", block_2_f_hash), format!("{:?}", block_3_f_hash)], }); assert_eq!(event, expected); @@ -2504,7 +2505,6 @@ async fn follow_report_multiple_pruned_block() { // ^^^ finalized // -> block 1 -> block 2_f -> block 3_f // - // Mark block 4 as finalized to force block 2_f and 3_f to get pruned. let block_4 = BlockBuilderBuilder::new(&*client) .on_parent_block(block_3.hash()) @@ -2535,11 +2535,11 @@ async fn follow_report_multiple_pruned_block() { }); assert_eq!(event, expected); - // Block 4 and 5 be reported as pruned, not just the stale head (block 5). + // Blocks from the fork were pruned earlier. let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::Finalized(Finalized { finalized_block_hashes: vec![format!("{:?}", block_4_hash)], - pruned_block_hashes: vec![format!("{:?}", block_2_f_hash), format!("{:?}", block_3_f_hash)], + pruned_block_hashes: vec![], }); assert_eq!(event, expected); } @@ -3714,16 +3714,8 @@ async fn follow_unique_pruned_blocks() { // The chainHead will see block 5 as the best block. However, the // client will finalize the block 6, which is on another fork. // - // When the block 6 is finalized, block 2 block 3 block 4 and block 5 are placed on an invalid - // fork. However, pruning of blocks happens on level N - 1. - // Therefore, no pruned blocks are reported yet. + // When the block 6 is finalized all blocks from the stale forks (2, 3, 4, 5) are pruned. // - // When the block 7 is finalized, block 3 is detected as stale. At this step, block 2 and 3 - // are reported as pruned. - // - // When the block 8 is finalized, block 5 block 4 and block 2 are detected as stale. However, - // only blocks 5 and 4 are reported as pruned. This is because the block 2 was previously - // reported. // Initial setup steps: let block_1_hash = @@ -3776,16 +3768,33 @@ async fn follow_unique_pruned_blocks() { }); assert_eq!(event, expected); - // Block 2 must be reported as pruned, even if it was the previous best. - let event: FollowEvent = get_next_event(&mut sub).await; + // All blocks from stale forks are pruned when we finalize block 6. 
+ let mut event: FollowEvent = get_next_event(&mut sub).await; + + // Sort pruned block hashes to counter flaky test caused by event generation (get_pruned_hashes) + if let FollowEvent::Finalized(Finalized { pruned_block_hashes, .. }) = &mut event { + pruned_block_hashes.sort(); + } + let expected_pruned_block_hashes = { + let mut hashes = vec![ + format!("{:?}", block_2_hash), + format!("{:?}", block_3_hash), + format!("{:?}", block_4_hash), + format!("{:?}", block_5_hash), + ]; + hashes.sort(); + hashes + }; + let expected = FollowEvent::Finalized(Finalized { finalized_block_hashes: vec![ format!("{:?}", block_1_hash), format!("{:?}", block_2_f_hash), format!("{:?}", block_6_hash), ], - pruned_block_hashes: vec![], + pruned_block_hashes: expected_pruned_block_hashes, }); + assert_eq!(event, expected); // Pruned hash can be unpinned. @@ -3802,9 +3811,10 @@ async fn follow_unique_pruned_blocks() { client.finalize_block(block_7_hash, None).unwrap(); let event: FollowEvent = get_next_event(&mut sub).await; + // All necessary blocks were pruned on block 6 finalization. let expected = FollowEvent::Finalized(Finalized { finalized_block_hashes: vec![format!("{:?}", block_7_hash)], - pruned_block_hashes: vec![format!("{:?}", block_2_hash), format!("{:?}", block_3_hash)], + pruned_block_hashes: vec![], }); assert_eq!(event, expected); @@ -3815,10 +3825,11 @@ async fn follow_unique_pruned_blocks() { // Finalize the block 8. client.finalize_block(block_8_hash, None).unwrap(); + // All necessary blocks were pruned on block 6 finalization. let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::Finalized(Finalized { finalized_block_hashes: vec![format!("{:?}", block_8_hash)], - pruned_block_hashes: vec![format!("{:?}", block_4_hash), format!("{:?}", block_5_hash)], + pruned_block_hashes: vec![], }); assert_eq!(event, expected); } diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 35e8b53a09c..3c25c233775 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -978,8 +978,12 @@ where // The stale heads are the leaves that will be displaced after the // block is finalized. - let stale_heads = - self.backend.blockchain().displaced_leaves_after_finalizing(block_number)?; + let stale_heads = self + .backend + .blockchain() + .displaced_leaves_after_finalizing(hash, block_number)? 
+ .hashes() + .collect(); let header = self .backend diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs index 4fcb7c160cb..6542830c998 100644 --- a/substrate/client/service/test/src/client/mod.rs +++ b/substrate/client/service/test/src/client/mod.rs @@ -1164,7 +1164,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { // G -> A1 -> A2 // \ - // -> B1 -> B2 + // -> B1 -> B2 -> B3 let mut finality_notifications = client.finality_notification_stream(); @@ -1249,8 +1249,8 @@ fn finalizing_diverged_block_should_trigger_reorg() { ClientExt::finalize_block(&client, b3.hash(), None).unwrap(); - finality_notification_check(&mut finality_notifications, &[b1.hash()], &[]); - finality_notification_check(&mut finality_notifications, &[b2.hash(), b3.hash()], &[a2.hash()]); + finality_notification_check(&mut finality_notifications, &[b1.hash()], &[a2.hash()]); + finality_notification_check(&mut finality_notifications, &[b2.hash(), b3.hash()], &[]); assert!(matches!(finality_notifications.try_recv().unwrap_err(), TryRecvError::Empty)); } @@ -1371,8 +1371,12 @@ fn finality_notifications_content() { // Import and finalize D4 block_on(client.import_as_final(BlockOrigin::Own, d4.clone())).unwrap(); - finality_notification_check(&mut finality_notifications, &[a1.hash(), a2.hash()], &[c1.hash()]); - finality_notification_check(&mut finality_notifications, &[d3.hash(), d4.hash()], &[b2.hash()]); + finality_notification_check( + &mut finality_notifications, + &[a1.hash(), a2.hash()], + &[c1.hash(), b2.hash()], + ); + finality_notification_check(&mut finality_notifications, &[d3.hash(), d4.hash()], &[a3.hash()]); assert!(matches!(finality_notifications.try_recv().unwrap_err(), TryRecvError::Empty)); } @@ -1601,9 +1605,9 @@ fn doesnt_import_blocks_that_revert_finality() { block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); ClientExt::finalize_block(&client, a3.hash(), None).unwrap(); - finality_notification_check(&mut finality_notifications, &[a1.hash(), a2.hash()], &[]); + finality_notification_check(&mut finality_notifications, &[a1.hash(), a2.hash()], &[b2.hash()]); - finality_notification_check(&mut finality_notifications, &[a3.hash()], &[b2.hash()]); + finality_notification_check(&mut finality_notifications, &[a3.hash()], &[]); assert!(matches!(finality_notifications.try_recv().unwrap_err(), TryRecvError::Empty)); } diff --git a/substrate/primitives/blockchain/src/backend.rs b/substrate/primitives/blockchain/src/backend.rs index 7a09865f858..06e5b682964 100644 --- a/substrate/primitives/blockchain/src/backend.rs +++ b/substrate/primitives/blockchain/src/backend.rs @@ -21,14 +21,17 @@ use log::warn; use parking_lot::RwLock; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, Header as HeaderT, NumberFor, Saturating}, + traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, Justifications, }; -use std::collections::btree_set::BTreeSet; +use std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; use crate::header_metadata::HeaderMetadata; -use crate::error::{Error, Result}; +use crate::{ + error::{Error, Result}, + tree_route, TreeRoute, +}; /// Blockchain database header backend. Does not perform any validation. pub trait HeaderBackend: Send + Sync { @@ -89,62 +92,32 @@ pub trait HeaderBackend: Send + Sync { pub trait ForkBackend: HeaderMetadata + HeaderBackend + Send + Sync { - /// Best effort to get all the header hashes that are part of the provided forks - /// starting only from the fork heads. 
+ /// Returns block hashes for provided fork heads. It skips the fork if when blocks are missing + /// (e.g. warp-sync) and internal `tree_route` function fails. /// - /// The function tries to reconstruct the route from the fork head to the canonical chain. - /// If any of the hashes on the route can't be found in the db, the function won't be able - /// to reconstruct the route anymore. In this case it will give up expanding the current fork, - /// move on to the next ones and at the end it will return an error that also contains - /// the partially expanded forks. + /// Example: + /// G --- A1 --- A2 --- A3 --- A4 ( < fork1 ) + /// \-----C4 --- C5 ( < fork2 ) + /// We finalize A3 and call expand_fork(C5). Result = (C5,C4). fn expand_forks( &self, fork_heads: &[Block::Hash], - ) -> std::result::Result, (BTreeSet, Error)> { - let mut missing_blocks = vec![]; + ) -> std::result::Result, Error> { let mut expanded_forks = BTreeSet::new(); for fork_head in fork_heads { - let mut route_head = *fork_head; - // Insert stale blocks hashes until canonical chain is reached. - // If we reach a block that is already part of the `expanded_forks` we can stop - // processing the fork. - while expanded_forks.insert(route_head) { - match self.header_metadata(route_head) { - Ok(meta) => { - // If the parent is part of the canonical chain or there doesn't exist a - // block hash for the parent number (bug?!), we can abort adding blocks. - let parent_number = meta.number.saturating_sub(1u32.into()); - match self.hash(parent_number) { - Ok(Some(parent_hash)) => - if parent_hash == meta.parent { - break - }, - Ok(None) | Err(_) => { - missing_blocks.push(BlockId::::Number(parent_number)); - break - }, - } - - route_head = meta.parent; - }, - Err(_e) => { - missing_blocks.push(BlockId::::Hash(route_head)); - break - }, - } + match tree_route(self, *fork_head, self.info().finalized_hash) { + Ok(tree_route) => { + for block in tree_route.retracted() { + expanded_forks.insert(block.hash); + } + continue + }, + Err(_) => { + // There are cases when blocks are missing (e.g. warp-sync). + }, } } - if !missing_blocks.is_empty() { - return Err(( - expanded_forks, - Error::UnknownBlocks(format!( - "Missing stale headers {:?} while expanding forks {:?}.", - fork_heads, missing_blocks - )), - )) - } - Ok(expanded_forks) } } @@ -172,14 +145,6 @@ pub trait Backend: /// Results must be ordered best (longest, highest) chain first. fn leaves(&self) -> Result>; - /// Returns displaced leaves after the given block would be finalized. - /// - /// The returned leaves do not contain the leaves from the same height as `block_number`. - fn displaced_leaves_after_finalizing( - &self, - block_number: NumberFor, - ) -> Result>; - /// Return hashes of all blocks that are children of the block with `parent_hash`. fn children(&self, parent_hash: Block::Hash) -> Result>; @@ -255,6 +220,67 @@ pub trait Backend: } fn block_indexed_body(&self, hash: Block::Hash) -> Result>>>; + + /// Returns all leaves that will be displaced after the block finalization. + fn displaced_leaves_after_finalizing( + &self, + finalized_block_hash: Block::Hash, + finalized_block_number: NumberFor, + ) -> std::result::Result, Error> { + let mut result = DisplacedLeavesAfterFinalization::default(); + + if finalized_block_number == Zero::zero() { + return Ok(result) + } + + // For each leaf determine whether it belongs to a non-canonical branch. + for leaf_hash in self.leaves()? 
{ + let leaf_block_header = self.expect_header(leaf_hash)?; + let leaf_number = *leaf_block_header.number(); + + let leaf_tree_route = match tree_route(self, leaf_hash, finalized_block_hash) { + Ok(tree_route) => tree_route, + Err(Error::UnknownBlock(_)) => { + // Sometimes routes can't be calculated. E.g. after warp sync. + continue; + }, + Err(e) => Err(e)?, + }; + + // Is it a stale fork? + let needs_pruning = leaf_tree_route.common_block().hash != finalized_block_hash; + + if needs_pruning { + result.displaced_leaves.insert(leaf_hash, leaf_number); + result.tree_routes.insert(leaf_hash, leaf_tree_route); + } + } + + Ok(result) + } +} + +/// Result of [`Backend::displaced_leaves_after_finalizing`]. +#[derive(Clone, Debug)] +pub struct DisplacedLeavesAfterFinalization { + /// A collection of hashes and block numbers for displaced leaves. + pub displaced_leaves: BTreeMap>, + + /// A collection of tree routes from the leaves to finalized block. + pub tree_routes: BTreeMap>, +} + +impl Default for DisplacedLeavesAfterFinalization { + fn default() -> Self { + Self { displaced_leaves: Default::default(), tree_routes: Default::default() } + } +} + +impl DisplacedLeavesAfterFinalization { + /// Returns a collection of hashes for the displaced leaves. + pub fn hashes(&self) -> impl Iterator + '_ { + self.displaced_leaves.keys().cloned() + } } /// Blockchain info diff --git a/substrate/primitives/blockchain/src/header_metadata.rs b/substrate/primitives/blockchain/src/header_metadata.rs index ccd640c0567..27caaae71ad 100644 --- a/substrate/primitives/blockchain/src/header_metadata.rs +++ b/substrate/primitives/blockchain/src/header_metadata.rs @@ -97,7 +97,7 @@ pub fn lowest_common_ancestor + ?Sized>( } /// Compute a tree-route between two blocks. See tree-route docs for more details. -pub fn tree_route>( +pub fn tree_route + ?Sized>( backend: &T, from: Block::Hash, to: Block::Hash, -- GitLab From 494f12319e538f60260bcb139c8900e2eaf5c329 Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Wed, 15 May 2024 11:53:53 +0200 Subject: [PATCH 005/106] Bump fatality crate to 0.1.1 (#4464) ... 
to get rid of the macro debug logs like this from build output ``` [/home/alexggh/.cargo/registry/src/index.crates.io-6f17d22bba15001f/fatality-proc-macro-0.1.0/src/types.rs:171:23] input = TokenStream [ Literal { kind: Str, symbol: "Error while accessing runtime information", suffix: None, span: #0 bytes(6943..6986), }, ] ``` Signed-off-by: Alexandru Gheorghe --- Cargo.lock | 10 ++++------ polkadot/node/core/backing/Cargo.toml | 2 +- polkadot/node/core/dispute-coordinator/Cargo.toml | 2 +- polkadot/node/core/prospective-parachains/Cargo.toml | 2 +- polkadot/node/core/provisioner/Cargo.toml | 2 +- .../node/network/availability-distribution/Cargo.toml | 2 +- polkadot/node/network/availability-recovery/Cargo.toml | 2 +- polkadot/node/network/bridge/Cargo.toml | 2 +- polkadot/node/network/collator-protocol/Cargo.toml | 2 +- polkadot/node/network/dispute-distribution/Cargo.toml | 2 +- polkadot/node/network/protocol/Cargo.toml | 2 +- .../node/network/statement-distribution/Cargo.toml | 2 +- polkadot/node/subsystem-util/Cargo.toml | 2 +- 13 files changed, 16 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2b2576d1adf..7ff2da4f547 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5362,20 +5362,19 @@ dependencies = [ [[package]] name = "fatality" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61ecdc33d04db74fc23db9f54f6f314c61d29f810d58ba423d0c204888365458" +checksum = "ec6f82451ff7f0568c6181287189126d492b5654e30a788add08027b6363d019" dependencies = [ "fatality-proc-macro", - "syn 2.0.61", "thiserror", ] [[package]] name = "fatality-proc-macro" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac67b350787fd4a934752e30ddb45569da000e14bf3e499224302778b7a918ab" +checksum = "eb42427514b063d97ce21d5199f36c0c307d981434a6be32582bc79fe5bd2303" dependencies = [ "expander", "indexmap 2.2.3", @@ -5383,7 +5382,6 @@ dependencies = [ "proc-macro2 1.0.82", "quote 1.0.35", "syn 2.0.61", - "thiserror", ] [[package]] diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index 9829f1b37cf..f426f73284e 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -21,7 +21,7 @@ statement-table = { package = "polkadot-statement-table", path = "../../../state bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } gum = { package = "tracing-gum", path = "../../gum" } thiserror = { workspace = true } -fatality = "0.1.0" +fatality = "0.1.1" schnellru = "0.2.1" [dev-dependencies] diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml index 938fdce1cb8..dba3bcdd643 100644 --- a/polkadot/node/core/dispute-coordinator/Cargo.toml +++ b/polkadot/node/core/dispute-coordinator/Cargo.toml @@ -16,7 +16,7 @@ parity-scale-codec = "3.6.1" kvdb = "0.13.0" thiserror = { workspace = true } schnellru = "0.2.1" -fatality = "0.1.0" +fatality = "0.1.1" polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index 8d0aec96f01..d38a23c3fda 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -14,7 +14,7 @@ futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } 
parity-scale-codec = "3.6.4" thiserror = { workspace = true } -fatality = "0.1.0" +fatality = "0.1.1" bitvec = "1" polkadot-primitives = { path = "../../../primitives" } diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index 1cd16e6599a..d1978321264 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -19,7 +19,7 @@ polkadot-node-primitives = { path = "../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } futures-timer = "3.0.2" -fatality = "0.1.0" +fatality = "0.1.1" schnellru = "0.2.1" [dev-dependencies] diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index 344389d224f..ff352944908 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -25,7 +25,7 @@ thiserror = { workspace = true } rand = "0.8.5" derive_more = "0.99.17" schnellru = "0.2.1" -fatality = "0.1.0" +fatality = "0.1.1" [dev-dependencies] polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml index b7f817d8523..d12c1b1cff9 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ b/polkadot/node/network/availability-recovery/Cargo.toml @@ -14,7 +14,7 @@ futures = "0.3.30" tokio = "1.37" schnellru = "0.2.1" rand = "0.8.5" -fatality = "0.1.0" +fatality = "0.1.1" thiserror = { workspace = true } async-trait = "0.1.79" gum = { package = "tracing-gum", path = "../../gum" } diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml index 8ab571fbe7c..4bb49baba92 100644 --- a/polkadot/node/network/bridge/Cargo.toml +++ b/polkadot/node/network/bridge/Cargo.toml @@ -24,7 +24,7 @@ polkadot-node-subsystem = { path = "../../subsystem" } polkadot-overseer = { path = "../../overseer" } parking_lot = "0.12.1" bytes = "1" -fatality = "0.1.0" +fatality = "0.1.1" thiserror = { workspace = true } [dev-dependencies] diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml index 201615ac9ff..aa60c0166d2 100644 --- a/polkadot/node/network/collator-protocol/Cargo.toml +++ b/polkadot/node/network/collator-protocol/Cargo.toml @@ -24,7 +24,7 @@ polkadot-node-network-protocol = { path = "../protocol" } polkadot-node-primitives = { path = "../../primitives" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-node-subsystem = { path = "../../subsystem" } -fatality = "0.1.0" +fatality = "0.1.1" thiserror = { workspace = true } tokio-util = "0.7.1" diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml index 7d9a719d852..eb8a7606304 100644 --- a/polkadot/node/network/dispute-distribution/Cargo.toml +++ b/polkadot/node/network/dispute-distribution/Cargo.toml @@ -25,7 +25,7 @@ sc-network = { path = "../../../../substrate/client/network" } sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } sp-keystore = { path = "../../../../substrate/primitives/keystore" } thiserror = { workspace = true } -fatality = "0.1.0" +fatality = "0.1.1" schnellru = "0.2.1" indexmap = "2.0.0" diff --git a/polkadot/node/network/protocol/Cargo.toml 
b/polkadot/node/network/protocol/Cargo.toml
index 29975fc735f..2b741051b4f 100644
--- a/polkadot/node/network/protocol/Cargo.toml
+++ b/polkadot/node/network/protocol/Cargo.toml
@@ -24,7 +24,7 @@ sp-runtime = { path = "../../../../substrate/primitives/runtime" }
 strum = { version = "0.26.2", features = ["derive"] }
 futures = "0.3.30"
 thiserror = { workspace = true }
-fatality = "0.1.0"
+fatality = "0.1.1"
 rand = "0.8"
 derive_more = "0.99"
 gum = { package = "tracing-gum", path = "../../gum" }
diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml
index b5cfeaa0c1e..da8c91a0a29 100644
--- a/polkadot/node/network/statement-distribution/Cargo.toml
+++ b/polkadot/node/network/statement-distribution/Cargo.toml
@@ -24,7 +24,7 @@ arrayvec = "0.7.4"
 indexmap = "2.0.0"
 parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
 thiserror = { workspace = true }
-fatality = "0.1.0"
+fatality = "0.1.1"
 bitvec = "1"

 [dev-dependencies]
diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml
index b6cc368b924..492a9847c96 100644
--- a/polkadot/node/subsystem-util/Cargo.toml
+++ b/polkadot/node/subsystem-util/Cargo.toml
@@ -19,7 +19,7 @@ parking_lot = "0.12.1"
 pin-project = "1.0.9"
 rand = "0.8.5"
 thiserror = { workspace = true }
-fatality = "0.1.0"
+fatality = "0.1.1"
 gum = { package = "tracing-gum", path = "../gum" }
 derive_more = "0.99.17"
 schnellru = "0.2.1"
-- 
GitLab

From 005e3a77150d6bed78114763791450f54f1ba245 Mon Sep 17 00:00:00 2001
From: RadiumBlock
Date: Wed, 15 May 2024 05:11:37 -0500
Subject: [PATCH 006/106] Please consider adding RadiumBlock bootnodes for
 Westend People and Westend Coretime (#3936)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Please add RadiumBlock bootnodes to the spec files for Westend Coretime
and Westend People

---------

Co-authored-by: Dónal Murray
---
 cumulus/parachains/chain-specs/coretime-westend.json | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/cumulus/parachains/chain-specs/coretime-westend.json b/cumulus/parachains/chain-specs/coretime-westend.json
index ab2e97fdbf4..74edd5b2cd9 100644
--- a/cumulus/parachains/chain-specs/coretime-westend.json
+++ b/cumulus/parachains/chain-specs/coretime-westend.json
@@ -5,6 +5,8 @@
 	"bootNodes": [
 		"/dns/westend-coretime-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT",
 		"/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH",
+		"/dns/coretime-westend-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWK7Zj1mCPg6h3eMp7v6akJ1o6AocRr59NLusDwBXQgrhw",
+		"/dns/coretime-westend-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWK7Zj1mCPg6h3eMp7v6akJ1o6AocRr59NLusDwBXQgrhw",
 		"/dns/westend-coretime-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT",
 		"/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH",
 		"/dns/westend-coretime-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT",
-- 
GitLab

From 3fcdada318bfee0b8da945b78496a8191375cf26 Mon Sep 17 00:00:00 2001
From: Javier Viola <363911+pepoviola@users.noreply.github.com>
Date: Wed, 15 May 2024 12:16:46 +0200
Subject: [PATCH 007/106] chore: update zombienet version (#4463)
Adds small fixes, reorganizes the `DEBUG` definition and adds more verbose
debug logging when reading k8s logs. Thx!

---
 .gitlab/pipeline/zombienet.yml           | 3 ++-
 .gitlab/pipeline/zombienet/cumulus.yml   | 1 -
 .gitlab/pipeline/zombienet/polkadot.yml  | 1 -
 .gitlab/pipeline/zombienet/substrate.yml | 1 -
 4 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml
index 52948e1eb71..7be4ba1663e 100644
--- a/.gitlab/pipeline/zombienet.yml
+++ b/.gitlab/pipeline/zombienet.yml
@@ -1,8 +1,9 @@
 .zombienet-refs:
   extends: .build-refs
   variables:
-    ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.99"
+    ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.103"
     PUSHGATEWAY_URL: "http://zombienet-prometheus-pushgateway.managed-monitoring:9091/metrics/job/zombie-metrics"
+    DEBUG: "zombie,zombie::network-node,zombie::kube::client::logs"

 include:
   # substrate tests
diff --git a/.gitlab/pipeline/zombienet/cumulus.yml b/.gitlab/pipeline/zombienet/cumulus.yml
index c473f5c5fed..a7f321505ba 100644
--- a/.gitlab/pipeline/zombienet/cumulus.yml
+++ b/.gitlab/pipeline/zombienet/cumulus.yml
@@ -15,7 +15,6 @@
     - echo "${COL_IMAGE}"
     - echo "${GH_DIR}"
     - echo "${LOCAL_DIR}"
-    - export DEBUG=zombie
     - export RELAY_IMAGE=${POLKADOT_IMAGE}
     - export COL_IMAGE=${COL_IMAGE}
diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml
index 38c5332f309..a9f0eb93033 100644
--- a/.gitlab/pipeline/zombienet/polkadot.yml
+++ b/.gitlab/pipeline/zombienet/polkadot.yml
@@ -10,7 +10,6 @@
     - if [[ $CI_COMMIT_REF_NAME == *"gh-readonly-queue"* ]]; then export DOCKER_IMAGES_VERSION="${CI_COMMIT_SHORT_SHA}"; fi
     - export PIPELINE_IMAGE_TAG=${DOCKER_IMAGES_VERSION}
     - export BUILD_RELEASE_VERSION="$(cat ./artifacts/BUILD_RELEASE_VERSION)" # from build-linux-stable job
-    - export DEBUG=zombie,zombie::network-node
    - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG}
     - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG}
     - export CUMULUS_IMAGE="docker.io/paritypr/polkadot-parachain-debug:${DOCKER_IMAGES_VERSION}"
diff --git a/.gitlab/pipeline/zombienet/substrate.yml b/.gitlab/pipeline/zombienet/substrate.yml
index 8a627c454f9..2013ffd571c 100644
--- a/.gitlab/pipeline/zombienet/substrate.yml
+++ b/.gitlab/pipeline/zombienet/substrate.yml
@@ -13,7 +13,6 @@
     - echo "${ZOMBIENET_IMAGE}"
     - echo "${GH_DIR}"
     - echo "${LOCAL_DIR}"
-    - export DEBUG=zombie,zombie::network-node
     - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${SUBSTRATE_IMAGE}":${SUBSTRATE_IMAGE_TAG}
     - echo "${ZOMBIENET_INTEGRATION_TEST_IMAGE}"
   stage: zombienet
-- 
GitLab

From e6d934c965f22ae1fadd7e61dbdded72a61b5aa2 Mon Sep 17 00:00:00 2001
From: drewstone
Date: Wed, 15 May 2024 05:23:22 -0600
Subject: [PATCH 008/106] Update BABE README.md (#4138)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Link was dead

---------

Co-authored-by: Bastian Köcher
---
 .config/lychee.toml                                | 3 ---
 polkadot/runtime/rococo/constants/src/lib.rs       | 2 +-
 polkadot/runtime/test-runtime/constants/src/lib.rs | 2 +-
 polkadot/runtime/westend/constants/src/lib.rs      | 2 +-
 substrate/bin/node/runtime/src/constants.rs        | 2 +-
 substrate/client/consensus/babe/README.md          | 2 +-
 substrate/client/consensus/babe/src/lib.rs         | 2 +-
 7 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/.config/lychee.toml b/.config/lychee.toml
index ad6a0ef7554..b7b5b83f35b 100644
--- a/.config/lychee.toml
+++ b/.config/lychee.toml
@@ -34,11 +34,8 @@ exclude = [
"https://github.com/zkcrypto/bls12_381/blob/e224ad4ea1babfc582ccd751c2bf128611d10936/src/test-data/mod.rs", "https://polkadot-try-runtime-node.parity-chains.parity.io/", "https://polkadot.network/the-path-of-a-parachain-block/", - "https://research.web3.foundation/en/latest/polkadot/BABE/Babe/#6-practical-results", "https://research.web3.foundation/en/latest/polkadot/NPoS/3.%20Balancing.html", "https://research.web3.foundation/en/latest/polkadot/Token%20Economics.html#inflation-model", - "https://research.web3.foundation/en/latest/polkadot/block-production/Babe.html", - "https://research.web3.foundation/en/latest/polkadot/block-production/Babe.html#-6.-practical-results", "https://research.web3.foundation/en/latest/polkadot/networking/3-avail-valid.html#topology", "https://research.web3.foundation/en/latest/polkadot/overview/2-token-economics.html", "https://research.web3.foundation/en/latest/polkadot/overview/2-token-economics.html#inflation-model", diff --git a/polkadot/runtime/rococo/constants/src/lib.rs b/polkadot/runtime/rococo/constants/src/lib.rs index 9209045364c..89d5deb86f1 100644 --- a/polkadot/runtime/rococo/constants/src/lib.rs +++ b/polkadot/runtime/rococo/constants/src/lib.rs @@ -57,7 +57,7 @@ pub mod time { // 1 in 4 blocks (on average, not counting collisions) will be primary babe blocks. // The choice of is done in accordance to the slot duration and expected target // block time, for safely resisting network delays of maximum two seconds. - // + // pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4); } diff --git a/polkadot/runtime/test-runtime/constants/src/lib.rs b/polkadot/runtime/test-runtime/constants/src/lib.rs index 77c83b063cf..2422762ca38 100644 --- a/polkadot/runtime/test-runtime/constants/src/lib.rs +++ b/polkadot/runtime/test-runtime/constants/src/lib.rs @@ -45,7 +45,7 @@ pub mod time { // 1 in 4 blocks (on average, not counting collisions) will be primary babe blocks. // The choice of is done in accordance to the slot duration and expected target // block time, for safely resisting network delays of maximum two seconds. - // + // pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4); } diff --git a/polkadot/runtime/westend/constants/src/lib.rs b/polkadot/runtime/westend/constants/src/lib.rs index c98f4b114fd..1a4c1f31106 100644 --- a/polkadot/runtime/westend/constants/src/lib.rs +++ b/polkadot/runtime/westend/constants/src/lib.rs @@ -52,7 +52,7 @@ pub mod time { // 1 in 4 blocks (on average, not counting collisions) will be primary babe blocks. // The choice of is done in accordance to the slot duration and expected target // block time, for safely resisting network delays of maximum two seconds. - // + // pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4); } diff --git a/substrate/bin/node/runtime/src/constants.rs b/substrate/bin/node/runtime/src/constants.rs index e4fafbf0fa4..d13dca48d1f 100644 --- a/substrate/bin/node/runtime/src/constants.rs +++ b/substrate/bin/node/runtime/src/constants.rs @@ -50,7 +50,7 @@ pub mod time { /// always be assigned, in which case `MILLISECS_PER_BLOCK` and /// `SLOT_DURATION` should have the same value. /// - /// + /// pub const MILLISECS_PER_BLOCK: Moment = 3000; pub const SECS_PER_BLOCK: Moment = MILLISECS_PER_BLOCK / 1000; diff --git a/substrate/client/consensus/babe/README.md b/substrate/client/consensus/babe/README.md index a3cf944b513..47b5820ff71 100644 --- a/substrate/client/consensus/babe/README.md +++ b/substrate/client/consensus/babe/README.md @@ -43,6 +43,6 @@ primary blocks in the chain. 
 We will pick the heaviest chain (more primary blocks) and will go with the longest one in case of
 a tie.

 An in-depth description and analysis of the protocol can be found here:
-
+

 License: GPL-3.0-or-later WITH Classpath-exception-2.0
diff --git a/substrate/client/consensus/babe/src/lib.rs b/substrate/client/consensus/babe/src/lib.rs
index a1ca6fd23b5..0c85de24004 100644
--- a/substrate/client/consensus/babe/src/lib.rs
+++ b/substrate/client/consensus/babe/src/lib.rs
@@ -61,7 +61,7 @@
 //! blocks) and will go with the longest one in case of a tie.
 //!
 //! An in-depth description and analysis of the protocol can be found here:
-//!
+//!

 #![forbid(unsafe_code)]
 #![warn(missing_docs)]
-- 
GitLab

From 4d47b4431beaf12ed9ea45c1fb97ae87f0d145cd Mon Sep 17 00:00:00 2001
From: Ankan <10196091+Ank4n@users.noreply.github.com>
Date: Wed, 15 May 2024 14:00:24 +0200
Subject: [PATCH 009/106] Introduces: Delegated Staking Pallet (#3904)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This is the second PR in preparation for
https://github.com/paritytech/polkadot-sdk/issues/454.

## Also see

- **Precursor** https://github.com/paritytech/polkadot-sdk/pull/3889.
- **Follow up** https://github.com/paritytech/polkadot-sdk/pull/3905.

Overall changes are documented here (a lot more visual 😄):
https://hackmd.io/@ak0n/454-np-governance

## Changes

### Delegation Interface

Provides delegation primitives for staking. Introduces two new roles:
- Agent: Accounts that receive delegations from other accounts
  (delegators) and stake on their behalf. The funds are held in the
  delegator accounts.
- Delegator: Accounts that delegate their funds to an agent, authorising
  it to use them for staking.

Supports:
- A way for delegators to add or withdraw delegation to an agent.
- A way for an agent to slash a delegator during a slashing event.

### Pallet Delegated Staking

- Implements `DelegationInterface`.
- Lazy slashing: Any slashes to an Agent are posted in a ledger but not
  immediately applied. The agent can call
  `DelegationInterface::delegator_slash` to slash the member and clear the
  corresponding slash from its ledger.
- Consumes `StakingInterface` to provide `CoreStaking` features. In
  reality, this will be `pallet-staking`.
- Ensures bookkeeping for agent and delegator is correct but leaves the
  management of reward and slash logic up to the consumer of this pallet.
- While it does not expose any calls yet, it is written with the intent of
  exposing these primitives via extrinsics.

## TODO

- [x] Improve unit tests in the pallet.
- [x] Separate slash reward perbill for rewarding the slash reporters?
- [x] Review if we should add more events.
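As a rough mental model of the bookkeeping described above, here is a
minimal, self-contained sketch. It does not use the pallet's real types;
`AgentLedger`, `Ledgers` and the account strings below are all invented
for illustration only:

```rust
// Illustrative model of Agent/Delegator bookkeeping: delegated funds stay
// held in the delegator's own account, while the agent's ledger tracks the
// delegated total and any lazily posted slashes.
use std::collections::HashMap;

#[derive(Default)]
struct AgentLedger {
    total_delegated: u128, // sum of all funds delegated to this agent
    pending_slash: u128,   // slashes posted lazily, applied per-delegator later
}

#[derive(Default)]
struct Ledgers {
    agents: HashMap<&'static str, AgentLedger>,
    // delegator -> (agent, amount held in the delegator's own account)
    delegations: HashMap<&'static str, (&'static str, u128)>,
}

impl Ledgers {
    // Delegation only moves numbers: the funds remain (held) in the
    // delegator's account and the agent's ledger total goes up.
    fn delegate(&mut self, delegator: &'static str, agent: &'static str, amount: u128) {
        self.agents.entry(agent).or_default().total_delegated += amount;
        let entry = self.delegations.entry(delegator).or_insert((agent, 0));
        assert_eq!(entry.0, agent, "a delegator may only use one agent");
        entry.1 += amount;
    }

    // Lazy slashing: a slash against the agent is only recorded...
    fn post_slash(&mut self, agent: &'static str, amount: u128) {
        self.agents.get_mut(agent).unwrap().pending_slash += amount;
    }

    // ...and later applied against an individual delegator's held funds.
    fn apply_slash(&mut self, agent: &'static str, delegator: &'static str, amount: u128) {
        let a = self.agents.get_mut(agent).unwrap();
        a.pending_slash -= amount;
        a.total_delegated -= amount;
        self.delegations.get_mut(delegator).unwrap().1 -= amount;
    }
}

fn main() {
    let mut l = Ledgers::default();
    l.delegate("alice", "pool_agent", 100);
    l.delegate("bob", "pool_agent", 50);
    l.post_slash("pool_agent", 30);           // slash recorded lazily
    l.apply_slash("pool_agent", "alice", 30); // applied to one delegator
    assert_eq!(l.agents["pool_agent"].total_delegated, 120);
}
```

The real pallet additionally routes the agent's stake into
`pallet-staking` via `CoreStaking` and tracks unclaimed withdrawals; see
`substrate/frame/delegated-staking/src/lib.rs` below.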
---------

Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com>
Co-authored-by: Gonçalo Pestana
Co-authored-by: georgepisaltu <52418509+georgepisaltu@users.noreply.github.com>
---
 Cargo.lock                                    |  22 +
 Cargo.toml                                    |   1 +
 prdoc/pr_3904.prdoc                           |  19 +
 substrate/frame/delegated-staking/Cargo.toml  |  69 ++
 .../frame/delegated-staking/src/impls.rs      | 149 ++++
 substrate/frame/delegated-staking/src/lib.rs  | 815 ++++++++++++++++++
 substrate/frame/delegated-staking/src/mock.rs | 308 +++++++
 .../frame/delegated-staking/src/tests.rs      | 685 +++++++++++++++
 .../frame/delegated-staking/src/types.rs      | 292 +++++++
 substrate/frame/staking/src/lib.rs            |   2 +-
 substrate/primitives/staking/src/lib.rs       | 119 +++
 11 files changed, 2480 insertions(+), 1 deletion(-)
 create mode 100644 prdoc/pr_3904.prdoc
 create mode 100644 substrate/frame/delegated-staking/Cargo.toml
 create mode 100644 substrate/frame/delegated-staking/src/impls.rs
 create mode 100644 substrate/frame/delegated-staking/src/lib.rs
 create mode 100644 substrate/frame/delegated-staking/src/mock.rs
 create mode 100644 substrate/frame/delegated-staking/src/tests.rs
 create mode 100644 substrate/frame/delegated-staking/src/types.rs

diff --git a/Cargo.lock b/Cargo.lock
index 7ff2da4f547..c91025ea34c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -10123,6 +10123,28 @@ dependencies = [
  "sp-std 14.0.0",
 ]

+[[package]]
+name = "pallet-delegated-staking"
+version = "1.0.0"
+dependencies = [
+ "frame-election-provider-support",
+ "frame-support",
+ "frame-system",
+ "pallet-balances",
+ "pallet-staking",
+ "pallet-staking-reward-curve",
+ "pallet-timestamp",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-core",
+ "sp-io",
+ "sp-runtime",
+ "sp-staking",
+ "sp-std 14.0.0",
+ "sp-tracing 16.0.0",
+ "substrate-test-utils",
+]
+
 [[package]]
 name = "pallet-democracy"
 version = "28.0.0"
diff --git a/Cargo.toml b/Cargo.toml
index 1440c2d497d..dcf410daa1f 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -328,6 +328,7 @@ members = [
 	"substrate/frame/contracts/uapi",
 	"substrate/frame/conviction-voting",
 	"substrate/frame/core-fellowship",
+	"substrate/frame/delegated-staking",
 	"substrate/frame/democracy",
 	"substrate/frame/election-provider-multi-phase",
 	"substrate/frame/election-provider-multi-phase/test-staking-e2e",
diff --git a/prdoc/pr_3904.prdoc b/prdoc/pr_3904.prdoc
new file mode 100644
index 00000000000..694f9b44387
--- /dev/null
+++ b/prdoc/pr_3904.prdoc
@@ -0,0 +1,19 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Introduce pallet-delegated-staking
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Adds a new pallet `delegated-staking` that allows delegators to delegate their funds to agents who can stake
+      these funds on their behalf. This would be used by Nomination Pools to migrate into a delegation-staking based
+      pool.
+ +crates: + - name: pallet-delegated-staking + bump: patch + - name: pallet-staking + bump: patch + - name: sp-staking + bump: minor diff --git a/substrate/frame/delegated-staking/Cargo.toml b/substrate/frame/delegated-staking/Cargo.toml new file mode 100644 index 00000000000..a9cbd758ed0 --- /dev/null +++ b/substrate/frame/delegated-staking/Cargo.toml @@ -0,0 +1,69 @@ +[package] +name = "pallet-delegated-staking" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +homepage = "https://substrate.io" +repository.workspace = true +description = "FRAME delegated staking pallet" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +sp-std = { path = "../../primitives/std", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-staking = { path = "../../primitives/staking", default-features = false } + +[dev-dependencies] +sp-core = { path = "../../primitives/core" } +sp-io = { path = "../../primitives/io" } +substrate-test-utils = { path = "../../test-utils" } +sp-tracing = { path = "../../primitives/tracing" } +pallet-staking = { path = "../staking" } +pallet-balances = { path = "../balances" } +pallet-timestamp = { path = "../timestamp" } +pallet-staking-reward-curve = { path = "../staking/reward-curve" } +frame-election-provider-support = { path = "../election-provider-support", default-features = false } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-election-provider-support/std", + "frame-support/std", + "frame-system/std", + "pallet-balances/std", + "pallet-staking/std", + "pallet-timestamp/std", + "scale-info/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-staking/std", + "sp-std/std", +] +runtime-benchmarks = [ + "frame-election-provider-support/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-staking/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "sp-staking/runtime-benchmarks", +] +try-runtime = [ + "frame-election-provider-support/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-staking/try-runtime", + "pallet-timestamp/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/delegated-staking/src/impls.rs b/substrate/frame/delegated-staking/src/impls.rs new file mode 100644 index 00000000000..b1945b0ce37 --- /dev/null +++ b/substrate/frame/delegated-staking/src/impls.rs @@ -0,0 +1,149 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Implementations of public traits, namely [`DelegationInterface`] and [`OnStakingUpdate`].
+
+use super::*;
+use sp_staking::{DelegationInterface, DelegationMigrator, OnStakingUpdate};
+
+impl<T: Config> DelegationInterface for Pallet<T> {
+	type Balance = BalanceOf<T>;
+	type AccountId = T::AccountId;
+
+	/// Effective balance of the `Agent` account.
+	fn agent_balance(who: &Self::AccountId) -> Self::Balance {
+		Agent::<T>::get(who)
+			.map(|agent| agent.ledger.effective_balance())
+			.unwrap_or_default()
+	}
+
+	fn delegator_balance(delegator: &Self::AccountId) -> Self::Balance {
+		Delegation::<T>::get(delegator).map(|d| d.amount).unwrap_or_default()
+	}
+
+	/// Delegate funds to an `Agent`.
+	fn delegate(
+		who: &Self::AccountId,
+		agent: &Self::AccountId,
+		reward_account: &Self::AccountId,
+		amount: Self::Balance,
+	) -> DispatchResult {
+		Pallet::<T>::register_agent(
+			RawOrigin::Signed(agent.clone()).into(),
+			reward_account.clone(),
+		)?;
+
+		// Delegate the funds from who to the `Agent` account.
+		Pallet::<T>::delegate_to_agent(RawOrigin::Signed(who.clone()).into(), agent.clone(), amount)
+	}
+
+	/// Add more delegation to the `Agent` account.
+	fn delegate_extra(
+		who: &Self::AccountId,
+		agent: &Self::AccountId,
+		amount: Self::Balance,
+	) -> DispatchResult {
+		Pallet::<T>::delegate_to_agent(RawOrigin::Signed(who.clone()).into(), agent.clone(), amount)
+	}
+
+	/// Withdraw delegation of `delegator` to `Agent`.
+	///
+	/// If there are funds in the `Agent` account that can be withdrawn, those funds are
+	/// unlocked/released into the delegator's account.
+	fn withdraw_delegation(
+		delegator: &Self::AccountId,
+		agent: &Self::AccountId,
+		amount: Self::Balance,
+		num_slashing_spans: u32,
+	) -> DispatchResult {
+		Pallet::<T>::release_delegation(
+			RawOrigin::Signed(agent.clone()).into(),
+			delegator.clone(),
+			amount,
+			num_slashing_spans,
+		)
+	}
+
+	/// Returns true if the `Agent` has any slash pending to be applied.
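+	///
+	/// A `true` return means [`Self::delegator_slash`] still needs to be called to apply the
+	/// slash to individual delegators; an unapplied slash restricts some operations on the
+	/// agent.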
+	fn has_pending_slash(agent: &Self::AccountId) -> bool {
+		Agent::<T>::get(agent)
+			.map(|d| !d.ledger.pending_slash.is_zero())
+			.unwrap_or(false)
+	}
+
+	fn delegator_slash(
+		agent: &Self::AccountId,
+		delegator: &Self::AccountId,
+		value: Self::Balance,
+		maybe_reporter: Option<Self::AccountId>,
+	) -> sp_runtime::DispatchResult {
+		Pallet::<T>::do_slash(agent.clone(), delegator.clone(), value, maybe_reporter)
+	}
+}
+
+impl<T: Config> DelegationMigrator for Pallet<T> {
+	type Balance = BalanceOf<T>;
+	type AccountId = T::AccountId;
+
+	fn migrate_nominator_to_agent(
+		agent: &Self::AccountId,
+		reward_account: &Self::AccountId,
+	) -> DispatchResult {
+		Pallet::<T>::migrate_to_agent(
+			RawOrigin::Signed(agent.clone()).into(),
+			reward_account.clone(),
+		)
+	}
+
+	fn migrate_delegation(
+		agent: &Self::AccountId,
+		delegator: &Self::AccountId,
+		value: Self::Balance,
+	) -> DispatchResult {
+		Pallet::<T>::migrate_delegation(
+			RawOrigin::Signed(agent.clone()).into(),
+			delegator.clone(),
+			value,
+		)
+	}
+}
+
+impl<T: Config> OnStakingUpdate<T::AccountId, BalanceOf<T>> for Pallet<T> {
+	fn on_slash(
+		who: &T::AccountId,
+		_slashed_active: BalanceOf<T>,
+		_slashed_unlocking: &sp_std::collections::btree_map::BTreeMap<EraIndex, BalanceOf<T>>,
+		slashed_total: BalanceOf<T>,
+	) {
+		<Agents<T>>::mutate(who, |maybe_register| match maybe_register {
+			// if existing agent, register the slashed amount as pending slash.
+			Some(register) => register.pending_slash.saturating_accrue(slashed_total),
+			None => {
+				// nothing to do
+			},
+		});
+	}
+
+	fn on_withdraw(stash: &T::AccountId, amount: BalanceOf<T>) {
+		// if there is a withdrawal for the agent, add it to the unclaimed withdrawals.
+		let _ = Agent::<T>::get(stash)
+			// can't do anything if there is an overflow error. Just raise a defensive error.
+			.and_then(|agent| agent.add_unclaimed_withdraw(amount).defensive())
+			.map(|agent| agent.save());
+	}
+}
diff --git a/substrate/frame/delegated-staking/src/lib.rs b/substrate/frame/delegated-staking/src/lib.rs
new file mode 100644
index 00000000000..210f69d9c83
--- /dev/null
+++ b/substrate/frame/delegated-staking/src/lib.rs
@@ -0,0 +1,815 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! # Delegated Staking Pallet
+//!
+//! This pallet implements [`sp_staking::DelegationInterface`], which provides delegation
+//! functionality to `delegators` and `agents`. It is designed to be used in conjunction with
+//! [`StakingInterface`] and relies on [`Config::CoreStaking`] to provide primitive staking
+//! functions.
+//!
+//! Currently, it does not expose any dispatchable calls but is written with a vision to expose
+//! them in the future such that it can be utilised by any external account, off-chain entity or
+//! xcm `MultiLocation` such as a parachain or a smart contract.
+//!
+//! ## Key Terminologies
+//! - **Agent**: An account that accepts delegations from other accounts and acts as an agent on
+//!   their behalf, staking these delegated funds. Also sometimes referred to as `Delegatee`.
+//! - **Delegator**: An account that delegates their funds to an `agent`, authorising the agent
+//!   to use them for staking.
+//! - **AgentLedger**: A data structure that holds important information about the `agent` such as
+//!   total delegations they have received, any slashes posted to them, etc.
+//! - **Delegation**: A data structure that stores the amount of funds delegated to an `agent` by a
+//!   `delegator`.
+//!
+//! ## Goals
+//!
+//! Direct nomination on the Staking pallet does not scale well. Nomination pools were created to
+//! address this by pooling delegator funds into one account and then staking it. This, however,
+//! had a critical limitation: the funds were moved from the delegator's account to the pool
+//! account, so the delegator lost control over their funds and could no longer use them for other
+//! purposes such as governance. This pallet aims to solve this by extending the staking pallet to
+//! support a new primitive function: delegation of funds to an `agent` with the intent of staking.
+//! The agent can then stake the delegated funds to [`Config::CoreStaking`] on behalf of the
+//! delegators.
+//!
+//! ### Withdrawal Management
+//! Agent unbonding does not regulate the ordering of subsequent withdrawals for delegators. It is
+//! up to the consumer of this pallet to decide in what order unbondable funds from
+//! [`Config::CoreStaking`] can be withdrawn by the delegators.
+//!
+//! ### Reward and Slashing
+//! This pallet does not enforce any specific strategy for how rewards or slashes are applied. It
+//! is up to the `agent` account to decide how to apply the rewards and slashes.
+//!
+//! This importantly allows clients of this pallet to build their own strategies for
+//! rewards/slashes. For example, an `agent` account can choose to first slash the reward pot
+//! before slashing the delegators. Or part of the reward can go to an insurance fund that can be
+//! used to cover any potential future slashes. The goal is to eventually allow foreign
+//! MultiLocations (smart contracts or pallets on another chain) to build their own pooled staking
+//! solutions similar to `NominationPools`.

+//! ## Core functions
+//!
+//! - Allow an account to receive delegations. See [`Pallet::register_agent`].
+//! - Delegate funds to an `agent` account. See [`Pallet::delegate_to_agent`].
+//! - Release delegated funds from an `agent` account to the `delegator`. See
+//!   [`Pallet::release_delegation`].
+//! - Migrate a `Nominator` account to an `agent` account. See [`Pallet::migrate_to_agent`].
+//!   Explained in more detail in the `Migration` section.
+//! - Migrate unclaimed delegated funds from `agent` to delegator. When a nominator migrates to an
+//!   agent, the funds are held in a proxy account. This function allows the delegator to claim
+//!   their share of the funds from the proxy account. See [`Pallet::migrate_delegation`].
+//!
+//! ## Lazy Slashing
+//! One of the reasons direct nominators on the staking pallet cannot scale well is that all
+//! nominators are slashed at the same time. This is expensive and needs to be a bounded operation.
+//!
+//! This pallet implements a lazy slashing mechanism. Any slashes to the `agent` are posted in its
+//! `AgentLedger` as a pending slash. Since the actual amount is held in the multiple
+//! `delegator` accounts, this pallet has no way to know how to apply the slash. It is the
+//! `agent`'s responsibility to apply slashes for each delegator, one at a time. The staking pallet
+//! ensures the pending slash never exceeds the staked amount and freezes further withdrawals until
+//! all pending slashes are cleared.
+//!
+//! The user of this pallet can apply slash using
+//! [DelegationInterface::delegator_slash](sp_staking::DelegationInterface::delegator_slash).
+//!
+//! ## Migration from Nominator to Agent
+//! More details [here](https://hackmd.io/@ak0n/454-np-governance).
+//!
+//! ## Nomination Pool vs Delegation Staking
+//! This pallet is not a replacement for Nomination Pool but adds a new primitive in addition to
+//! the staking pallet that can be used by Nomination Pool to support delegation based staking. It
+//! can be thought of as an extension to the Staking Pallet in relation to Nomination Pools.
+//! Technically, these changes could be made in one of those pallets as well, but that would have
+//! meant significant refactoring and high chances of introducing a regression. With this approach,
+//! we can keep the existing pallets with minimal changes and introduce a new pallet that can be
+//! optionally used by Nomination Pool. The vision is to build this in a configurable way such that
+//! the runtime can choose whether to use this pallet or not.
+//!
+//! With that said, the following is the main difference between the two approaches:
+//! #### Nomination Pool without delegation support
+//! 1) transfer funds from the delegator to the pool account, and
+//! 2) stake from the pool account as a direct nominator.
+//!
+//! #### Nomination Pool with delegation support
+//! 1) delegate funds from the delegator to the pool account, and
+//! 2) stake from the pool account as an `Agent` account on the staking pallet.
+//!
+//! The difference being, in the second approach, the delegated funds will be locked in place in
+//! the user's account, enabling them to participate in use cases that allow use of `held` funds,
+//! such as participation in governance voting.
+//!
+//! The nomination pool still does all the heavy lifting around pool administration, reward
+//! distribution and lazy slashing and, as such, is not meant to be replaced by this pallet.
+//!
+//! ## Limitations
+//! - Rewards cannot be auto-compounded.
+//! - Slashes are lazy and hence there could be a period of time when an account can use funds for
+//!   operations such as voting in governance even though they should be slashed.

+#![cfg_attr(not(feature = "std"), no_std)]
+#![deny(rustdoc::broken_intra_doc_links)]
+
+mod impls;
+#[cfg(test)]
+mod mock;
+#[cfg(test)]
+mod tests;
+mod types;
+
+pub use pallet::*;
+
+use types::*;
+
+use frame_support::{
+	pallet_prelude::*,
+	traits::{
+		fungible::{
+			hold::{
+				Balanced as FunHoldBalanced, Inspect as FunHoldInspect, Mutate as FunHoldMutate,
+			},
+			Balanced, Inspect as FunInspect, Mutate as FunMutate,
+		},
+		tokens::{fungible::Credit, Fortitude, Precision, Preservation},
+		Defensive, DefensiveOption, Imbalance, OnUnbalanced,
+	},
+};
+use sp_runtime::{
+	traits::{AccountIdConversion, CheckedAdd, CheckedSub, Zero},
+	ArithmeticError, DispatchResult, Perbill, RuntimeDebug, Saturating,
+};
+use sp_staking::{EraIndex, StakingInterface, StakingUnchecked};
+use sp_std::{convert::TryInto, prelude::*};
+
+pub type BalanceOf<T> =
+	<<T as Config>::Currency as FunInspect<<T as frame_system::Config>::AccountId>>::Balance;
+
+use frame_system::{ensure_signed, pallet_prelude::*, RawOrigin};
+
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+
+	#[pallet::pallet]
+	pub struct Pallet<T>(PhantomData<T>);
+
+	#[pallet::config]
+	pub trait Config: frame_system::Config {
+		/// The overarching event type.
+ type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// Injected identifier for the pallet. + #[pallet::constant] + type PalletId: Get; + + /// Currency type. + type Currency: FunHoldMutate + + FunMutate + + FunHoldBalanced; + + /// Handler for the unbalanced reduction when slashing a delegator. + type OnSlash: OnUnbalanced>; + + /// Fraction of the slash that is rewarded to the caller of pending slash to the agent. + #[pallet::constant] + type SlashRewardFraction: Get; + + /// Overarching hold reason. + type RuntimeHoldReason: From; + + /// Core staking implementation. + type CoreStaking: StakingUnchecked, AccountId = Self::AccountId>; + } + + #[pallet::error] + pub enum Error { + /// The account cannot perform this operation. + NotAllowed, + /// An existing staker cannot perform this action. + AlreadyStaking, + /// Reward Destination cannot be same as `Agent` account. + InvalidRewardDestination, + /// Delegation conditions are not met. + /// + /// Possible issues are + /// 1) Cannot delegate to self, + /// 2) Cannot delegate to multiple delegates. + InvalidDelegation, + /// The account does not have enough funds to perform the operation. + NotEnoughFunds, + /// Not an existing `Agent` account. + NotAgent, + /// Not a Delegator account. + NotDelegator, + /// Some corruption in internal state. + BadState, + /// Unapplied pending slash restricts operation on `Agent`. + UnappliedSlash, + /// `Agent` has no pending slash to be applied. + NothingToSlash, + /// Failed to withdraw amount from Core Staking. + WithdrawFailed, + /// Operation not supported by this pallet. + NotSupported, + } + + /// A reason for placing a hold on funds. + #[pallet::composite_enum] + pub enum HoldReason { + /// Funds held for stake delegation to another account. + #[codec(index = 0)] + StakingDelegation, + } + + #[pallet::event] + #[pallet::generate_deposit(pub (super) fn deposit_event)] + pub enum Event { + /// Funds delegated by a delegator. + Delegated { agent: T::AccountId, delegator: T::AccountId, amount: BalanceOf }, + /// Funds released to a delegator. + Released { agent: T::AccountId, delegator: T::AccountId, amount: BalanceOf }, + /// Funds slashed from a delegator. + Slashed { agent: T::AccountId, delegator: T::AccountId, amount: BalanceOf }, + } + + /// Map of Delegators to their `Delegation`. + /// + /// Implementation note: We are not using a double map with `delegator` and `agent` account + /// as keys since we want to restrict delegators to delegate only to one account at a time. + #[pallet::storage] + pub(crate) type Delegators = + CountedStorageMap<_, Twox64Concat, T::AccountId, Delegation, OptionQuery>; + + /// Map of `Agent` to their `Ledger`. + #[pallet::storage] + pub(crate) type Agents = + CountedStorageMap<_, Twox64Concat, T::AccountId, AgentLedger, OptionQuery>; + + // This pallet is not currently written with the intention of exposing any calls. But the + // functions defined in the following impl block should act as a good reference for how the + // exposed calls would look like when exposed. + impl Pallet { + /// Register an account to become a stake `Agent`. Sometimes also called a `Delegatee`. + /// + /// Delegators can authorize `Agent`s to stake on their behalf by delegating their funds to + /// them. The `Agent` can then use the delegated funds to stake to [`Config::CoreStaking`]. + /// + /// An account that is directly staked to [`Config::CoreStaking`] cannot become an `Agent`. + /// However, they can migrate to become an agent using [`Self::migrate_to_agent`]. 
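+		///
+		/// A rough sketch of the expected call flow, with hypothetical `agent`, `reward_acc`,
+		/// `delegator` and `amount` values (the sketch itself is not part of this patch):
+		/// ```ignore
+		/// // the agent registers with a separate reward account, after which delegators
+		/// // can delegate funds to it:
+		/// Pallet::<T>::register_agent(RawOrigin::Signed(agent).into(), reward_acc)?;
+		/// Pallet::<T>::delegate_to_agent(RawOrigin::Signed(delegator).into(), agent, amount)?;
+		/// ```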
+		///
+		/// Implementation note: This function allows any account to become an agent. It is
+		/// important though that accounts that call [`StakingUnchecked::virtual_bond`] are keyless
+		/// accounts. This is not a problem for now since this is only used by other pallets in the
+		/// runtime which use keyless accounts as agents. If we later want to expose this as a
+		/// dispatchable call, we should derive a sub-account from the caller and use that as the
+		/// agent account.
+		pub fn register_agent(
+			origin: OriginFor<T>,
+			reward_account: T::AccountId,
+		) -> DispatchResult {
+			let who = ensure_signed(origin)?;
+
+			// Existing `agent` cannot register again and a delegator cannot become an `agent`.
+			ensure!(!Self::is_agent(&who) && !Self::is_delegator(&who), Error::<T>::NotAllowed);
+
+			// They cannot already be a direct staker in the staking pallet.
+			ensure!(!Self::is_direct_staker(&who), Error::<T>::AlreadyStaking);
+
+			// The reward account cannot be the same as the `agent` account.
+			ensure!(reward_account != who, Error::<T>::InvalidRewardDestination);
+
+			Self::do_register_agent(&who, &reward_account);
+			Ok(())
+		}
+
+		/// Migrate from a `Nominator` account to an `Agent` account.
+		///
+		/// The origin needs to
+		/// - be a `Nominator` with [`Config::CoreStaking`],
+		/// - not already be an `Agent`.
+		///
+		/// This function will create a proxy account to the agent called `proxy_delegator` and
+		/// transfer the amount directly staked by the agent to it. The `proxy_delegator` delegates
+		/// the funds to the origin, making the origin an `Agent` account. The real `delegator`
+		/// accounts of the origin can later migrate their funds using [Self::migrate_delegation]
+		/// to claim back their share of delegated funds from `proxy_delegator` to themselves.
+		///
+		/// Any free funds in the agent's account will be marked as an unclaimed withdrawal.
+		pub fn migrate_to_agent(
+			origin: OriginFor<T>,
+			reward_account: T::AccountId,
+		) -> DispatchResult {
+			let who = ensure_signed(origin)?;
+			// ensure who is a staker in `CoreStaking` but not already an agent or a delegator.
+			ensure!(
+				Self::is_direct_staker(&who) && !Self::is_agent(&who) && !Self::is_delegator(&who),
+				Error::<T>::NotAllowed
+			);
+
+			// The reward account cannot be the same as the `agent` account.
+			ensure!(reward_account != who, Error::<T>::InvalidRewardDestination);
+
+			Self::do_migrate_to_agent(&who, &reward_account)
+		}
+
+		/// Release funds previously delegated by a delegator to the origin.
+		///
+		/// Only agents can call this.
+		///
+		/// Tries to withdraw unbonded funds from `CoreStaking` if needed and releases the amount
+		/// to the `delegator`.
+		pub fn release_delegation(
+			origin: OriginFor<T>,
+			delegator: T::AccountId,
+			amount: BalanceOf<T>,
+			num_slashing_spans: u32,
+		) -> DispatchResult {
+			let who = ensure_signed(origin)?;
+			Self::do_release(&who, &delegator, amount, num_slashing_spans)
+		}
+
+		/// Migrate delegated funds that are held in `proxy_delegator` to the claiming
+		/// `delegator`'s account. If successful, the specified funds will be moved and delegated
+		/// from the `delegator` account to the agent.
+		///
+		/// This can be called by `agent` accounts that were previously a direct `Nominator` with
+		/// [`Config::CoreStaking`] and have some remaining unclaimed delegations.
+		///
+		/// Internally, it moves some delegations from the `proxy_delegator` account to the
+		/// `delegator` account and reapplies the holds.
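+		///
+		/// A rough usage sketch, with hypothetical `agent`, `delegator` and `amount` values
+		/// (not part of this patch):
+		/// ```ignore
+		/// // `agent` previously migrated via `migrate_to_agent`; move `amount` of the
+		/// // proxy-held funds into `delegator`'s own delegation:
+		/// Pallet::<T>::migrate_delegation(RawOrigin::Signed(agent).into(), delegator, amount)?;
+		/// ```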
+ pub fn migrate_delegation( + origin: OriginFor, + delegator: T::AccountId, + amount: BalanceOf, + ) -> DispatchResult { + let agent = ensure_signed(origin)?; + + // Ensure they have minimum delegation. + ensure!(amount >= T::Currency::minimum_balance(), Error::::NotEnoughFunds); + + // Ensure delegator is sane. + ensure!(!Self::is_agent(&delegator), Error::::NotAllowed); + ensure!(!Self::is_delegator(&delegator), Error::::NotAllowed); + ensure!(!Self::is_direct_staker(&delegator), Error::::AlreadyStaking); + + // ensure agent is sane. + ensure!(Self::is_agent(&agent), Error::::NotAgent); + + // and has enough delegated balance to migrate. + let proxy_delegator = Self::sub_account(AccountType::ProxyDelegator, agent); + let balance_remaining = Self::held_balance_of(&proxy_delegator); + ensure!(balance_remaining >= amount, Error::::NotEnoughFunds); + + Self::do_migrate_delegation(&proxy_delegator, &delegator, amount) + } + + /// Delegate given `amount` of tokens to an `Agent` account. + /// + /// If `origin` is the first time delegator, we add them to state. If they are already + /// delegating, we increase the delegation. + /// + /// Conditions: + /// - Delegators cannot delegate to more than one agent. + /// - The `agent` account should already be registered as such. See + /// [`Self::register_agent`]. + pub fn delegate_to_agent( + origin: OriginFor, + agent: T::AccountId, + amount: BalanceOf, + ) -> DispatchResult { + let delegator = ensure_signed(origin)?; + + // ensure delegator is sane. + ensure!( + Delegation::::can_delegate(&delegator, &agent), + Error::::InvalidDelegation + ); + ensure!(!Self::is_direct_staker(&delegator), Error::::AlreadyStaking); + + // ensure agent is sane. + ensure!(Self::is_agent(&agent), Error::::NotAgent); + + // add to delegation. + Self::do_delegate(&delegator, &agent, amount)?; + + // bond the newly delegated amount to `CoreStaking`. + Self::do_bond(&agent, amount) + } + } + + #[pallet::hooks] + impl Hooks> for Pallet { + #[cfg(feature = "try-runtime")] + fn try_state(_n: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + Self::do_try_state() + } + } +} + +impl Pallet { + /// Derive a (keyless) pot account from the given agent account and account type. + pub(crate) fn sub_account(account_type: AccountType, agent: T::AccountId) -> T::AccountId { + T::PalletId::get().into_sub_account_truncating((account_type, agent.clone())) + } + + /// Held balance of a delegator. + pub(crate) fn held_balance_of(who: &T::AccountId) -> BalanceOf { + T::Currency::balance_on_hold(&HoldReason::StakingDelegation.into(), who) + } + + /// Returns true if who is registered as an `Agent`. + fn is_agent(who: &T::AccountId) -> bool { + >::contains_key(who) + } + + /// Returns true if who is delegating to an `Agent` account. + fn is_delegator(who: &T::AccountId) -> bool { + >::contains_key(who) + } + + /// Returns true if who is already staking on [`Config::CoreStaking`]. + fn is_direct_staker(who: &T::AccountId) -> bool { + T::CoreStaking::status(who).is_ok() + } + + /// Registers a new agent in the system. + fn do_register_agent(who: &T::AccountId, reward_account: &T::AccountId) { + AgentLedger::::new(reward_account).update(who); + + // Agent does not hold balance of its own but this pallet will provide for this to exist. + // This is expected to be a keyless account and not created by any user directly so safe. + // TODO: Someday if we allow anyone to be an agent, we should take a deposit for + // being a delegator. 
+ frame_system::Pallet::::inc_providers(who); + } + + /// Migrate existing staker account `who` to an `Agent` account. + fn do_migrate_to_agent(who: &T::AccountId, reward_account: &T::AccountId) -> DispatchResult { + Self::do_register_agent(who, reward_account); + + // We create a proxy delegator that will keep all the delegation funds until funds are + // transferred to actual delegator. + let proxy_delegator = Self::sub_account(AccountType::ProxyDelegator, who.clone()); + + // Keep proxy delegator alive until all funds are migrated. + frame_system::Pallet::::inc_providers(&proxy_delegator); + + // Get current stake + let stake = T::CoreStaking::stake(who)?; + + // release funds from core staking. + T::CoreStaking::migrate_to_virtual_staker(who); + + // transfer just released staked amount plus any free amount. + let amount_to_transfer = + T::Currency::reducible_balance(who, Preservation::Expendable, Fortitude::Polite); + + // This should never fail but if it does, it indicates bad state and we abort. + T::Currency::transfer(who, &proxy_delegator, amount_to_transfer, Preservation::Expendable)?; + + T::CoreStaking::update_payee(who, reward_account)?; + // delegate all transferred funds back to agent. + Self::do_delegate(&proxy_delegator, who, amount_to_transfer)?; + + // if the transferred/delegated amount was greater than the stake, mark the extra as + // unclaimed withdrawal. + let unclaimed_withdraws = amount_to_transfer + .checked_sub(&stake.total) + .defensive_ok_or(ArithmeticError::Underflow)?; + + if !unclaimed_withdraws.is_zero() { + let mut ledger = AgentLedger::::get(who).ok_or(Error::::NotAgent)?; + ledger.unclaimed_withdrawals = ledger + .unclaimed_withdrawals + .checked_add(&unclaimed_withdraws) + .defensive_ok_or(ArithmeticError::Overflow)?; + ledger.update(who); + } + + Ok(()) + } + + /// Bond `amount` to `agent_acc` in [`Config::CoreStaking`]. + fn do_bond(agent_acc: &T::AccountId, amount: BalanceOf) -> DispatchResult { + let agent = Agent::::get(agent_acc)?; + + let available_to_bond = agent.available_to_bond(); + defensive_assert!(amount == available_to_bond, "not expected value to bond"); + + if agent.is_bonded() { + T::CoreStaking::bond_extra(&agent.key, amount) + } else { + T::CoreStaking::virtual_bond(&agent.key, amount, agent.reward_account()) + } + } + + /// Delegate `amount` from `delegator` to `agent`. + fn do_delegate( + delegator: &T::AccountId, + agent: &T::AccountId, + amount: BalanceOf, + ) -> DispatchResult { + let mut ledger = AgentLedger::::get(agent).ok_or(Error::::NotAgent)?; + // try to hold the funds. + T::Currency::hold(&HoldReason::StakingDelegation.into(), delegator, amount)?; + + let new_delegation_amount = + if let Some(existing_delegation) = Delegation::::get(delegator) { + ensure!(&existing_delegation.agent == agent, Error::::InvalidDelegation); + existing_delegation + .amount + .checked_add(&amount) + .ok_or(ArithmeticError::Overflow)? + } else { + amount + }; + + Delegation::::new(agent, new_delegation_amount).update_or_kill(delegator); + ledger.total_delegated = + ledger.total_delegated.checked_add(&amount).ok_or(ArithmeticError::Overflow)?; + ledger.update(agent); + + Self::deposit_event(Event::::Delegated { + agent: agent.clone(), + delegator: delegator.clone(), + amount, + }); + + Ok(()) + } + + /// Release `amount` of delegated funds from `agent` to `delegator`. 
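+	///
+	/// Expects `who` to be a registered agent. If the agent's unclaimed withdrawals cannot
+	/// cover `amount`, unbonded funds are first withdrawn from [`Config::CoreStaking`].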
+	fn do_release(
+		who: &T::AccountId,
+		delegator: &T::AccountId,
+		amount: BalanceOf<T>,
+		num_slashing_spans: u32,
+	) -> DispatchResult {
+		let mut agent = Agent::<T>::get(who)?;
+		let mut delegation = Delegation::<T>::get(delegator).ok_or(Error::<T>::NotDelegator)?;
+
+		// make sure delegation to be released is sound.
+		ensure!(&delegation.agent == who, Error::<T>::NotAgent);
+		ensure!(delegation.amount >= amount, Error::<T>::NotEnoughFunds);
+
+		// if we do not already have enough funds to be claimed, try to withdraw some more.
+		// keep track of whether we killed the staker in the process.
+		let stash_killed = if agent.ledger.unclaimed_withdrawals < amount {
+			// withdraw unbonded funds from `CoreStaking`.
+			let killed = T::CoreStaking::withdraw_unbonded(who.clone(), num_slashing_spans)
+				.map_err(|_| Error::<T>::WithdrawFailed)?;
+			// reload agent from storage since withdrawal might have changed the state.
+			agent = agent.refresh()?;
+			Some(killed)
+		} else {
+			None
+		};
+
+		// if we still do not have enough funds to release, abort.
+		ensure!(agent.ledger.unclaimed_withdrawals >= amount, Error::<T>::NotEnoughFunds);
+
+		// Claim withdraw from agent. Kill agent if no delegation left.
+		// TODO: Ideally if there is a register, there should be an unregister that should
+		// clean up the agent. Can be improved in the future.
+		if agent.remove_unclaimed_withdraw(amount)?.update_or_kill()? {
+			match stash_killed {
+				Some(killed) => {
+					// this implies we did a `CoreStaking::withdraw` before release. Ensure
+					// we killed the staker as well.
+					ensure!(killed, Error::<T>::BadState);
+				},
+				None => {
+					// We did not do a `CoreStaking::withdraw` before release. Ensure staker is
+					// already killed in `CoreStaking`.
+					ensure!(T::CoreStaking::status(who).is_err(), Error::<T>::BadState);
+				},
+			}
+
+			// Remove provider reference for `who`.
+			let _ = frame_system::Pallet::<T>::dec_providers(who).defensive();
+		}
+
+		// update delegation bookkeeping.
+		delegation.amount = delegation
+			.amount
+			.checked_sub(&amount)
+			.defensive_ok_or(ArithmeticError::Overflow)?;
+
+		// remove delegator if nothing delegated anymore
+		delegation.update_or_kill(delegator);
+
+		let released = T::Currency::release(
+			&HoldReason::StakingDelegation.into(),
+			delegator,
+			amount,
+			Precision::BestEffort,
+		)?;
+
+		defensive_assert!(released == amount, "hold should have been released fully");
+
+		Self::deposit_event(Event::<T>::Released {
+			agent: who.clone(),
+			delegator: delegator.clone(),
+			amount,
+		});
+
+		Ok(())
+	}
+
+	/// Migrates delegation of `amount` from `source` account to `destination` account.
+	fn do_migrate_delegation(
+		source_delegator: &T::AccountId,
+		destination_delegator: &T::AccountId,
+		amount: BalanceOf<T>,
+	) -> DispatchResult {
+		let mut source_delegation =
+			Delegators::<T>::get(source_delegator).defensive_ok_or(Error::<T>::BadState)?;
+
+		// sanity checks that must already have been performed by the caller.
+		ensure!(source_delegation.amount >= amount, Error::<T>::NotEnoughFunds);
+		debug_assert!(
+			!Self::is_delegator(destination_delegator) && !Self::is_agent(destination_delegator)
+		);
+
+		// update delegations
+		Delegation::<T>::new(&source_delegation.agent, amount)
+			.update_or_kill(destination_delegator);
+
+		source_delegation.amount = source_delegation
+			.amount
+			.checked_sub(&amount)
+			.defensive_ok_or(Error::<T>::BadState)?;
+
+		source_delegation.update_or_kill(source_delegator);
+
+		// release funds from source
+		let released = T::Currency::release(
+			&HoldReason::StakingDelegation.into(),
+			source_delegator,
+			amount,
+			Precision::BestEffort,
+		)?;
+
+		defensive_assert!(released == amount, "hold should have been released fully");
+
+		// transfer the released amount to `destination_delegator`.
+		let post_balance = T::Currency::transfer(
+			source_delegator,
+			destination_delegator,
+			amount,
+			Preservation::Expendable,
+		)
+		.map_err(|_| Error::<T>::BadState)?;
+
+		// if balance is zero, clear provider for source (proxy) delegator.
+		if post_balance == Zero::zero() {
+			let _ = frame_system::Pallet::<T>::dec_providers(source_delegator).defensive();
+		}
+
+		// hold the funds again in the new delegator account.
+		T::Currency::hold(&HoldReason::StakingDelegation.into(), destination_delegator, amount)?;
+
+		Ok(())
+	}
+
+	/// Take slash `amount` from agent's `pending_slash` counter and apply it to `delegator`
+	/// account.
+	pub fn do_slash(
+		agent_acc: T::AccountId,
+		delegator: T::AccountId,
+		amount: BalanceOf<T>,
+		maybe_reporter: Option<T::AccountId>,
+	) -> DispatchResult {
+		let agent = Agent::<T>::get(&agent_acc)?;
+		// ensure there is something to slash
+		ensure!(agent.ledger.pending_slash > Zero::zero(), Error::<T>::NothingToSlash);
+
+		let mut delegation = <Delegators<T>>::get(&delegator).ok_or(Error::<T>::NotDelegator)?;
+		ensure!(delegation.agent == agent_acc, Error::<T>::NotAgent);
+		ensure!(delegation.amount >= amount, Error::<T>::NotEnoughFunds);
+
+		// slash delegator
+		let (mut credit, missing) =
+			T::Currency::slash(&HoldReason::StakingDelegation.into(), &delegator, amount);
+
+		defensive_assert!(missing.is_zero(), "slash should have been fully applied");
+
+		let actual_slash = credit.peek();
+
+		// remove the applied slashed amount from agent.
+		agent.remove_slash(actual_slash).save();
+		delegation.amount =
+			delegation.amount.checked_sub(&actual_slash).ok_or(ArithmeticError::Overflow)?;
+		delegation.update_or_kill(&delegator);
+
+		if let Some(reporter) = maybe_reporter {
+			let reward_payout: BalanceOf<T> = T::SlashRewardFraction::get() * actual_slash;
+			let (reporter_reward, rest) = credit.split(reward_payout);
+
+			// credit is the amount that we provide to `T::OnSlash`.
+			credit = rest;
+
+			// reward reporter or drop it.
+			let _ = T::Currency::resolve(&reporter, reporter_reward);
+		}
+
+		T::OnSlash::on_unbalanced(credit);
+
+		Self::deposit_event(Event::<T>::Slashed { agent: agent_acc, delegator, amount });
+
+		Ok(())
+	}
+
+	/// Total balance that is available for stake. Includes already staked amount.
+	#[cfg(test)]
+	pub(crate) fn stakeable_balance(who: &T::AccountId) -> BalanceOf<T> {
+		Agent::<T>::get(who)
+			.map(|agent| agent.ledger.stakeable_balance())
+			.unwrap_or_default()
+	}
+}
+
+#[cfg(any(test, feature = "try-runtime"))]
+use sp_std::collections::btree_map::BTreeMap;
+
+#[cfg(any(test, feature = "try-runtime"))]
+impl<T: Config> Pallet<T> {
+	pub(crate) fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> {
+		// build map to avoid reading storage multiple times.
+ let delegation_map = Delegators::::iter().collect::>(); + let ledger_map = Agents::::iter().collect::>(); + + Self::check_delegates(ledger_map.clone())?; + Self::check_delegators(delegation_map, ledger_map)?; + + Ok(()) + } + + fn check_delegates( + ledgers: BTreeMap>, + ) -> Result<(), sp_runtime::TryRuntimeError> { + for (agent, ledger) in ledgers { + ensure!( + matches!( + T::CoreStaking::status(&agent).expect("agent should be bonded"), + sp_staking::StakerStatus::Nominator(_) | sp_staking::StakerStatus::Idle + ), + "agent should be bonded and not validator" + ); + + ensure!( + ledger.stakeable_balance() >= + T::CoreStaking::total_stake(&agent) + .expect("agent should exist as a nominator"), + "Cannot stake more than balance" + ); + } + + Ok(()) + } + + fn check_delegators( + delegations: BTreeMap>, + ledger: BTreeMap>, + ) -> Result<(), sp_runtime::TryRuntimeError> { + let mut delegation_aggregation = BTreeMap::>::new(); + for (delegator, delegation) in delegations.iter() { + ensure!( + T::CoreStaking::status(delegator).is_err(), + "delegator should not be directly staked" + ); + ensure!(!Self::is_agent(delegator), "delegator cannot be an agent"); + + delegation_aggregation + .entry(delegation.agent.clone()) + .and_modify(|e| *e += delegation.amount) + .or_insert(delegation.amount); + } + + for (agent, total_delegated) in delegation_aggregation { + ensure!(!Self::is_delegator(&agent), "agent cannot be delegator"); + + let ledger = ledger.get(&agent).expect("ledger should exist"); + ensure!( + ledger.total_delegated == total_delegated, + "ledger total delegated should match delegations" + ); + } + + Ok(()) + } +} diff --git a/substrate/frame/delegated-staking/src/mock.rs b/substrate/frame/delegated-staking/src/mock.rs new file mode 100644 index 00000000000..21a9fe6b227 --- /dev/null +++ b/substrate/frame/delegated-staking/src/mock.rs @@ -0,0 +1,308 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::{self as delegated_staking, types::Agent}; +use frame_support::{ + assert_ok, derive_impl, + pallet_prelude::*, + parameter_types, + traits::{ConstU64, Currency}, + PalletId, +}; + +use sp_runtime::{traits::IdentityLookup, BuildStorage, Perbill}; + +use frame_election_provider_support::{ + bounds::{ElectionBounds, ElectionBoundsBuilder}, + onchain, SequentialPhragmen, +}; +use frame_support::dispatch::RawOrigin; +use pallet_staking::{ActiveEra, ActiveEraInfo, CurrentEra}; +use sp_staking::{Stake, StakingInterface}; + +pub type T = Runtime; +type Block = frame_system::mocking::MockBlock; +pub type AccountId = u128; + +pub const GENESIS_VALIDATOR: AccountId = 1; +pub const GENESIS_NOMINATOR_ONE: AccountId = 101; +pub const GENESIS_NOMINATOR_TWO: AccountId = 102; + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type Block = Block; + type AccountData = pallet_balances::AccountData; + type AccountId = AccountId; + type Lookup = IdentityLookup; +} + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = ConstU64<5>; + type WeightInfo = (); +} + +pub type Balance = u128; + +parameter_types! { + pub static ExistentialDeposit: Balance = 1; +} +impl pallet_balances::Config for Runtime { + type MaxLocks = ConstU32<128>; + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); + type FreezeIdentifier = RuntimeFreezeReason; + type MaxFreezes = ConstU32<1>; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; +} + +pallet_staking_reward_curve::build! { + const I_NPOS: sp_runtime::curve::PiecewiseLinear<'static> = curve!( + min_inflation: 0_025_000, + max_inflation: 0_100_000, + ideal_stake: 0_500_000, + falloff: 0_050_000, + max_piece_count: 40, + test_precision: 0_005_000, + ); +} + +parameter_types! 
{ + pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; + pub static BondingDuration: u32 = 3; + pub static ElectionsBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default().build(); +} +pub struct OnChainSeqPhragmen; +impl onchain::Config for OnChainSeqPhragmen { + type System = Runtime; + type Solver = SequentialPhragmen; + type DataProvider = Staking; + type WeightInfo = (); + type MaxWinners = ConstU32<100>; + type Bounds = ElectionsBoundsOnChain; +} + +impl pallet_staking::Config for Runtime { + type Currency = Balances; + type CurrencyBalance = Balance; + type UnixTime = pallet_timestamp::Pallet; + type CurrencyToVote = (); + type RewardRemainder = (); + type RuntimeEvent = RuntimeEvent; + type Slash = (); + type Reward = (); + type SessionsPerEra = ConstU32<1>; + type SlashDeferDuration = (); + type AdminOrigin = frame_system::EnsureRoot; + type BondingDuration = BondingDuration; + type SessionInterface = (); + type EraPayout = pallet_staking::ConvertCurve; + type NextNewSession = (); + type HistoryDepth = ConstU32<84>; + type MaxExposurePageSize = ConstU32<64>; + type ElectionProvider = onchain::OnChainExecution; + type GenesisElectionProvider = Self::ElectionProvider; + type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; + type TargetList = pallet_staking::UseValidatorsMap; + type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; + type MaxUnlockingChunks = ConstU32<10>; + type MaxControllersInDeprecationBatch = ConstU32<100>; + type EventListeners = DelegatedStaking; + type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; + type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; +} + +parameter_types! { + pub const DelegatedStakingPalletId: PalletId = PalletId(*b"py/dlstk"); + pub const SlashRewardFraction: Perbill = Perbill::from_percent(10); +} +impl delegated_staking::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type PalletId = DelegatedStakingPalletId; + type Currency = Balances; + type OnSlash = (); + type SlashRewardFraction = SlashRewardFraction; + type RuntimeHoldReason = RuntimeHoldReason; + type CoreStaking = Staking; +} + +parameter_types! 
{ + pub static MaxUnbonding: u32 = 8; +} + +frame_support::construct_runtime!( + pub enum Runtime { + System: frame_system, + Timestamp: pallet_timestamp, + Balances: pallet_balances, + Staking: pallet_staking, + DelegatedStaking: delegated_staking, + } +); + +#[derive(Default)] +pub struct ExtBuilder {} + +impl ExtBuilder { + fn build(self) -> sp_io::TestExternalities { + sp_tracing::try_init_simple(); + let mut storage = + frame_system::GenesisConfig::::default().build_storage().unwrap(); + + let _ = pallet_balances::GenesisConfig:: { + balances: vec![ + (GENESIS_VALIDATOR, 10000), + (GENESIS_NOMINATOR_ONE, 1000), + (GENESIS_NOMINATOR_TWO, 2000), + ], + } + .assimilate_storage(&mut storage); + + let stakers = vec![ + ( + GENESIS_VALIDATOR, + GENESIS_VALIDATOR, + 1000, + sp_staking::StakerStatus::::Validator, + ), + ( + GENESIS_NOMINATOR_ONE, + GENESIS_NOMINATOR_ONE, + 100, + sp_staking::StakerStatus::::Nominator(vec![1]), + ), + ( + GENESIS_NOMINATOR_TWO, + GENESIS_NOMINATOR_TWO, + 200, + sp_staking::StakerStatus::::Nominator(vec![1]), + ), + ]; + + let _ = pallet_staking::GenesisConfig:: { + stakers: stakers.clone(), + // ideal validator count + validator_count: 2, + minimum_validator_count: 1, + invulnerables: vec![], + slash_reward_fraction: Perbill::from_percent(10), + min_nominator_bond: ExistentialDeposit::get(), + min_validator_bond: ExistentialDeposit::get(), + ..Default::default() + } + .assimilate_storage(&mut storage); + + let mut ext = sp_io::TestExternalities::from(storage); + + ext.execute_with(|| { + // for events to be deposited. + frame_system::Pallet::::set_block_number(1); + // set era for staking. + start_era(0); + }); + + ext + } + pub fn build_and_execute(self, test: impl FnOnce()) { + sp_tracing::try_init_simple(); + let mut ext = self.build(); + ext.execute_with(test); + ext.execute_with(|| { + #[cfg(feature = "try-runtime")] + >::try_state( + frame_system::Pallet::::block_number(), + frame_support::traits::TryStateSelect::All, + ) + .unwrap(); + #[cfg(not(feature = "try-runtime"))] + DelegatedStaking::do_try_state().unwrap(); + }); + } +} + +/// fund and return who. +pub(crate) fn fund(who: &AccountId, amount: Balance) { + let _ = Balances::deposit_creating(who, amount); +} + +/// Sets up delegation for passed delegators, returns total delegated amount. +/// +/// `delegate_amount` is incremented by the amount `increment` starting with `base_delegate_amount` +/// from lower index to higher index of delegators. 
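+///
+/// For example: with three delegators, `base_delegate_amount = 10` and `increment = 5`, the
+/// individual delegations are 10, 15 and 20, and the returned total is 45.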
+pub(crate) fn setup_delegation_stake( + agent: AccountId, + reward_acc: AccountId, + delegators: Vec, + base_delegate_amount: Balance, + increment: Balance, +) -> Balance { + fund(&agent, 100); + assert_ok!(DelegatedStaking::register_agent(RawOrigin::Signed(agent).into(), reward_acc)); + let mut delegated_amount: Balance = 0; + for (index, delegator) in delegators.iter().enumerate() { + let amount_to_delegate = base_delegate_amount + increment * index as Balance; + delegated_amount += amount_to_delegate; + + fund(delegator, amount_to_delegate + ExistentialDeposit::get()); + assert_ok!(DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(*delegator).into(), + agent, + amount_to_delegate + )); + } + + // sanity checks + assert_eq!(DelegatedStaking::stakeable_balance(&agent), delegated_amount); + assert_eq!(Agent::::get(&agent).unwrap().available_to_bond(), 0); + + delegated_amount +} + +pub(crate) fn start_era(era: sp_staking::EraIndex) { + CurrentEra::::set(Some(era)); + ActiveEra::::set(Some(ActiveEraInfo { index: era, start: None })); +} + +pub(crate) fn eq_stake(who: AccountId, total: Balance, active: Balance) -> bool { + Staking::stake(&who).unwrap() == Stake { total, active } && + get_agent(&who).ledger.stakeable_balance() == total +} + +pub(crate) fn get_agent(agent: &AccountId) -> Agent { + Agent::::get(agent).expect("delegate should exist") +} + +parameter_types! { + static ObservedEventsDelegatedStaking: usize = 0; +} + +#[allow(unused)] +pub(crate) fn events_since_last_call() -> Vec> { + let events = System::read_events_for_pallet::>(); + let already_seen = ObservedEventsDelegatedStaking::get(); + ObservedEventsDelegatedStaking::set(events.len()); + events.into_iter().skip(already_seen).collect() +} diff --git a/substrate/frame/delegated-staking/src/tests.rs b/substrate/frame/delegated-staking/src/tests.rs new file mode 100644 index 00000000000..1f36f655beb --- /dev/null +++ b/substrate/frame/delegated-staking/src/tests.rs @@ -0,0 +1,685 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for pallet-delegated-staking. + +use super::*; +use crate::mock::*; +use frame_support::{assert_noop, assert_ok, traits::fungible::InspectHold}; +use pallet_staking::Error as StakingError; +use sp_staking::DelegationInterface; + +#[test] +fn create_an_agent_with_first_delegator() { + ExtBuilder::default().build_and_execute(|| { + let agent: AccountId = 200; + let reward_account: AccountId = 201; + let delegator: AccountId = 202; + + // set intention to accept delegation. 
+ fund(&agent, 1000); + assert_ok!(DelegatedStaking::register_agent( + RawOrigin::Signed(agent).into(), + reward_account + )); + + // delegate to this account + fund(&delegator, 1000); + assert_ok!(DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(delegator).into(), + agent, + 100 + )); + + // verify + assert!(DelegatedStaking::is_agent(&agent)); + assert_eq!(DelegatedStaking::stakeable_balance(&agent), 100); + assert_eq!( + Balances::balance_on_hold(&HoldReason::StakingDelegation.into(), &delegator), + 100 + ); + assert_eq!(DelegatedStaking::held_balance_of(&delegator), 100); + }); +} + +#[test] +fn cannot_become_agent() { + ExtBuilder::default().build_and_execute(|| { + // cannot set reward account same as agent account + assert_noop!( + DelegatedStaking::register_agent(RawOrigin::Signed(100).into(), 100), + Error::::InvalidRewardDestination + ); + + // an existing validator cannot become agent + assert_noop!( + DelegatedStaking::register_agent( + RawOrigin::Signed(mock::GENESIS_VALIDATOR).into(), + 100 + ), + Error::::AlreadyStaking + ); + + // an existing direct staker to `CoreStaking` cannot become an agent. + assert_noop!( + DelegatedStaking::register_agent( + RawOrigin::Signed(mock::GENESIS_NOMINATOR_ONE).into(), + 100 + ), + Error::::AlreadyStaking + ); + assert_noop!( + DelegatedStaking::register_agent( + RawOrigin::Signed(mock::GENESIS_NOMINATOR_TWO).into(), + 100 + ), + Error::::AlreadyStaking + ); + }); +} + +#[test] +fn create_multiple_delegators() { + ExtBuilder::default().build_and_execute(|| { + let agent: AccountId = 200; + let reward_account: AccountId = 201; + + // stakeable balance is 0 for non agent + fund(&agent, 1000); + assert!(!DelegatedStaking::is_agent(&agent)); + assert_eq!(DelegatedStaking::stakeable_balance(&agent), 0); + + // set intention to accept delegation. + assert_ok!(DelegatedStaking::register_agent( + RawOrigin::Signed(agent).into(), + reward_account + )); + + // create 100 delegators + for i in 202..302 { + fund(&i, 100 + ExistentialDeposit::get()); + assert_ok!(DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(i).into(), + agent, + 100 + )); + // Balance of 100 held on delegator account for delegating to the agent. 
+ assert_eq!(Balances::balance_on_hold(&HoldReason::StakingDelegation.into(), &i), 100); + } + + // verify + assert!(DelegatedStaking::is_agent(&agent)); + assert_eq!(DelegatedStaking::stakeable_balance(&agent), 100 * 100); + }); +} + +#[test] +fn agent_restrictions() { + // Similar to creating a nomination pool + ExtBuilder::default().build_and_execute(|| { + let agent_one = 200; + let delegator_one = 210; + fund(&agent_one, 100); + assert_ok!(DelegatedStaking::register_agent( + RawOrigin::Signed(agent_one).into(), + agent_one + 1 + )); + fund(&delegator_one, 200); + assert_ok!(DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(delegator_one).into(), + agent_one, + 100 + )); + + let agent_two = 300; + let delegator_two = 310; + fund(&agent_two, 100); + assert_ok!(DelegatedStaking::register_agent( + RawOrigin::Signed(agent_two).into(), + agent_two + 1 + )); + fund(&delegator_two, 200); + assert_ok!(DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(delegator_two).into(), + agent_two, + 100 + )); + + // agent one tries to delegate to agent 2 + assert_noop!( + DelegatedStaking::delegate_to_agent(RawOrigin::Signed(agent_one).into(), agent_two, 10), + Error::::InvalidDelegation + ); + + // agent one tries to delegate to a delegator + assert_noop!( + DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(agent_one).into(), + delegator_one, + 10 + ), + Error::::InvalidDelegation + ); + assert_noop!( + DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(agent_one).into(), + delegator_two, + 10 + ), + Error::::InvalidDelegation + ); + + // delegator one tries to delegate to agent 2 as well (it already delegates to agent + // 1) + assert_noop!( + DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(delegator_one).into(), + agent_two, + 10 + ), + Error::::InvalidDelegation + ); + + // cannot delegate to non agents. + let non_agent = 201; + // give it some funds + fund(&non_agent, 200); + assert_noop!( + DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(delegator_one).into(), + non_agent, + 10 + ), + Error::::InvalidDelegation + ); + + // cannot delegate to a delegator + assert_noop!( + DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(delegator_one).into(), + delegator_two, + 10 + ), + Error::::InvalidDelegation + ); + + // delegator cannot delegate to self + assert_noop!( + DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(delegator_one).into(), + delegator_one, + 10 + ), + Error::::InvalidDelegation + ); + + // agent cannot delegate to self + assert_noop!( + DelegatedStaking::delegate_to_agent(RawOrigin::Signed(agent_one).into(), agent_one, 10), + Error::::InvalidDelegation + ); + }); +} + +#[test] +fn apply_pending_slash() { + ExtBuilder::default().build_and_execute(|| { + start_era(1); + let agent: AccountId = 200; + let reward_acc: AccountId = 201; + let delegators: Vec = (301..=350).collect(); + let reporter: AccountId = 400; + + let total_staked = setup_delegation_stake(agent, reward_acc, delegators.clone(), 10, 10); + + start_era(4); + // slash half of the stake + pallet_staking::slashing::do_slash::( + &agent, + total_staked / 2, + &mut Default::default(), + &mut Default::default(), + 3, + ); + + // agent cannot slash an account that is not its delegator. 
+ setup_delegation_stake(210, 211, (351..=352).collect(), 100, 0); + assert_noop!( + ::delegator_slash(&agent, &351, 1, Some(400)), + Error::::NotAgent + ); + // or a non delegator account + fund(&353, 100); + assert_noop!( + ::delegator_slash(&agent, &353, 1, Some(400)), + Error::::NotDelegator + ); + + // ensure bookkept pending slash is correct. + assert_eq!(get_agent(&agent).ledger.pending_slash, total_staked / 2); + let mut old_reporter_balance = Balances::free_balance(reporter); + + // lets apply the pending slash on delegators. + for i in delegators { + // balance before slash + let initial_pending_slash = get_agent(&agent).ledger.pending_slash; + assert!(initial_pending_slash > 0); + let unslashed_balance = DelegatedStaking::held_balance_of(&i); + let slash = unslashed_balance / 2; + // slash half of delegator's delegation. + assert_ok!(::delegator_slash( + &agent, + &i, + slash, + Some(400) + )); + + // balance after slash. + assert_eq!(DelegatedStaking::held_balance_of(&i), unslashed_balance - slash); + // pending slash is reduced by the amount slashed. + assert_eq!(get_agent(&agent).ledger.pending_slash, initial_pending_slash - slash); + // reporter get 10% of the slash amount. + assert_eq!( + Balances::free_balance(reporter) - old_reporter_balance, + ::slash_reward_fraction() * slash, + ); + // update old balance + old_reporter_balance = Balances::free_balance(reporter); + } + + // nothing to slash anymore + assert_eq!(get_agent(&agent).ledger.pending_slash, 0); + + // cannot slash anymore + assert_noop!( + ::delegator_slash(&agent, &350, 1, None), + Error::::NothingToSlash + ); + }); +} + +/// Integration tests with pallet-staking. +mod staking_integration { + use super::*; + use pallet_staking::RewardDestination; + use sp_staking::Stake; + + #[test] + fn bond() { + ExtBuilder::default().build_and_execute(|| { + let agent: AccountId = 99; + let reward_acc: AccountId = 100; + assert_eq!(Staking::status(&agent), Err(StakingError::::NotStash.into())); + + // set intention to become an agent + fund(&agent, 100); + assert_ok!(DelegatedStaking::register_agent( + RawOrigin::Signed(agent).into(), + reward_acc + )); + assert_eq!(DelegatedStaking::stakeable_balance(&agent), 0); + + let mut delegated_balance: Balance = 0; + + // set some delegations + for delegator in 200..250 { + fund(&delegator, 200); + assert_ok!(DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(delegator).into(), + agent, + 100 + )); + delegated_balance += 100; + assert_eq!( + Balances::balance_on_hold(&HoldReason::StakingDelegation.into(), &delegator), + 100 + ); + assert_eq!(DelegatedStaking::delegator_balance(&delegator), 100); + + let agent_obj = get_agent(&agent); + assert_eq!(agent_obj.ledger.stakeable_balance(), delegated_balance); + assert_eq!(agent_obj.available_to_bond(), 0); + assert_eq!(agent_obj.bonded_stake(), delegated_balance); + } + + assert_eq!(Staking::stake(&agent).unwrap(), Stake { total: 50 * 100, active: 50 * 100 }) + }); + } + + #[test] + fn withdraw_test() { + ExtBuilder::default().build_and_execute(|| { + // initial era + start_era(1); + let agent: AccountId = 200; + let reward_acc: AccountId = 201; + let delegators: Vec = (301..=350).collect(); + let total_staked = + setup_delegation_stake(agent, reward_acc, delegators.clone(), 10, 10); + + // lets go to a new era + start_era(2); + + assert!(eq_stake(agent, total_staked, total_staked)); + // Withdrawing without unbonding would fail. 
+ assert_noop!( + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 301, 50, 0), + Error::::NotEnoughFunds + ); + + // 305 wants to unbond 50 in era 2, withdrawable in era 5. + assert_ok!(Staking::unbond(RawOrigin::Signed(agent).into(), 50)); + + // 310 wants to unbond 100 in era 3, withdrawable in era 6. + start_era(3); + assert_ok!(Staking::unbond(RawOrigin::Signed(agent).into(), 100)); + + // 320 wants to unbond 200 in era 4, withdrawable in era 7. + start_era(4); + assert_ok!(Staking::unbond(RawOrigin::Signed(agent).into(), 200)); + + // active stake is now reduced.. + let expected_active = total_staked - (50 + 100 + 200); + assert!(eq_stake(agent, total_staked, expected_active)); + + // nothing to withdraw at era 4 + assert_noop!( + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 305, 50, 0), + Error::::NotEnoughFunds + ); + + assert_eq!(get_agent(&agent).available_to_bond(), 0); + // full amount is still delegated + assert_eq!(get_agent(&agent).ledger.effective_balance(), total_staked); + + start_era(5); + // at era 5, 50 tokens are withdrawable, cannot withdraw more. + assert_noop!( + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 305, 51, 0), + Error::::NotEnoughFunds + ); + // less is possible + assert_ok!(DelegatedStaking::release_delegation( + RawOrigin::Signed(agent).into(), + 305, + 30, + 0 + )); + assert_ok!(DelegatedStaking::release_delegation( + RawOrigin::Signed(agent).into(), + 305, + 20, + 0 + )); + + // Lets go to future era where everything is unbonded. Withdrawable amount: 100 + 200 + start_era(7); + // 305 has no more amount delegated so it cannot withdraw. + assert_noop!( + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 305, 5, 0), + Error::::NotDelegator + ); + // 309 is an active delegator but has total delegation of 90, so it cannot withdraw more + // than that. + assert_noop!( + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 309, 91, 0), + Error::::NotEnoughFunds + ); + // 310 cannot withdraw more than delegated funds. + assert_noop!( + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 310, 101, 0), + Error::::NotEnoughFunds + ); + // but can withdraw all its delegation amount. + assert_ok!(DelegatedStaking::release_delegation( + RawOrigin::Signed(agent).into(), + 310, + 100, + 0 + )); + // 320 can withdraw all its delegation amount. + assert_ok!(DelegatedStaking::release_delegation( + RawOrigin::Signed(agent).into(), + 320, + 200, + 0 + )); + + // cannot withdraw anything more.. + assert_noop!( + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 301, 1, 0), + Error::::NotEnoughFunds + ); + assert_noop!( + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 350, 1, 0), + Error::::NotEnoughFunds + ); + }); + } + + #[test] + fn withdraw_happens_with_unbonded_balance_first() { + ExtBuilder::default().build_and_execute(|| { + start_era(1); + let agent = 200; + setup_delegation_stake(agent, 201, (300..350).collect(), 100, 0); + + // verify withdraw not possible yet + assert_noop!( + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 300, 100, 0), + Error::::NotEnoughFunds + ); + + // fill up unlocking chunks in core staking. + // 10 is the max chunks + for i in 2..=11 { + start_era(i); + assert_ok!(Staking::unbond(RawOrigin::Signed(agent).into(), 10)); + // no withdrawals from core staking yet. 
+ assert_eq!(get_agent(&agent).ledger.unclaimed_withdrawals, 0); + } + + // another unbond would trigger withdrawal + start_era(12); + assert_ok!(Staking::unbond(RawOrigin::Signed(agent).into(), 10)); + + // 8 previous unbonds would be withdrawn as they were already unlocked. Unlocking period + // is 3 eras. + assert_eq!(get_agent(&agent).ledger.unclaimed_withdrawals, 8 * 10); + + // release some delegation now. + assert_ok!(DelegatedStaking::release_delegation( + RawOrigin::Signed(agent).into(), + 300, + 40, + 0 + )); + assert_eq!(get_agent(&agent).ledger.unclaimed_withdrawals, 80 - 40); + + // cannot release more than available + assert_noop!( + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 300, 50, 0), + Error::::NotEnoughFunds + ); + assert_ok!(DelegatedStaking::release_delegation( + RawOrigin::Signed(agent).into(), + 300, + 40, + 0 + )); + + assert_eq!(DelegatedStaking::held_balance_of(&300), 100 - 80); + }); + } + + #[test] + fn reward_destination_restrictions() { + ExtBuilder::default().build_and_execute(|| { + // give some funds to 200 + fund(&200, 1000); + let balance_200 = Balances::free_balance(200); + + // `Agent` account cannot be reward destination + assert_noop!( + DelegatedStaking::register_agent(RawOrigin::Signed(200).into(), 200), + Error::::InvalidRewardDestination + ); + + // different reward account works + assert_ok!(DelegatedStaking::register_agent(RawOrigin::Signed(200).into(), 201)); + // add some delegations to it + fund(&300, 1000); + assert_ok!(DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(300).into(), + 200, + 100 + )); + + // update_payee to self fails. + assert_noop!( + ::update_payee(&200, &200), + StakingError::::RewardDestinationRestricted + ); + + // passing correct reward destination works + assert_ok!(::update_payee(&200, &201)); + + // amount is staked correctly + assert!(eq_stake(200, 100, 100)); + assert_eq!(get_agent(&200).available_to_bond(), 0); + assert_eq!(get_agent(&200).ledger.effective_balance(), 100); + + // free balance of delegate is untouched + assert_eq!(Balances::free_balance(200), balance_200); + }); + } + + #[test] + fn agent_restrictions() { + ExtBuilder::default().build_and_execute(|| { + setup_delegation_stake(200, 201, (202..203).collect(), 100, 0); + + // Registering again is noop + assert_noop!( + DelegatedStaking::register_agent(RawOrigin::Signed(200).into(), 201), + Error::::NotAllowed + ); + // a delegator cannot become delegate + assert_noop!( + DelegatedStaking::register_agent(RawOrigin::Signed(202).into(), 203), + Error::::NotAllowed + ); + // existing staker cannot become a delegate + assert_noop!( + DelegatedStaking::register_agent( + RawOrigin::Signed(GENESIS_NOMINATOR_ONE).into(), + 201 + ), + Error::::AlreadyStaking + ); + assert_noop!( + DelegatedStaking::register_agent(RawOrigin::Signed(GENESIS_VALIDATOR).into(), 201), + Error::::AlreadyStaking + ); + }); + } + + #[test] + fn migration_works() { + ExtBuilder::default().build_and_execute(|| { + // add a nominator + let staked_amount = 4000; + let agent_amount = 5000; + fund(&200, agent_amount); + + assert_ok!(Staking::bond( + RuntimeOrigin::signed(200), + staked_amount, + RewardDestination::Account(201) + )); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(200), vec![GENESIS_VALIDATOR],)); + let init_stake = Staking::stake(&200).unwrap(); + + // scenario: 200 is a pool account, and the stake comes from its 4 delegators (300..304) + // in equal parts. lets try to migrate this nominator into delegate based stake. 
+ + // all balance currently is in 200 + assert_eq!(Balances::free_balance(200), agent_amount); + + // to migrate, nominator needs to set an account as a proxy delegator where staked funds + // will be moved and delegated back to this old nominator account. This should be funded + // with at least ED. + let proxy_delegator = DelegatedStaking::sub_account(AccountType::ProxyDelegator, 200); + + assert_ok!(DelegatedStaking::migrate_to_agent(RawOrigin::Signed(200).into(), 201)); + + // verify all went well + let mut expected_proxy_delegated_amount = agent_amount; + assert_eq!( + Balances::balance_on_hold(&HoldReason::StakingDelegation.into(), &proxy_delegator), + expected_proxy_delegated_amount + ); + // stake amount is transferred from delegate to proxy delegator account. + assert_eq!(Balances::free_balance(200), 0); + assert_eq!(Staking::stake(&200).unwrap(), init_stake); + assert_eq!(get_agent(&200).ledger.effective_balance(), agent_amount); + assert_eq!(get_agent(&200).available_to_bond(), 0); + assert_eq!(get_agent(&200).ledger.unclaimed_withdrawals, agent_amount - staked_amount); + + // now lets migrate the delegators + let delegator_share = agent_amount / 4; + for delegator in 300..304 { + assert_eq!(Balances::free_balance(delegator), 0); + // fund them with ED + fund(&delegator, ExistentialDeposit::get()); + // migrate 1/4th amount into each delegator + assert_ok!(DelegatedStaking::migrate_delegation( + RawOrigin::Signed(200).into(), + delegator, + delegator_share + )); + assert_eq!( + Balances::balance_on_hold(&HoldReason::StakingDelegation.into(), &delegator), + delegator_share + ); + expected_proxy_delegated_amount -= delegator_share; + assert_eq!( + Balances::balance_on_hold( + &HoldReason::StakingDelegation.into(), + &proxy_delegator + ), + expected_proxy_delegated_amount + ); + + // delegate stake is unchanged. + assert_eq!(Staking::stake(&200).unwrap(), init_stake); + assert_eq!(get_agent(&200).ledger.effective_balance(), agent_amount); + assert_eq!(get_agent(&200).available_to_bond(), 0); + assert_eq!( + get_agent(&200).ledger.unclaimed_withdrawals, + agent_amount - staked_amount + ); + } + + // cannot use migrate delegator anymore + assert_noop!( + DelegatedStaking::migrate_delegation(RawOrigin::Signed(200).into(), 305, 1), + Error::::NotEnoughFunds + ); + }); + } +} diff --git a/substrate/frame/delegated-staking/src/types.rs b/substrate/frame/delegated-staking/src/types.rs new file mode 100644 index 00000000000..0bfc23281df --- /dev/null +++ b/substrate/frame/delegated-staking/src/types.rs @@ -0,0 +1,292 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Basic types used in delegated staking. + +use super::*; +use frame_support::traits::DefensiveSaturating; + +/// The type of pot account being created. 
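+///
+/// Together with the pallet id this is used to derive deterministic pot accounts; e.g. the
+/// proxy delegator pot of an agent is obtained via
+/// `Pallet::<T>::sub_account(AccountType::ProxyDelegator, agent)`, as exercised in the
+/// `migration_works` test.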
+#[derive(Encode, Decode)] +pub(crate) enum AccountType { + /// A proxy delegator account created for a nominator who migrated to an `Agent` account. + /// + /// Funds for unmigrated `delegator` accounts of the `Agent` are kept here. + ProxyDelegator, +} + +/// Information about delegation of a `delegator`. +#[derive(Default, Encode, Clone, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[scale_info(skip_type_params(T))] +pub struct Delegation { + /// The target of delegation. + pub agent: T::AccountId, + /// The amount delegated. + pub amount: BalanceOf, +} + +impl Delegation { + /// Get delegation of a `delegator`. + pub(crate) fn get(delegator: &T::AccountId) -> Option { + >::get(delegator) + } + + /// Create and return a new delegation instance. + pub(crate) fn new(agent: &T::AccountId, amount: BalanceOf) -> Self { + Delegation { agent: agent.clone(), amount } + } + + /// Ensure the delegator is either a new delegator or they are adding more delegation to the + /// existing agent. + /// + /// Delegators are prevented from delegating to multiple agents at the same time. + pub(crate) fn can_delegate(delegator: &T::AccountId, agent: &T::AccountId) -> bool { + Delegation::::get(delegator) + .map(|delegation| delegation.agent == *agent) + .unwrap_or( + // all good if it is a new delegator except it should not be an existing agent. + !>::contains_key(delegator), + ) + } + + /// Save self to storage. If the delegation amount is zero, remove the delegation. + pub(crate) fn update_or_kill(self, key: &T::AccountId) { + // Clean up if no delegation left. + if self.amount == Zero::zero() { + >::remove(key); + return + } + + >::insert(key, self) + } +} + +/// Ledger of all delegations to an `Agent`. +/// +/// This keeps track of the active balance of the `Agent` that is made up from the funds that +/// are currently delegated to this `Agent`. It also tracks the pending slashes yet to be +/// applied among other things. +#[derive(Default, Clone, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[scale_info(skip_type_params(T))] +pub struct AgentLedger { + /// Where the reward should be paid out. + pub payee: T::AccountId, + /// Sum of all delegated funds to this `Agent`. + #[codec(compact)] + pub total_delegated: BalanceOf, + /// Funds that are withdrawn from core staking but not released to delegator/s. It is a subset + /// of `total_delegated` and can never be greater than it. + /// + /// We need this register to ensure that the `Agent` does not bond funds from delegated + /// funds that are withdrawn and should be claimed by delegators. + #[codec(compact)] + pub unclaimed_withdrawals: BalanceOf, + /// Slashes that are not yet applied. This affects the effective balance of the `Agent`. + #[codec(compact)] + pub pending_slash: BalanceOf, +} + +impl AgentLedger { + /// Create a new instance of `AgentLedger`. + pub(crate) fn new(reward_destination: &T::AccountId) -> Self { + AgentLedger { + payee: reward_destination.clone(), + total_delegated: Zero::zero(), + unclaimed_withdrawals: Zero::zero(), + pending_slash: Zero::zero(), + } + } + + /// Get `AgentLedger` from storage. + pub(crate) fn get(key: &T::AccountId) -> Option { + >::get(key) + } + + /// Save self to storage with the given key. + pub(crate) fn update(self, key: &T::AccountId) { + >::insert(key, self) + } + + /// Effective total balance of the `Agent`. + /// + /// This takes into account any slashes reported to `Agent` but unapplied. 
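+	///
+	/// For example: with `total_delegated = 100` and `pending_slash = 30`, the effective
+	/// balance is `70`.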
+ pub(crate) fn effective_balance(&self) -> BalanceOf { + defensive_assert!( + self.total_delegated >= self.pending_slash, + "slash cannot be higher than actual balance of delegator" + ); + + // pending slash needs to be burned and cannot be used for stake. + self.total_delegated.saturating_sub(self.pending_slash) + } + + /// Agent balance that can be staked/bonded in [`T::CoreStaking`]. + pub(crate) fn stakeable_balance(&self) -> BalanceOf { + self.effective_balance().saturating_sub(self.unclaimed_withdrawals) + } +} + +/// Wrapper around `AgentLedger` to provide some helper functions to mutate the ledger. +#[derive(Clone)] +pub struct Agent { + /// storage key + pub key: T::AccountId, + /// storage value + pub ledger: AgentLedger, +} + +impl Agent { + /// Get `Agent` from storage if it exists or return an error. + pub(crate) fn get(agent: &T::AccountId) -> Result, DispatchError> { + let ledger = AgentLedger::::get(agent).ok_or(Error::::NotAgent)?; + Ok(Agent { key: agent.clone(), ledger }) + } + + /// Remove funds that are withdrawn from [Config::CoreStaking] but not claimed by a delegator. + /// + /// Checked decrease of delegation amount from `total_delegated` and `unclaimed_withdrawals` + /// registers. Consumes self and returns a new instance of self if success. + pub(crate) fn remove_unclaimed_withdraw( + self, + amount: BalanceOf, + ) -> Result { + let new_total_delegated = self + .ledger + .total_delegated + .checked_sub(&amount) + .defensive_ok_or(ArithmeticError::Overflow)?; + let new_unclaimed_withdrawals = self + .ledger + .unclaimed_withdrawals + .checked_sub(&amount) + .defensive_ok_or(ArithmeticError::Overflow)?; + + Ok(Agent { + ledger: AgentLedger { + total_delegated: new_total_delegated, + unclaimed_withdrawals: new_unclaimed_withdrawals, + ..self.ledger + }, + ..self + }) + } + + /// Add funds that are withdrawn from [Config::CoreStaking] to be claimed by delegators later. + pub(crate) fn add_unclaimed_withdraw( + self, + amount: BalanceOf, + ) -> Result { + let new_unclaimed_withdrawals = self + .ledger + .unclaimed_withdrawals + .checked_add(&amount) + .defensive_ok_or(ArithmeticError::Overflow)?; + + Ok(Agent { + ledger: AgentLedger { unclaimed_withdrawals: new_unclaimed_withdrawals, ..self.ledger }, + ..self + }) + } + + /// Amount that is delegated but not bonded yet. + /// + /// This importantly does not include `unclaimed_withdrawals` as those should not be bonded + /// again unless explicitly requested. + pub(crate) fn available_to_bond(&self) -> BalanceOf { + let bonded_stake = self.bonded_stake(); + let stakeable = self.ledger.stakeable_balance(); + + defensive_assert!( + stakeable >= bonded_stake, + "cannot be bonded with more than total amount delegated to agent" + ); + + stakeable.saturating_sub(bonded_stake) + } + + /// Remove slashes from the `AgentLedger`. + pub(crate) fn remove_slash(self, amount: BalanceOf) -> Self { + let pending_slash = self.ledger.pending_slash.defensive_saturating_sub(amount); + let total_delegated = self.ledger.total_delegated.defensive_saturating_sub(amount); + + Agent { ledger: AgentLedger { pending_slash, total_delegated, ..self.ledger }, ..self } + } + + /// Get the total stake of agent bonded in [`Config::CoreStaking`]. + pub(crate) fn bonded_stake(&self) -> BalanceOf { + T::CoreStaking::total_stake(&self.key).unwrap_or(Zero::zero()) + } + + /// Returns true if the agent is bonded in [`Config::CoreStaking`]. 
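+	///
+	/// Determined by whether [`StakingInterface::stake`] returns `Ok` for the agent account
+	/// in `T::CoreStaking`.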
+	pub(crate) fn is_bonded(&self) -> bool {
+		T::CoreStaking::stake(&self.key).is_ok()
+	}
+
+	/// Returns the reward account registered by the agent.
+	pub(crate) fn reward_account(&self) -> &T::AccountId {
+		&self.ledger.payee
+	}
+
+	/// Save self to storage.
+	pub(crate) fn save(self) {
+		let key = self.key;
+		self.ledger.update(&key)
+	}
+
+	/// Save self and remove if no delegation left.
+	///
+	/// Returns:
+	/// - true if agent killed.
+	/// - error if the agent is in an unexpected state.
+	pub(crate) fn update_or_kill(self) -> Result<bool, DispatchError> {
+		let key = self.key;
+		// see if the agent can be killed
+		if self.ledger.total_delegated == Zero::zero() {
+			ensure!(
+				self.ledger.unclaimed_withdrawals == Zero::zero() &&
+					self.ledger.pending_slash == Zero::zero(),
+				Error::<T>::BadState
+			);
+			<Agents<T>>::remove(key);
+			return Ok(true)
+		}
+		self.ledger.update(&key);
+		Ok(false)
+	}
+
+	/// Reloads self from storage.
+	pub(crate) fn refresh(self) -> Result<Agent<T>, DispatchError> {
+		Self::get(&self.key)
+	}
+
+	/// Balance of `Agent` that is not bonded.
+	///
+	/// This is similar to [Self::available_to_bond] except it also includes `unclaimed_withdrawals`
+	/// of `Agent`.
+	#[cfg(test)]
+	#[allow(unused)]
+	pub(crate) fn total_unbonded(&self) -> BalanceOf<T> {
+		let bonded_stake = self.bonded_stake();
+
+		let net_balance = self.ledger.effective_balance();
+
+		assert!(net_balance >= bonded_stake, "cannot be bonded with more than the agent balance");
+
+		net_balance.saturating_sub(bonded_stake)
+	}
+}
diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs
index 692e62acfdf..4f91fd6dff2 100644
--- a/substrate/frame/staking/src/lib.rs
+++ b/substrate/frame/staking/src/lib.rs
@@ -376,7 +376,7 @@ pub struct ActiveEraInfo {
 	///
 	/// Start can be none if start hasn't been set for the era yet,
 	/// Start is set on the first on_finalize of the era to guarantee usage of `Time`.
-	start: Option<u64>,
+	pub start: Option<u64>,
 }
 
 /// Reward points of an era. Used to split era total payout between validators.
diff --git a/substrate/primitives/staking/src/lib.rs b/substrate/primitives/staking/src/lib.rs
index ad6cc6e2f4f..c7045508cea 100644
--- a/substrate/primitives/staking/src/lib.rs
+++ b/substrate/primitives/staking/src/lib.rs
@@ -456,4 +456,123 @@ pub struct PagedExposureMetadata {
 	pub page_count: Page,
 }
 
+/// Trait to provide delegation functionality for stakers.
+///
+/// Introduces two new terms to the staking system:
+/// - `Delegator`: An account that delegates funds to an `Agent`.
+/// - `Agent`: An account that receives delegated funds from `Delegators`. It can then use these
+///   funds to participate in the staking system. It can never use its own funds to stake. They
+///   [virtually bond](`StakingUnchecked::virtual_bond`) into the staking system and can also be
+///   termed `Virtual Nominators`.
+///
+/// The `Agent` is responsible for managing rewards and slashing for all the `Delegators` that
+/// have delegated funds to it.
+pub trait DelegationInterface {
+	/// Balance type used by the staking system.
+	type Balance: Sub<Output = Self::Balance>
+		+ Ord
+		+ PartialEq
+		+ Default
+		+ Copy
+		+ MaxEncodedLen
+		+ FullCodec
+		+ TypeInfo
+		+ Saturating;
+
+	/// AccountId type used by the staking system.
+	type AccountId: Clone + core::fmt::Debug;
+
+	/// Effective balance of the `Agent` account.
+	///
+	/// This takes into account any pending slashes to `Agent`.
+	fn agent_balance(agent: &Self::AccountId) -> Self::Balance;
+
+	/// Returns the total amount of funds delegated by a `delegator`.
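+	///
+	/// In `pallet-delegated-staking`, for example, this corresponds to the delegator's
+	/// balance held under `HoldReason::StakingDelegation`.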
+	fn delegator_balance(delegator: &Self::AccountId) -> Self::Balance;
+
+	/// Delegate funds to `Agent`.
+	///
+	/// Only used for the initial delegation. Use [`Self::delegate_extra`] to add more delegation.
+	fn delegate(
+		delegator: &Self::AccountId,
+		agent: &Self::AccountId,
+		reward_account: &Self::AccountId,
+		amount: Self::Balance,
+	) -> DispatchResult;
+
+	/// Add more delegation to the `Agent`.
+	///
+	/// If this is the first delegation, use [`Self::delegate`] instead.
+	fn delegate_extra(
+		delegator: &Self::AccountId,
+		agent: &Self::AccountId,
+		amount: Self::Balance,
+	) -> DispatchResult;
+
+	/// Withdraw or revoke delegation to `Agent`.
+	///
+	/// If there are `Agent` funds up to `amount` available to withdraw, then those funds would
+	/// be released to the `delegator`.
+	fn withdraw_delegation(
+		delegator: &Self::AccountId,
+		agent: &Self::AccountId,
+		amount: Self::Balance,
+		num_slashing_spans: u32,
+	) -> DispatchResult;
+
+	/// Returns true if there are pending slashes posted to the `Agent` account.
+	///
+	/// Slashes to `Agent` account are not immediate and are applied lazily. Since `Agent`
+	/// has an unbounded number of delegators, immediate slashing is not possible.
+	fn has_pending_slash(agent: &Self::AccountId) -> bool;
+
+	/// Apply a pending slash to an `Agent` by slashing `value` from `delegator`.
+	///
+	/// A reporter may be provided (if one exists) in order for the implementor to reward them,
+	/// if applicable.
+	fn delegator_slash(
+		agent: &Self::AccountId,
+		delegator: &Self::AccountId,
+		value: Self::Balance,
+		maybe_reporter: Option<Self::AccountId>,
+	) -> DispatchResult;
+}
+
+/// Trait to provide functionality for direct stakers to migrate to delegation agents.
+/// See [`DelegationInterface`] for more details on delegation.
+pub trait DelegationMigrator {
+	/// Balance type used by the staking system.
+	type Balance: Sub<Output = Self::Balance>
+		+ Ord
+		+ PartialEq
+		+ Default
+		+ Copy
+		+ MaxEncodedLen
+		+ FullCodec
+		+ TypeInfo
+		+ Saturating;
+
+	/// AccountId type used by the staking system.
+	type AccountId: Clone + core::fmt::Debug;
+
+	/// Migrate an existing `Nominator` to `Agent` account.
+	///
+	/// The implementation should ensure the `Nominator` account funds are moved to an escrow
+	/// from which `Agents` can later release funds to their `Delegators`.
+	fn migrate_nominator_to_agent(
+		agent: &Self::AccountId,
+		reward_account: &Self::AccountId,
+	) -> DispatchResult;
+
+	/// Migrate `value` of delegation to `delegator` from a migrating agent.
+	///
+	/// When a direct `Nominator` migrates to `Agent`, the funds are kept in escrow. This function
+	/// allows the `Agent` to release the funds to the `delegator`.
+	fn migrate_delegation(
+		agent: &Self::AccountId,
+		delegator: &Self::AccountId,
+		value: Self::Balance,
+	) -> DispatchResult;
+}
+
 sp_core::generate_feature_enabled_macro!(runtime_benchmarks_enabled, feature = "runtime-benchmarks", $);
-- 
GitLab


From f2b367ee8df4d41b1dadd41c609264f3a223c06d Mon Sep 17 00:00:00 2001
From: Alex Wang
Date: Wed, 15 May 2024 20:06:09 +0800
Subject: [PATCH 010/106] Add OnFinality kusama bootnode (#4458)

This is for adding the OnFinality Kusama bootnode.
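One way to smoke-test the new entry locally (assuming a built `polkadot`
binary; `--reserved-only`/`--reserved-nodes` are the standard CLI flags for
connecting to a single node):

```sh
polkadot --chain kusama --reserved-only \
  --reserved-nodes "/dns4/kusama-0.boot.onfinality.io/tcp/27682/ws/p2p/12D3KooWFrwFo7ry3dEuFwhehGSSN96a5Xdzxot7SWfXeSbhELAe"
```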
Please correct me if this is not the right place for adding a new bootnode
---
 polkadot/node/service/chain-specs/kusama.json | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/polkadot/node/service/chain-specs/kusama.json b/polkadot/node/service/chain-specs/kusama.json
index aa5a199cfee..23094360866 100644
--- a/polkadot/node/service/chain-specs/kusama.json
+++ b/polkadot/node/service/chain-specs/kusama.json
@@ -36,7 +36,8 @@
     "/dns/ksm14.rotko.net/tcp/33224/p2p/12D3KooWAa5THTw8HPfnhEei23HdL8P9McBXdozG2oTtMMksjZkK",
     "/dns/ibp-boot-kusama.luckyfriday.io/tcp/30333/p2p/12D3KooW9vu1GWHBuxyhm7rZgD3fhGZpNajPXFexadvhujWMgwfT",
     "/dns/boot-kusama.luckyfriday.io/tcp/443/wss/p2p/12D3KooWS1Lu6DmK8YHSvkErpxpcXmk14vG6y4KVEFEkd9g62PP8",
-    "/dns/ibp-boot-kusama.luckyfriday.io/tcp/30334/wss/p2p/12D3KooW9vu1GWHBuxyhm7rZgD3fhGZpNajPXFexadvhujWMgwfT"
+    "/dns/ibp-boot-kusama.luckyfriday.io/tcp/30334/wss/p2p/12D3KooW9vu1GWHBuxyhm7rZgD3fhGZpNajPXFexadvhujWMgwfT",
+    "/dns4/kusama-0.boot.onfinality.io/tcp/27682/ws/p2p/12D3KooWFrwFo7ry3dEuFwhehGSSN96a5Xdzxot7SWfXeSbhELAe"
   ],
   "telemetryEndpoints": [
     [
-- 
GitLab


From 404027e59d0f2eb9d12ba6248e5c086f95a3bc40 Mon Sep 17 00:00:00 2001
From: Liu-Cheng Xu
Date: Wed, 15 May 2024 20:51:13 +0800
Subject: [PATCH 011/106] Fix extrinsics count logging in frame-system (#4461)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The storage item ExtrinsicIndex is already taken before the `finalize()` in
`note_finished_extrinsics()`, so it always reads 0 in the log. This commit
fixes that by using the proper API for the extrinsics count.

---------

Co-authored-by: Bastian Köcher
Co-authored-by: Oliver Tale-Yazdi
---
 prdoc/pr_4461.prdoc               | 10 ++++++++++
 substrate/frame/system/src/lib.rs |  2 +-
 2 files changed, 11 insertions(+), 1 deletion(-)
 create mode 100644 prdoc/pr_4461.prdoc

diff --git a/prdoc/pr_4461.prdoc b/prdoc/pr_4461.prdoc
new file mode 100644
index 00000000000..2dafa381287
--- /dev/null
+++ b/prdoc/pr_4461.prdoc
@@ -0,0 +1,10 @@
+title: Fix extrinsics count logging in frame-system
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Fixes the issue of the number of extrinsics in the block always being 0 in the log of frame-system.
+
+crates:
+  - name: frame-system
+    bump: patch
diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs
index 30df4dcfd43..7ed954d83aa 100644
--- a/substrate/frame/system/src/lib.rs
+++ b/substrate/frame/system/src/lib.rs
@@ -1780,7 +1780,7 @@ impl<T: Config> Pallet<T> {
 			"[{:?}] {} extrinsics, length: {} (normal {}%, op: {}%, mandatory {}%) / normal weight:\
 			 {} ({}%) op weight {} ({}%) / mandatory weight {} ({}%)",
 			Self::block_number(),
-			Self::extrinsic_index().unwrap_or_default(),
+			Self::extrinsic_count(),
 			Self::all_extrinsics_len(),
 			sp_runtime::Percent::from_rational(
 				Self::all_extrinsics_len(),
-- 
GitLab


From 59d7e0372f08499d9aefadbdda1e523f6b42bb65 Mon Sep 17 00:00:00 2001
From: Dastan <88332432+dastansam@users.noreply.github.com>
Date: Wed, 15 May 2024 16:11:42 +0200
Subject: [PATCH 012/106] Export all public functions of `sc-service` (#4457)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

https://github.com/paritytech/polkadot-sdk/pull/3166 made private functions
used in `spawn_tasks()` public but forgot to add them to the crate's exported
functions.
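With this change, a custom node setup can import the helpers directly; e.g.
(the names shown are among the newly exported ones):

```rust
use sc_service::{gen_rpc_module, init_telemetry, propagate_transaction_notifications};
```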
---------

Co-authored-by: Oliver Tale-Yazdi
Co-authored-by: Bastian Köcher
---
 prdoc/pr_4457.prdoc                 | 15 +++++++++++++++
 substrate/client/service/src/lib.rs |  9 +++++----
 2 files changed, 20 insertions(+), 4 deletions(-)
 create mode 100644 prdoc/pr_4457.prdoc

diff --git a/prdoc/pr_4457.prdoc b/prdoc/pr_4457.prdoc
new file mode 100644
index 00000000000..5c9bd982276
--- /dev/null
+++ b/prdoc/pr_4457.prdoc
@@ -0,0 +1,15 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "sc-service: export all public functions"
+
+doc:
+  - audience: Node Dev
+    description: |
+      A PR #3166 converted private functions used in `spawn_tasks()` to public to make it possible to have a custom
+      implementation of `spawn_tasks()`. However, not all functions were included in the list of exports from
+      the `sc-service` crate.
+
+crates:
+  - name: sc-service
+    bump: minor
diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs
index 444cb4a06eb..d251fd2b58f 100644
--- a/substrate/client/service/src/lib.rs
+++ b/substrate/client/service/src/lib.rs
@@ -54,10 +54,11 @@ use sp_runtime::traits::{Block as BlockT, Header as HeaderT};
 
 pub use self::{
 	builder::{
-		build_network, new_client, new_db_backend, new_full_client, new_full_parts,
-		new_full_parts_record_import, new_full_parts_with_genesis_builder, new_wasm_executor,
-		spawn_tasks, BuildNetworkParams, KeystoreContainer, NetworkStarter, SpawnTasksParams,
-		TFullBackend, TFullCallExecutor, TFullClient,
+		build_network, gen_rpc_module, init_telemetry, new_client, new_db_backend, new_full_client,
+		new_full_parts, new_full_parts_record_import, new_full_parts_with_genesis_builder,
+		new_wasm_executor, propagate_transaction_notifications, spawn_tasks, BuildNetworkParams,
+		KeystoreContainer, NetworkStarter, SpawnTasksParams, TFullBackend, TFullCallExecutor,
+		TFullClient,
 	},
 	client::{ClientConfig, LocalCallExecutor},
 	error::Error,
-- 
GitLab


From e31fcffb55f32c33f7afff2c8e73c3f1d66d6c09 Mon Sep 17 00:00:00 2001
From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com>
Date: Wed, 15 May 2024 17:51:51 +0200
Subject: [PATCH 013/106] Make vscode rustanalyzer fast again (#4470)

This bump of versions
(https://github.com/paritytech/polkadot-sdk/pull/4409/files#diff-13ee4b2252c9e516a0547f2891aa2105c3ca71c6d7a1e682c69be97998dfc87eR11936)
reintroduced a dependency on proc-macro-crate 2.0.0, which suffers from
https://github.com/bkchr/proc-macro-crate/pull/42, so bump parity-scale-codec
to a newer version to eliminate the bad proc-macro-crate 2.0.0 dependency.
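To confirm that the stale version is gone from the dependency graph, something
like the following can be used:

```sh
cargo tree --invert proc-macro-crate
```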
--------- Signed-off-by: Alexandru Gheorghe Co-authored-by: command-bot <> --- Cargo.lock | 30 ++++--------------- bridges/bin/runtime-common/Cargo.toml | 2 +- .../chains/chain-asset-hub-rococo/Cargo.toml | 2 +- .../chains/chain-asset-hub-westend/Cargo.toml | 2 +- .../chains/chain-polkadot-bulletin/Cargo.toml | 2 +- bridges/modules/beefy/Cargo.toml | 2 +- bridges/modules/grandpa/Cargo.toml | 2 +- bridges/modules/messages/Cargo.toml | 2 +- bridges/modules/parachains/Cargo.toml | 2 +- bridges/modules/relayers/Cargo.toml | 2 +- .../modules/xcm-bridge-hub-router/Cargo.toml | 2 +- bridges/modules/xcm-bridge-hub/Cargo.toml | 2 +- bridges/primitives/beefy/Cargo.toml | 2 +- bridges/primitives/header-chain/Cargo.toml | 2 +- bridges/primitives/messages/Cargo.toml | 2 +- bridges/primitives/parachains/Cargo.toml | 2 +- bridges/primitives/polkadot-core/Cargo.toml | 2 +- bridges/primitives/relayers/Cargo.toml | 2 +- bridges/primitives/runtime/Cargo.toml | 2 +- bridges/primitives/test-utils/Cargo.toml | 2 +- .../xcm-bridge-hub-router/Cargo.toml | 2 +- bridges/relays/client-substrate/Cargo.toml | 2 +- bridges/relays/lib-substrate-relay/Cargo.toml | 2 +- bridges/relays/parachains/Cargo.toml | 2 +- .../pallets/ethereum-client/Cargo.toml | 2 +- .../pallets/inbound-queue/Cargo.toml | 2 +- .../pallets/outbound-queue/Cargo.toml | 2 +- .../outbound-queue/merkle-tree/Cargo.toml | 2 +- .../outbound-queue/runtime-api/Cargo.toml | 2 +- bridges/snowbridge/pallets/system/Cargo.toml | 2 +- .../pallets/system/runtime-api/Cargo.toml | 2 +- .../snowbridge/primitives/beacon/Cargo.toml | 2 +- bridges/snowbridge/primitives/core/Cargo.toml | 2 +- .../snowbridge/primitives/ethereum/Cargo.toml | 2 +- .../snowbridge/primitives/router/Cargo.toml | 2 +- .../runtime/runtime-common/Cargo.toml | 2 +- .../snowbridge/runtime/test-common/Cargo.toml | 2 +- cumulus/client/cli/Cargo.toml | 2 +- cumulus/client/collator/Cargo.toml | 2 +- cumulus/client/consensus/aura/Cargo.toml | 2 +- cumulus/client/consensus/common/Cargo.toml | 2 +- cumulus/client/network/Cargo.toml | 2 +- cumulus/client/parachain-inherent/Cargo.toml | 2 +- cumulus/client/pov-recovery/Cargo.toml | 2 +- .../client/relay-chain-interface/Cargo.toml | 2 +- .../relay-chain-rpc-interface/Cargo.toml | 2 +- cumulus/pallets/aura-ext/Cargo.toml | 2 +- cumulus/pallets/collator-selection/Cargo.toml | 2 +- cumulus/pallets/dmp-queue/Cargo.toml | 2 +- cumulus/pallets/parachain-system/Cargo.toml | 2 +- .../pallets/session-benchmarking/Cargo.toml | 2 +- cumulus/pallets/solo-to-para/Cargo.toml | 2 +- cumulus/pallets/xcm/Cargo.toml | 2 +- cumulus/pallets/xcmp-queue/Cargo.toml | 2 +- cumulus/parachains/common/Cargo.toml | 2 +- .../emulated/common/Cargo.toml | 2 +- .../tests/assets/asset-hub-rococo/Cargo.toml | 2 +- .../tests/assets/asset-hub-westend/Cargo.toml | 2 +- .../bridges/bridge-hub-rococo/Cargo.toml | 2 +- .../collectives-westend/Cargo.toml | 2 +- .../tests/people/people-rococo/Cargo.toml | 2 +- .../tests/people/people-westend/Cargo.toml | 2 +- .../pallets/collective-content/Cargo.toml | 2 +- .../pallets/parachain-info/Cargo.toml | 2 +- cumulus/parachains/pallets/ping/Cargo.toml | 2 +- .../assets/asset-hub-rococo/Cargo.toml | 2 +- .../assets/asset-hub-westend/Cargo.toml | 2 +- .../runtimes/assets/common/Cargo.toml | 2 +- .../runtimes/assets/test-utils/Cargo.toml | 2 +- .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 2 +- .../bridge-hubs/bridge-hub-westend/Cargo.toml | 2 +- .../runtimes/bridge-hubs/common/Cargo.toml | 2 +- .../bridge-hubs/test-utils/Cargo.toml | 2 +- 
.../collectives-westend/Cargo.toml | 2 +- .../contracts/contracts-rococo/Cargo.toml | 2 +- .../coretime/coretime-rococo/Cargo.toml | 2 +- .../coretime/coretime-westend/Cargo.toml | 2 +- .../glutton/glutton-westend/Cargo.toml | 2 +- .../runtimes/people/people-rococo/Cargo.toml | 2 +- .../runtimes/people/people-westend/Cargo.toml | 2 +- .../runtimes/starters/seedling/Cargo.toml | 2 +- .../runtimes/starters/shell/Cargo.toml | 2 +- .../parachains/runtimes/test-utils/Cargo.toml | 2 +- .../runtimes/testing/penpal/Cargo.toml | 2 +- .../testing/rococo-parachain/Cargo.toml | 2 +- cumulus/polkadot-parachain/Cargo.toml | 2 +- cumulus/primitives/aura/Cargo.toml | 2 +- cumulus/primitives/core/Cargo.toml | 2 +- .../primitives/parachain-inherent/Cargo.toml | 2 +- .../storage-weight-reclaim/Cargo.toml | 2 +- cumulus/primitives/timestamp/Cargo.toml | 2 +- cumulus/primitives/utility/Cargo.toml | 2 +- cumulus/test/client/Cargo.toml | 2 +- cumulus/test/relay-sproof-builder/Cargo.toml | 2 +- cumulus/test/runtime/Cargo.toml | 2 +- cumulus/test/service/Cargo.toml | 2 +- cumulus/xcm/xcm-emulator/Cargo.toml | 2 +- docs/sdk/Cargo.toml | 2 +- polkadot/core-primitives/Cargo.toml | 2 +- polkadot/erasure-coding/Cargo.toml | 2 +- polkadot/node/collation-generation/Cargo.toml | 2 +- polkadot/node/core/approval-voting/Cargo.toml | 2 +- polkadot/node/core/av-store/Cargo.toml | 2 +- .../node/core/candidate-validation/Cargo.toml | 2 +- polkadot/node/core/chain-api/Cargo.toml | 2 +- polkadot/node/core/chain-selection/Cargo.toml | 2 +- .../node/core/dispute-coordinator/Cargo.toml | 2 +- .../core/prospective-parachains/Cargo.toml | 2 +- polkadot/node/core/pvf/Cargo.toml | 2 +- polkadot/node/core/pvf/common/Cargo.toml | 2 +- .../node/core/pvf/execute-worker/Cargo.toml | 2 +- .../node/core/pvf/prepare-worker/Cargo.toml | 2 +- polkadot/node/jaeger/Cargo.toml | 2 +- polkadot/node/metrics/Cargo.toml | 2 +- .../availability-distribution/Cargo.toml | 2 +- .../network/availability-recovery/Cargo.toml | 2 +- polkadot/node/network/bridge/Cargo.toml | 2 +- .../node/network/collator-protocol/Cargo.toml | 2 +- .../network/dispute-distribution/Cargo.toml | 2 +- polkadot/node/network/protocol/Cargo.toml | 2 +- .../network/statement-distribution/Cargo.toml | 2 +- polkadot/node/primitives/Cargo.toml | 2 +- polkadot/node/service/Cargo.toml | 2 +- polkadot/node/subsystem-bench/Cargo.toml | 2 +- polkadot/node/subsystem-util/Cargo.toml | 2 +- polkadot/node/test/client/Cargo.toml | 2 +- .../node/zombienet-backchannel/Cargo.toml | 2 +- polkadot/parachain/Cargo.toml | 2 +- polkadot/parachain/test-parachains/Cargo.toml | 2 +- .../test-parachains/adder/Cargo.toml | 2 +- .../test-parachains/adder/collator/Cargo.toml | 2 +- .../test-parachains/undying/Cargo.toml | 2 +- .../undying/collator/Cargo.toml | 2 +- polkadot/primitives/Cargo.toml | 2 +- polkadot/runtime/common/Cargo.toml | 2 +- .../common/slot_range_helper/Cargo.toml | 2 +- polkadot/runtime/metrics/Cargo.toml | 2 +- polkadot/runtime/parachains/Cargo.toml | 2 +- polkadot/runtime/rococo/Cargo.toml | 2 +- polkadot/runtime/test-runtime/Cargo.toml | 2 +- polkadot/runtime/westend/Cargo.toml | 2 +- polkadot/statement-table/Cargo.toml | 2 +- polkadot/xcm/Cargo.toml | 2 +- polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml | 2 +- polkadot/xcm/pallet-xcm/Cargo.toml | 2 +- polkadot/xcm/xcm-builder/Cargo.toml | 2 +- polkadot/xcm/xcm-executor/Cargo.toml | 2 +- .../xcm-executor/integration-tests/Cargo.toml | 2 +- .../xcm-fee-payment-runtime-api/Cargo.toml | 2 +- polkadot/xcm/xcm-simulator/Cargo.toml | 2 +- 
polkadot/xcm/xcm-simulator/example/Cargo.toml | 2 +- polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml | 2 +- substrate/bin/node/cli/Cargo.toml | 2 +- substrate/bin/node/inspect/Cargo.toml | 2 +- substrate/bin/node/runtime/Cargo.toml | 2 +- substrate/bin/node/testing/Cargo.toml | 2 +- substrate/client/api/Cargo.toml | 2 +- .../client/authority-discovery/Cargo.toml | 2 +- substrate/client/basic-authorship/Cargo.toml | 2 +- substrate/client/block-builder/Cargo.toml | 2 +- substrate/client/chain-spec/Cargo.toml | 2 +- substrate/client/cli/Cargo.toml | 2 +- substrate/client/consensus/aura/Cargo.toml | 2 +- substrate/client/consensus/babe/Cargo.toml | 2 +- substrate/client/consensus/beefy/Cargo.toml | 2 +- .../client/consensus/beefy/rpc/Cargo.toml | 2 +- substrate/client/consensus/epochs/Cargo.toml | 2 +- substrate/client/consensus/grandpa/Cargo.toml | 2 +- .../client/consensus/grandpa/rpc/Cargo.toml | 2 +- .../client/consensus/manual-seal/Cargo.toml | 2 +- substrate/client/consensus/pow/Cargo.toml | 2 +- substrate/client/consensus/slots/Cargo.toml | 2 +- substrate/client/db/Cargo.toml | 2 +- substrate/client/executor/Cargo.toml | 2 +- substrate/client/executor/wasmtime/Cargo.toml | 2 +- .../client/merkle-mountain-range/Cargo.toml | 2 +- .../merkle-mountain-range/rpc/Cargo.toml | 2 +- substrate/client/mixnet/Cargo.toml | 2 +- substrate/client/network-gossip/Cargo.toml | 2 +- substrate/client/network/Cargo.toml | 2 +- substrate/client/network/common/Cargo.toml | 2 +- substrate/client/network/light/Cargo.toml | 2 +- substrate/client/network/statement/Cargo.toml | 2 +- substrate/client/network/sync/Cargo.toml | 2 +- .../client/network/transactions/Cargo.toml | 2 +- substrate/client/offchain/Cargo.toml | 2 +- substrate/client/rpc-api/Cargo.toml | 2 +- substrate/client/rpc-spec-v2/Cargo.toml | 2 +- substrate/client/rpc/Cargo.toml | 2 +- substrate/client/service/Cargo.toml | 2 +- substrate/client/service/test/Cargo.toml | 2 +- substrate/client/state-db/Cargo.toml | 2 +- substrate/client/sync-state-rpc/Cargo.toml | 2 +- substrate/client/tracing/Cargo.toml | 2 +- substrate/client/transaction-pool/Cargo.toml | 2 +- .../client/transaction-pool/api/Cargo.toml | 2 +- substrate/frame/Cargo.toml | 2 +- substrate/frame/alliance/Cargo.toml | 2 +- substrate/frame/asset-conversion/Cargo.toml | 2 +- .../frame/asset-conversion/ops/Cargo.toml | 2 +- substrate/frame/asset-rate/Cargo.toml | 2 +- substrate/frame/assets/Cargo.toml | 2 +- substrate/frame/atomic-swap/Cargo.toml | 2 +- substrate/frame/aura/Cargo.toml | 2 +- .../frame/authority-discovery/Cargo.toml | 2 +- substrate/frame/authorship/Cargo.toml | 2 +- substrate/frame/babe/Cargo.toml | 2 +- substrate/frame/bags-list/Cargo.toml | 2 +- substrate/frame/balances/Cargo.toml | 2 +- substrate/frame/beefy-mmr/Cargo.toml | 2 +- substrate/frame/beefy/Cargo.toml | 2 +- substrate/frame/benchmarking/Cargo.toml | 2 +- substrate/frame/benchmarking/pov/Cargo.toml | 2 +- substrate/frame/bounties/Cargo.toml | 2 +- substrate/frame/broker/Cargo.toml | 2 +- substrate/frame/child-bounties/Cargo.toml | 2 +- substrate/frame/collective/Cargo.toml | 2 +- substrate/frame/contracts/Cargo.toml | 2 +- .../frame/contracts/mock-network/Cargo.toml | 2 +- substrate/frame/contracts/uapi/Cargo.toml | 2 +- substrate/frame/conviction-voting/Cargo.toml | 2 +- substrate/frame/core-fellowship/Cargo.toml | 2 +- substrate/frame/delegated-staking/Cargo.toml | 2 +- substrate/frame/democracy/Cargo.toml | 2 +- .../election-provider-multi-phase/Cargo.toml | 2 +- .../test-staking-e2e/Cargo.toml | 2 +- 
 .../election-provider-support/Cargo.toml | 2 +-
 .../benchmarking/Cargo.toml | 2 +-
 .../solution-type/Cargo.toml | 2 +-
 .../solution-type/fuzzer/Cargo.toml | 2 +-
 substrate/frame/elections-phragmen/Cargo.toml | 2 +-
 substrate/frame/examples/basic/Cargo.toml | 2 +-
 .../frame/examples/default-config/Cargo.toml | 2 +-
 substrate/frame/examples/dev-mode/Cargo.toml | 2 +-
 .../frame/examples/frame-crate/Cargo.toml | 2 +-
 .../frame/examples/kitchensink/Cargo.toml | 2 +-
 .../multi-block-migrations/Cargo.toml | 2 +-
 .../frame/examples/offchain-worker/Cargo.toml | 2 +-
 .../single-block-migrations/Cargo.toml | 2 +-
 substrate/frame/examples/split/Cargo.toml | 2 +-
 substrate/frame/examples/tasks/Cargo.toml | 2 +-
 substrate/frame/executive/Cargo.toml | 2 +-
 substrate/frame/fast-unstake/Cargo.toml | 2 +-
 substrate/frame/glutton/Cargo.toml | 2 +-
 substrate/frame/grandpa/Cargo.toml | 2 +-
 substrate/frame/identity/Cargo.toml | 2 +-
 substrate/frame/im-online/Cargo.toml | 2 +-
 substrate/frame/indices/Cargo.toml | 2 +-
 .../Cargo.toml | 2 +-
 substrate/frame/lottery/Cargo.toml | 2 +-
 substrate/frame/membership/Cargo.toml | 2 +-
 .../frame/merkle-mountain-range/Cargo.toml | 2 +-
 substrate/frame/message-queue/Cargo.toml | 2 +-
 substrate/frame/migrations/Cargo.toml | 2 +-
 substrate/frame/mixnet/Cargo.toml | 2 +-
 substrate/frame/multisig/Cargo.toml | 2 +-
 .../frame/nft-fractionalization/Cargo.toml | 2 +-
 substrate/frame/nfts/Cargo.toml | 2 +-
 substrate/frame/nfts/runtime-api/Cargo.toml | 2 +-
 substrate/frame/nis/Cargo.toml | 2 +-
 substrate/frame/node-authorization/Cargo.toml | 2 +-
 substrate/frame/nomination-pools/Cargo.toml | 2 +-
 .../nomination-pools/benchmarking/Cargo.toml | 2 +-
 .../nomination-pools/runtime-api/Cargo.toml | 2 +-
 .../nomination-pools/test-staking/Cargo.toml | 2 +-
 substrate/frame/offences/Cargo.toml | 2 +-
 .../frame/offences/benchmarking/Cargo.toml | 2 +-
 substrate/frame/paged-list/Cargo.toml | 2 +-
 substrate/frame/parameters/Cargo.toml | 2 +-
 substrate/frame/preimage/Cargo.toml | 2 +-
 substrate/frame/proxy/Cargo.toml | 2 +-
 substrate/frame/ranked-collective/Cargo.toml | 2 +-
 substrate/frame/recovery/Cargo.toml | 2 +-
 substrate/frame/referenda/Cargo.toml | 2 +-
 substrate/frame/remark/Cargo.toml | 2 +-
 substrate/frame/root-offences/Cargo.toml | 2 +-
 substrate/frame/root-testing/Cargo.toml | 2 +-
 substrate/frame/safe-mode/Cargo.toml | 2 +-
 substrate/frame/salary/Cargo.toml | 2 +-
 substrate/frame/sassafras/Cargo.toml | 2 +-
 substrate/frame/scheduler/Cargo.toml | 2 +-
 substrate/frame/scored-pool/Cargo.toml | 2 +-
 substrate/frame/session/Cargo.toml | 2 +-
 .../frame/session/benchmarking/Cargo.toml | 4 +--
 substrate/frame/society/Cargo.toml | 2 +-
 substrate/frame/staking/Cargo.toml | 2 +-
 .../frame/staking/runtime-api/Cargo.toml | 2 +-
 .../frame/state-trie-migration/Cargo.toml | 2 +-
 substrate/frame/statement/Cargo.toml | 2 +-
 substrate/frame/sudo/Cargo.toml | 2 +-
 substrate/frame/support/Cargo.toml | 2 +-
 substrate/frame/support/test/Cargo.toml | 2 +-
 .../support/test/compile_pass/Cargo.toml | 2 +-
 .../frame/support/test/pallet/Cargo.toml | 2 +-
 .../support/test/stg_frame_crate/Cargo.toml | 2 +-
 .../deprecated_where_block.stderr | 8 ++---
 substrate/frame/system/Cargo.toml | 2 +-
 .../frame/system/benchmarking/Cargo.toml | 2 +-
 .../frame/system/rpc/runtime-api/Cargo.toml | 2 +-
 substrate/frame/timestamp/Cargo.toml | 2 +-
 substrate/frame/tips/Cargo.toml | 2 +-
 .../frame/transaction-payment/Cargo.toml | 2 +-
 .../asset-conversion-tx-payment/Cargo.toml | 2 +-
 .../asset-tx-payment/Cargo.toml | 2 +-
.../frame/transaction-payment/rpc/Cargo.toml | 2 +- .../rpc/runtime-api/Cargo.toml | 2 +- .../skip-feeless-payment/Cargo.toml | 2 +- .../frame/transaction-storage/Cargo.toml | 2 +- substrate/frame/treasury/Cargo.toml | 2 +- substrate/frame/try-runtime/Cargo.toml | 2 +- substrate/frame/tx-pause/Cargo.toml | 2 +- substrate/frame/uniques/Cargo.toml | 2 +- substrate/frame/utility/Cargo.toml | 2 +- substrate/frame/vesting/Cargo.toml | 2 +- substrate/frame/whitelist/Cargo.toml | 2 +- substrate/primitives/api/Cargo.toml | 2 +- substrate/primitives/api/test/Cargo.toml | 2 +- .../primitives/application-crypto/Cargo.toml | 2 +- substrate/primitives/arithmetic/Cargo.toml | 2 +- .../primitives/authority-discovery/Cargo.toml | 2 +- substrate/primitives/blockchain/Cargo.toml | 2 +- .../primitives/consensus/aura/Cargo.toml | 2 +- .../primitives/consensus/babe/Cargo.toml | 2 +- .../primitives/consensus/beefy/Cargo.toml | 2 +- .../primitives/consensus/grandpa/Cargo.toml | 2 +- substrate/primitives/consensus/pow/Cargo.toml | 2 +- .../primitives/consensus/sassafras/Cargo.toml | 2 +- .../primitives/consensus/slots/Cargo.toml | 2 +- substrate/primitives/core/Cargo.toml | 2 +- substrate/primitives/externalities/Cargo.toml | 2 +- .../primitives/genesis-builder/Cargo.toml | 2 +- substrate/primitives/inherents/Cargo.toml | 2 +- substrate/primitives/io/Cargo.toml | 2 +- substrate/primitives/keystore/Cargo.toml | 2 +- .../merkle-mountain-range/Cargo.toml | 2 +- substrate/primitives/metadata-ir/Cargo.toml | 2 +- substrate/primitives/mixnet/Cargo.toml | 2 +- .../primitives/npos-elections/Cargo.toml | 2 +- .../primitives/runtime-interface/Cargo.toml | 2 +- substrate/primitives/runtime/Cargo.toml | 2 +- substrate/primitives/session/Cargo.toml | 2 +- substrate/primitives/staking/Cargo.toml | 2 +- substrate/primitives/state-machine/Cargo.toml | 2 +- .../primitives/statement-store/Cargo.toml | 2 +- substrate/primitives/storage/Cargo.toml | 2 +- .../primitives/test-primitives/Cargo.toml | 2 +- substrate/primitives/timestamp/Cargo.toml | 2 +- substrate/primitives/tracing/Cargo.toml | 2 +- .../transaction-storage-proof/Cargo.toml | 2 +- substrate/primitives/trie/Cargo.toml | 2 +- substrate/primitives/version/Cargo.toml | 2 +- .../primitives/version/proc-macro/Cargo.toml | 2 +- .../primitives/wasm-interface/Cargo.toml | 2 +- substrate/primitives/weights/Cargo.toml | 2 +- substrate/test-utils/client/Cargo.toml | 2 +- substrate/test-utils/runtime/Cargo.toml | 2 +- .../runtime/transaction-pool/Cargo.toml | 2 +- substrate/utils/fork-tree/Cargo.toml | 2 +- .../utils/frame/benchmarking-cli/Cargo.toml | 2 +- .../frame/remote-externalities/Cargo.toml | 2 +- .../rpc/state-trie-migration-rpc/Cargo.toml | 2 +- substrate/utils/frame/rpc/support/Cargo.toml | 2 +- substrate/utils/frame/rpc/system/Cargo.toml | 2 +- templates/minimal/pallets/template/Cargo.toml | 2 +- templates/minimal/runtime/Cargo.toml | 2 +- templates/parachain/node/Cargo.toml | 2 +- .../parachain/pallets/template/Cargo.toml | 2 +- templates/parachain/runtime/Cargo.toml | 2 +- .../solochain/pallets/template/Cargo.toml | 2 +- templates/solochain/runtime/Cargo.toml | 2 +- 370 files changed, 378 insertions(+), 398 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c91025ea34c..ded2cc53293 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11959,9 +11959,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.11" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a1b5927e4a9ae8d6cdb6a69e4e04a0ec73381a358e21b8a576f44769f34e7c24" +checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ "arrayvec 0.7.4", "bitvec", @@ -11974,11 +11974,11 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.9" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ - "proc-macro-crate 2.0.0", + "proc-macro-crate 3.1.0", "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", @@ -14650,15 +14650,6 @@ dependencies = [ "toml_edit 0.19.15", ] -[[package]] -name = "proc-macro-crate" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" -dependencies = [ - "toml_edit 0.20.7", -] - [[package]] name = "proc-macro-crate" version = "3.1.0" @@ -21545,17 +21536,6 @@ dependencies = [ "winnow", ] -[[package]] -name = "toml_edit" -version = "0.20.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" -dependencies = [ - "indexmap 2.2.3", - "toml_datetime", - "winnow", -] - [[package]] name = "toml_edit" version = "0.21.0" diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml index 74049031afe..783009a8c89 100644 --- a/bridges/bin/runtime-common/Cargo.toml +++ b/bridges/bin/runtime-common/Cargo.toml @@ -11,7 +11,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } hash-db = { version = "0.16.0", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/bridges/chains/chain-asset-hub-rococo/Cargo.toml b/bridges/chains/chain-asset-hub-rococo/Cargo.toml index 9a6419a5b40..d9afe2c8bf7 100644 --- a/bridges/chains/chain-asset-hub-rococo/Cargo.toml +++ b/bridges/chains/chain-asset-hub-rococo/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate Dependencies diff --git a/bridges/chains/chain-asset-hub-westend/Cargo.toml b/bridges/chains/chain-asset-hub-westend/Cargo.toml index 1c08ee28e41..4b3ed052f13 100644 --- a/bridges/chains/chain-asset-hub-westend/Cargo.toml +++ b/bridges/chains/chain-asset-hub-westend/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate Dependencies diff --git a/bridges/chains/chain-polkadot-bulletin/Cargo.toml b/bridges/chains/chain-polkadot-bulletin/Cargo.toml index 2db16a00e92..700247b7055 100644 --- 
a/bridges/chains/chain-polkadot-bulletin/Cargo.toml +++ b/bridges/chains/chain-polkadot-bulletin/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge Dependencies diff --git a/bridges/modules/beefy/Cargo.toml b/bridges/modules/beefy/Cargo.toml index 438f32fb146..e36bbb615f2 100644 --- a/bridges/modules/beefy/Cargo.toml +++ b/bridges/modules/beefy/Cargo.toml @@ -12,7 +12,7 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true } diff --git a/bridges/modules/grandpa/Cargo.toml b/bridges/modules/grandpa/Cargo.toml index 0db1827211a..0ca6b675035 100644 --- a/bridges/modules/grandpa/Cargo.toml +++ b/bridges/modules/grandpa/Cargo.toml @@ -13,7 +13,7 @@ workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/bridges/modules/messages/Cargo.toml b/bridges/modules/messages/Cargo.toml index df5b92db740..71c86ccc036 100644 --- a/bridges/modules/messages/Cargo.toml +++ b/bridges/modules/messages/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } num-traits = { version = "0.2", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/bridges/modules/parachains/Cargo.toml b/bridges/modules/parachains/Cargo.toml index 35213be0674..d3152f8d0a4 100644 --- a/bridges/modules/parachains/Cargo.toml +++ b/bridges/modules/parachains/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/bridges/modules/relayers/Cargo.toml b/bridges/modules/relayers/Cargo.toml index e2b7aca9224..08e1438d4f1 100644 --- a/bridges/modules/relayers/Cargo.toml +++ b/bridges/modules/relayers/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } 
scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/bridges/modules/xcm-bridge-hub-router/Cargo.toml b/bridges/modules/xcm-bridge-hub-router/Cargo.toml index 06f2a339bed..b80240c974d 100644 --- a/bridges/modules/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub-router/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive", "serde"] } diff --git a/bridges/modules/xcm-bridge-hub/Cargo.toml b/bridges/modules/xcm-bridge-hub/Cargo.toml index 4483a379090..9b22770061a 100644 --- a/bridges/modules/xcm-bridge-hub/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/bridges/primitives/beefy/Cargo.toml b/bridges/primitives/beefy/Cargo.toml index 2a13685207c..bd68076ca48 100644 --- a/bridges/primitives/beefy/Cargo.toml +++ b/bridges/primitives/beefy/Cargo.toml @@ -12,7 +12,7 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } serde = { default-features = false, features = ["alloc", "derive"], workspace = true } diff --git a/bridges/primitives/header-chain/Cargo.toml b/bridges/primitives/header-chain/Cargo.toml index f7a61a9ff32..def1f7ad4df 100644 --- a/bridges/primitives/header-chain/Cargo.toml +++ b/bridges/primitives/header-chain/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], workspace = true } diff --git a/bridges/primitives/messages/Cargo.toml b/bridges/primitives/messages/Cargo.toml index d41acfb9d32..20337873c2e 100644 --- a/bridges/primitives/messages/Cargo.toml +++ b/bridges/primitives/messages/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } serde = { features = ["alloc", "derive"], workspace = true } diff --git a/bridges/primitives/parachains/Cargo.toml b/bridges/primitives/parachains/Cargo.toml 
index 2e7000b86a5..a6e71876cef 100644 --- a/bridges/primitives/parachains/Cargo.toml +++ b/bridges/primitives/parachains/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2" scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/bridges/primitives/polkadot-core/Cargo.toml b/bridges/primitives/polkadot-core/Cargo.toml index 53b1e574cb1..d4b2f503e9e 100644 --- a/bridges/primitives/polkadot-core/Cargo.toml +++ b/bridges/primitives/polkadot-core/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } parity-util-mem = { version = "0.12.0", optional = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } diff --git a/bridges/primitives/relayers/Cargo.toml b/bridges/primitives/relayers/Cargo.toml index 1be7f1dc6eb..5081dddce1e 100644 --- a/bridges/primitives/relayers/Cargo.toml +++ b/bridges/primitives/relayers/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } # Bridge Dependencies diff --git a/bridges/primitives/runtime/Cargo.toml b/bridges/primitives/runtime/Cargo.toml index 9a9b0291687..ac65ad538b4 100644 --- a/bridges/primitives/runtime/Cargo.toml +++ b/bridges/primitives/runtime/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } hash-db = { version = "0.16.0", default-features = false } impl-trait-for-tuples = "0.2.2" log = { workspace = true } diff --git a/bridges/primitives/test-utils/Cargo.toml b/bridges/primitives/test-utils/Cargo.toml index d314c38683c..99f5ee0d1ae 100644 --- a/bridges/primitives/test-utils/Cargo.toml +++ b/bridges/primitives/test-utils/Cargo.toml @@ -15,7 +15,7 @@ bp-header-chain = { path = "../header-chain", default-features = false } bp-parachains = { path = "../parachains", default-features = false } bp-polkadot-core = { path = "../polkadot-core", default-features = false } bp-runtime = { path = "../runtime", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } ed25519-dalek = { version = "2.1", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false } diff --git 
a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml index 94eece16d57..b94e7220245 100644 --- a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } # Substrate Dependencies diff --git a/bridges/relays/client-substrate/Cargo.toml b/bridges/relays/client-substrate/Cargo.toml index 2c98441fc30..cb7eae4f340 100644 --- a/bridges/relays/client-substrate/Cargo.toml +++ b/bridges/relays/client-substrate/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] async-std = { version = "1.9.0", features = ["attributes"] } async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } futures = "0.3.30" jsonrpsee = { version = "0.22", features = ["macros", "ws-client"] } log = { workspace = true } diff --git a/bridges/relays/lib-substrate-relay/Cargo.toml b/bridges/relays/lib-substrate-relay/Cargo.toml index 3f657645b59..077d1b1ff35 100644 --- a/bridges/relays/lib-substrate-relay/Cargo.toml +++ b/bridges/relays/lib-substrate-relay/Cargo.toml @@ -14,7 +14,7 @@ workspace = true anyhow = "1.0" async-std = "1.9.0" async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } futures = "0.3.30" hex = "0.4" log = { workspace = true } diff --git a/bridges/relays/parachains/Cargo.toml b/bridges/relays/parachains/Cargo.toml index a73a2f5b31c..8d38e4e6bd0 100644 --- a/bridges/relays/parachains/Cargo.toml +++ b/bridges/relays/parachains/Cargo.toml @@ -23,6 +23,6 @@ bp-polkadot-core = { path = "../../primitives/polkadot-core" } relay-substrate-client = { path = "../client-substrate" } [dev-dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } relay-substrate-client = { path = "../client-substrate", features = ["test-helpers"] } sp-core = { path = "../../../substrate/primitives/core" } diff --git a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml index 0e15304ff11..e60934e3474 100644 --- a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml +++ b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { optional = true, workspace = true, default-features = true } serde_json = { optional = true, workspace = true, default-features = true } -codec = { version = "3.6.1", package = "parity-scale-codec", default-features = false, features = ["derive"] } +codec = { version = "3.6.12", package = "parity-scale-codec", default-features = false, features = ["derive"] } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1", optional = true } log = { workspace = true } diff --git a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml index 71d49e684e0..d63398770f2 100644 --- 
a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { optional = true, workspace = true, default-features = true } -codec = { version = "3.6.1", package = "parity-scale-codec", default-features = false, features = ["derive"] } +codec = { version = "3.6.12", package = "parity-scale-codec", default-features = false, features = ["derive"] } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1", optional = true } log = { workspace = true } diff --git a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml index 387491abae0..15c6c3a5b32 100644 --- a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { features = ["alloc", "derive"], workspace = true } -codec = { version = "3.6.1", package = "parity-scale-codec", default-features = false, features = ["derive"] } +codec = { version = "3.6.12", package = "parity-scale-codec", default-features = false, features = ["derive"] } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } diff --git a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml index 2d58517c18b..1b1a9905928 100644 --- a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { version = "3.1.5", package = "parity-scale-codec", default-features = false, features = ["derive"] } +codec = { version = "3.6.12", package = "parity-scale-codec", default-features = false, features = ["derive"] } scale-info = { version = "2.7.0", default-features = false, features = ["derive"] } sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } diff --git a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml index fa49cc0f29a..b8d704f1cb9 100644 --- a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { version = "3.1.5", package = "parity-scale-codec", features = ["derive"], default-features = false } +codec = { version = "3.6.12", package = "parity-scale-codec", features = ["derive"], default-features = false } sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } frame-support = { path = "../../../../../substrate/frame/support", default-features = false } diff --git a/bridges/snowbridge/pallets/system/Cargo.toml b/bridges/snowbridge/pallets/system/Cargo.toml index c1ee44214c8..5bbbb1d9310 100644 --- a/bridges/snowbridge/pallets/system/Cargo.toml +++ b/bridges/snowbridge/pallets/system/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { 
package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } diff --git a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml index 4073dc0f71c..42df5edfb7b 100644 --- a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } diff --git a/bridges/snowbridge/primitives/beacon/Cargo.toml b/bridges/snowbridge/primitives/beacon/Cargo.toml index 7d901bcdb04..18123910c35 100644 --- a/bridges/snowbridge/primitives/beacon/Cargo.toml +++ b/bridges/snowbridge/primitives/beacon/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] serde = { optional = true, features = ["derive"], workspace = true, default-features = true } hex = { version = "0.4", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } rlp = { version = "0.5", default-features = false } diff --git a/bridges/snowbridge/primitives/core/Cargo.toml b/bridges/snowbridge/primitives/core/Cargo.toml index 9a299ad0ae9..573ab6608e5 100644 --- a/bridges/snowbridge/primitives/core/Cargo.toml +++ b/bridges/snowbridge/primitives/core/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] serde = { optional = true, features = ["alloc", "derive"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1" } diff --git a/bridges/snowbridge/primitives/ethereum/Cargo.toml b/bridges/snowbridge/primitives/ethereum/Cargo.toml index d72cd266173..fb0b6cbaf3c 100644 --- a/bridges/snowbridge/primitives/ethereum/Cargo.toml +++ b/bridges/snowbridge/primitives/ethereum/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] serde = { optional = true, features = ["derive"], workspace = true, default-features = true } serde-big-array = { optional = true, features = ["const-generics"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } ethbloom = { version = "0.13.0", default-features = false } ethereum-types = { version = "0.14.1", default-features = false, features = ["codec", "rlp", "serialize"] } diff --git a/bridges/snowbridge/primitives/router/Cargo.toml b/bridges/snowbridge/primitives/router/Cargo.toml index 361b539af3e..1d3fc43909d 100644 --- 
a/bridges/snowbridge/primitives/router/Cargo.toml +++ b/bridges/snowbridge/primitives/router/Cargo.toml @@ -12,7 +12,7 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } log = { workspace = true } diff --git a/bridges/snowbridge/runtime/runtime-common/Cargo.toml b/bridges/snowbridge/runtime/runtime-common/Cargo.toml index 995475349e4..2372908b86a 100644 --- a/bridges/snowbridge/runtime/runtime-common/Cargo.toml +++ b/bridges/snowbridge/runtime/runtime-common/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] log = { workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } frame-support = { path = "../../../../substrate/frame/support", default-features = false } sp-std = { path = "../../../../substrate/primitives/std", default-features = false } sp-arithmetic = { path = "../../../../substrate/primitives/arithmetic", default-features = false } diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml index 7cbb3857403..20c3fc012d0 100644 --- a/bridges/snowbridge/runtime/test-common/Cargo.toml +++ b/bridges/snowbridge/runtime/test-common/Cargo.toml @@ -11,7 +11,7 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } # Substrate frame-support = { path = "../../../../substrate/frame/support", default-features = false } diff --git a/cumulus/client/cli/Cargo.toml b/cumulus/client/cli/Cargo.toml index 0b2edb593c4..410ac8b983d 100644 --- a/cumulus/client/cli/Cargo.toml +++ b/cumulus/client/cli/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] clap = { version = "4.5.3", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.0.0" } +codec = { package = "parity-scale-codec", version = "3.6.12" } url = "2.4.0" # Substrate diff --git a/cumulus/client/collator/Cargo.toml b/cumulus/client/collator/Cargo.toml index 42f7342d1a5..39cedf87a0c 100644 --- a/cumulus/client/collator/Cargo.toml +++ b/cumulus/client/collator/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] parking_lot = "0.12.1" -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } futures = "0.3.30" tracing = "0.1.25" diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index 70dd67cb9a0..547137b7306 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } futures = "0.3.28" tracing = "0.1.37" schnellru = "0.2.1" diff --git a/cumulus/client/consensus/common/Cargo.toml b/cumulus/client/consensus/common/Cargo.toml 
index fb4a85ad122..3a7c6b57d6d 100644 --- a/cumulus/client/consensus/common/Cargo.toml +++ b/cumulus/client/consensus/common/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } dyn-clone = "1.0.16" futures = "0.3.28" log = { workspace = true, default-features = true } diff --git a/cumulus/client/network/Cargo.toml b/cumulus/client/network/Cargo.toml index 1210975ef69..d4fc7528725 100644 --- a/cumulus/client/network/Cargo.toml +++ b/cumulus/client/network/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } futures = "0.3.28" futures-timer = "3.0.2" parking_lot = "0.12.1" diff --git a/cumulus/client/parachain-inherent/Cargo.toml b/cumulus/client/parachain-inherent/Cargo.toml index 6e9adab1ffc..85619e84034 100644 --- a/cumulus/client/parachain-inherent/Cargo.toml +++ b/cumulus/client/parachain-inherent/Cargo.toml @@ -8,7 +8,7 @@ license = "Apache-2.0" [dependencies] async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } scale-info = { version = "2.11.1", features = ["derive"] } tracing = { version = "0.1.37" } diff --git a/cumulus/client/pov-recovery/Cargo.toml b/cumulus/client/pov-recovery/Cargo.toml index 571935620d6..7afe7fae34b 100644 --- a/cumulus/client/pov-recovery/Cargo.toml +++ b/cumulus/client/pov-recovery/Cargo.toml @@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } futures = "0.3.28" futures-timer = "3.0.2" rand = "0.8.5" diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml index 6df9847252f..5962c68bba7 100644 --- a/cumulus/client/relay-chain-interface/Cargo.toml +++ b/cumulus/client/relay-chain-interface/Cargo.toml @@ -23,4 +23,4 @@ futures = "0.3.28" async-trait = "0.1.79" thiserror = { workspace = true } jsonrpsee-core = "0.22" -parity-scale-codec = "3.6.4" +parity-scale-codec = "3.6.12" diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml index 14981677289..2ec42ebca85 100644 --- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml +++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml @@ -32,7 +32,7 @@ tokio-util = { version = "0.7.8", features = ["compat"] } futures = "0.3.28" futures-timer = "3.0.2" -parity-scale-codec = "3.6.4" +parity-scale-codec = "3.6.12" jsonrpsee = { version = "0.22", features = ["ws-client"] } tracing = "0.1.37" async-trait = "0.1.79" diff --git a/cumulus/pallets/aura-ext/Cargo.toml b/cumulus/pallets/aura-ext/Cargo.toml index fe717596f9b..daff5ef8f48 100644 --- a/cumulus/pallets/aura-ext/Cargo.toml +++ b/cumulus/pallets/aura-ext/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = 
"parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate diff --git a/cumulus/pallets/collator-selection/Cargo.toml b/cumulus/pallets/collator-selection/Cargo.toml index 25ca2fe057b..f30802fa5d8 100644 --- a/cumulus/pallets/collator-selection/Cargo.toml +++ b/cumulus/pallets/collator-selection/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true } -codec = { default-features = false, features = ["derive"], package = "parity-scale-codec", version = "3.0.0" } +codec = { default-features = false, features = ["derive"], package = "parity-scale-codec", version = "3.6.12" } rand = { version = "0.8.5", features = ["std_rng"], default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/cumulus/pallets/dmp-queue/Cargo.toml b/cumulus/pallets/dmp-queue/Cargo.toml index b2b24aeed72..687cda164fb 100644 --- a/cumulus/pallets/dmp-queue/Cargo.toml +++ b/cumulus/pallets/dmp-queue/Cargo.toml @@ -14,7 +14,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 57e274db361..0c94d0d05a6 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] bytes = { version = "1.4.0", default-features = false } -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } environmental = { version = "1.1.4", default-features = false } impl-trait-for-tuples = "0.2.1" log = { workspace = true } diff --git a/cumulus/pallets/session-benchmarking/Cargo.toml b/cumulus/pallets/session-benchmarking/Cargo.toml index 43fde4ea600..001c3d8acea 100644 --- a/cumulus/pallets/session-benchmarking/Cargo.toml +++ b/cumulus/pallets/session-benchmarking/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -parity-scale-codec = { version = "3.6.4", default-features = false } +parity-scale-codec = { version = "3.6.12", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/cumulus/pallets/solo-to-para/Cargo.toml b/cumulus/pallets/solo-to-para/Cargo.toml index 417038d7833..17b0fb2a016 100644 --- a/cumulus/pallets/solo-to-para/Cargo.toml +++ b/cumulus/pallets/solo-to-para/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = 
"2.11.1", default-features = false, features = ["derive"] } # Substrate diff --git a/cumulus/pallets/xcm/Cargo.toml b/cumulus/pallets/xcm/Cargo.toml index 9122e110fb9..178d981702f 100644 --- a/cumulus/pallets/xcm/Cargo.toml +++ b/cumulus/pallets/xcm/Cargo.toml @@ -10,7 +10,7 @@ description = "Pallet for stuff specific to parachains' usage of XCM" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-std = { path = "../../../substrate/primitives/std", default-features = false } diff --git a/cumulus/pallets/xcmp-queue/Cargo.toml b/cumulus/pallets/xcmp-queue/Cargo.toml index e3530ef7bf0..1941214da2e 100644 --- a/cumulus/pallets/xcmp-queue/Cargo.toml +++ b/cumulus/pallets/xcmp-queue/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"], default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"], default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/cumulus/parachains/common/Cargo.toml b/cumulus/parachains/common/Cargo.toml index fa16205d0fd..2b943b6dca5 100644 --- a/cumulus/parachains/common/Cargo.toml +++ b/cumulus/parachains/common/Cargo.toml @@ -13,7 +13,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"], default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"], default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml index 8c44cce7d92..b010d2a2963 100644 --- a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml @@ -10,7 +10,7 @@ description = "Common resources for integration testing with xcm-emulator" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } paste = "1.0.14" # Substrate diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml index ddd6d2d0498..9abecbecc48 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml @@ -11,7 +11,7 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } assert_matches = "1.5.0" # Substrate diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml 
b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml index 0a2b0f6d45e..4fd7b5bfdf2 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml @@ -11,7 +11,7 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } assert_matches = "1.5.0" # Substrate diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml index bbe54c367ba..bed5af92f6e 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml @@ -11,7 +11,7 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } hex-literal = "0.4.1" diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml index d1dbef9fc41..297f68de621 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml @@ -11,7 +11,7 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } assert_matches = "1.5.0" # Substrate diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml index 1570aa7662f..29a939951e5 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml @@ -8,7 +8,7 @@ description = "People Rococo runtime integration tests with xcm-emulator" publish = false [dependencies] -codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } # Substrate sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml index bc093dc0de6..6eab6f52aa7 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml @@ -8,7 +8,7 @@ description = "People Westend runtime integration tests with xcm-emulator" publish = false [dependencies] -codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } +codec = { package = 
"parity-scale-codec", version = "3.6.12", default-features = false } # Substrate sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } diff --git a/cumulus/parachains/pallets/collective-content/Cargo.toml b/cumulus/parachains/pallets/collective-content/Cargo.toml index 207259bee52..92e0a546313 100644 --- a/cumulus/parachains/pallets/collective-content/Cargo.toml +++ b/cumulus/parachains/pallets/collective-content/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } diff --git a/cumulus/parachains/pallets/parachain-info/Cargo.toml b/cumulus/parachains/pallets/parachain-info/Cargo.toml index 17981d238fd..01ee12bf4e7 100644 --- a/cumulus/parachains/pallets/parachain-info/Cargo.toml +++ b/cumulus/parachains/pallets/parachain-info/Cargo.toml @@ -10,7 +10,7 @@ description = "Pallet to store the parachain ID" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../../../substrate/frame/support", default-features = false } diff --git a/cumulus/parachains/pallets/ping/Cargo.toml b/cumulus/parachains/pallets/ping/Cargo.toml index 15169b08b91..f51946e9ebd 100644 --- a/cumulus/parachains/pallets/ping/Cargo.toml +++ b/cumulus/parachains/pallets/ping/Cargo.toml @@ -10,7 +10,7 @@ description = "Ping Pallet for Cumulus XCM/UMP testing." 
 workspace = true

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 sp-std = { path = "../../../../substrate/primitives/std", default-features = false }
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml
index c3f1c8b4f22..95ce7efdf3f 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml
@@ -10,7 +10,7 @@ license = "Apache-2.0"
 workspace = true

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] }
 hex-literal = { version = "0.4.1" }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
index bacc9c1b7c2..3d27f52d0d5 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
@@ -10,7 +10,7 @@ license = "Apache-2.0"
 workspace = true

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] }
 hex-literal = { version = "0.4.1" }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
diff --git a/cumulus/parachains/runtimes/assets/common/Cargo.toml b/cumulus/parachains/runtimes/assets/common/Cargo.toml
index 12dfd9da1ff..4664e0cb9a7 100644
--- a/cumulus/parachains/runtimes/assets/common/Cargo.toml
+++ b/cumulus/parachains/runtimes/assets/common/Cargo.toml
@@ -10,7 +10,7 @@ license = "Apache-2.0"
 workspace = true

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
 impl-trait-for-tuples = "0.2.2"
diff --git a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml
index 883c93c97b4..776c7dce0b4 100644
--- a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml
+++ b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml
@@ -10,7 +10,7 @@ license = "Apache-2.0"
 workspace = true

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] }

 # Substrate
 frame-support = { path = "../../../../../substrate/frame/support", default-features = false }
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
index 574406ab305..0fb12531b8e 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
 substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true }

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }
 hex-literal = { version = "0.4.1" }
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml
index a7241cc6d10..3fbb95a17d0 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
 substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true }

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 hex-literal = { version = "0.4.1" }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml
index 2f5f783ce48..aece34613e6 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml
+++ b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml
@@ -7,7 +7,7 @@ description = "Bridge hub common utilities"
 license = "Apache-2.0"

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../../../../../substrate/frame/support", default-features = false }
 sp-std = { path = "../../../../../substrate/primitives/std", default-features = false }
diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml
index 5f2a6e050d8..5a8fa18b929 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml
+++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml
@@ -10,7 +10,7 @@ license = "Apache-2.0"
 workspace = true

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] }
 impl-trait-for-tuples = "0.2"
 log = { workspace = true }

diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml
index 8e7aa6d3464..a7f51722242 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml
@@ -10,7 +10,7 @@ description = "Westend Collectives Parachain Runtime"
 workspace = true

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] }
 hex-literal = { version = "0.4.1" }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml
index 74c5b5f8115..4040e977faf 100644
--- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true }

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 hex-literal = { version = "0.4.1", optional = true }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml
index ee9f5e87ec8..b92dc57989c 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
 substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true }

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 hex-literal = "0.4.1"
 log = { workspace = true }
 scale-info = { version = "2.9.0", default-features = false, features = ["derive"] }
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml
index 60cc7e2f765..a377e243af3 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
 substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true }

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 hex-literal = "0.4.1"
 log = { workspace = true }
 scale-info = { version = "2.9.0", default-features = false, features = ["derive"] }
diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml
index 808bed38732..ccd73fb5ee6 100644
--- a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml
@@ -10,7 +10,7 @@ description = "Glutton parachain runtime."
 workspace = true

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }

 # Substrate
diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml
index 7183be5fc82..2b990d9270f 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml
@@ -10,7 +10,7 @@ license = "Apache-2.0"
 substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true }

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 enumflags2 = { version = "0.7.7" }
 hex-literal = { version = "0.4.1" }
 log = { workspace = true }
diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml
index 576c3b1aa4e..cc7b6a6e2ff 100644
--- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml
@@ -10,7 +10,7 @@ license = "Apache-2.0"
 substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true }

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 enumflags2 = { version = "0.7.7" }
 hex-literal = { version = "0.4.1" }
 log = { workspace = true }
diff --git a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml
index eb702c9f2cd..469269e37ff 100644
--- a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml
+++ b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml
@@ -10,7 +10,7 @@ license = "Apache-2.0"
 workspace = true

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }

 # Substrate
diff --git a/cumulus/parachains/runtimes/starters/shell/Cargo.toml b/cumulus/parachains/runtimes/starters/shell/Cargo.toml
index f66d04fec1f..ff388d2fa2e 100644
--- a/cumulus/parachains/runtimes/starters/shell/Cargo.toml
+++ b/cumulus/parachains/runtimes/starters/shell/Cargo.toml
@@ -10,7 +10,7 @@ license = "Apache-2.0"
 workspace = true

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }

 # Substrate
diff --git a/cumulus/parachains/runtimes/test-utils/Cargo.toml b/cumulus/parachains/runtimes/test-utils/Cargo.toml
index eda88beb7da..475acb13b8b 100644
--- a/cumulus/parachains/runtimes/test-utils/Cargo.toml
+++ b/cumulus/parachains/runtimes/test-utils/Cargo.toml
@@ -10,7 +10,7 @@ license = "Apache-2.0"
 workspace = true

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] }

 # Substrate
 frame-support = { path = "../../../../substrate/frame/support", default-features = false }
diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml
index 4ebb95f26cf..0ac79a3eab5 100644
--- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml
+++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true }

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 hex-literal = { version = "0.4.1", optional = true }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml
index df3aaa92c79..e74caf6b1f4 100644
--- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml
+++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml
@@ -10,7 +10,7 @@ license = "Apache-2.0"
 workspace = true

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }

 # Substrate
diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml
index 3d0aa94e8de..a22606edb6c 100644
--- a/cumulus/polkadot-parachain/Cargo.toml
+++ b/cumulus/polkadot-parachain/Cargo.toml
@@ -17,7 +17,7 @@ path = "src/main.rs"
 [dependencies]
 async-trait = "0.1.79"
 clap = { version = "4.5.3", features = ["derive"] }
-codec = { package = "parity-scale-codec", version = "3.0.0" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 futures = "0.3.28"
 hex-literal = "0.4.1"
 log = { workspace = true, default-features = true }
diff --git a/cumulus/primitives/aura/Cargo.toml b/cumulus/primitives/aura/Cargo.toml
index 21c06ef22d9..ef96f334d63 100644
--- a/cumulus/primitives/aura/Cargo.toml
+++ b/cumulus/primitives/aura/Cargo.toml
@@ -10,7 +10,7 @@ description = "Core primitives for Aura in Cumulus"
 workspace = true

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }

 # Substrate
 sp-api = { path = "../../../substrate/primitives/api", default-features = false }
diff --git a/cumulus/primitives/core/Cargo.toml b/cumulus/primitives/core/Cargo.toml
index 62c3f675191..595aa5f72bf 100644
--- a/cumulus/primitives/core/Cargo.toml
+++ b/cumulus/primitives/core/Cargo.toml
@@ -10,7 +10,7 @@ description = "Cumulus related core primitive types and traits"
 workspace = true

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }

 # Substrate
diff --git a/cumulus/primitives/parachain-inherent/Cargo.toml b/cumulus/primitives/parachain-inherent/Cargo.toml
index 4da561661b6..0156eb02e2b 100644
--- a/cumulus/primitives/parachain-inherent/Cargo.toml
+++ b/cumulus/primitives/parachain-inherent/Cargo.toml
@@ -11,7 +11,7 @@ workspace = true

 [dependencies]
 async-trait = { version = "0.1.79", optional = true }
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }

 # Substrate
diff --git a/cumulus/primitives/storage-weight-reclaim/Cargo.toml b/cumulus/primitives/storage-weight-reclaim/Cargo.toml
index 6dbf7904bf7..bdfb83ad72a 100644
--- a/cumulus/primitives/storage-weight-reclaim/Cargo.toml
+++ b/cumulus/primitives/storage-weight-reclaim/Cargo.toml
@@ -10,7 +10,7 @@ license = "Apache-2.0"
 workspace = true

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }

diff --git a/cumulus/primitives/timestamp/Cargo.toml b/cumulus/primitives/timestamp/Cargo.toml
index 59f327b2642..7a6f4787ba3 100644
--- a/cumulus/primitives/timestamp/Cargo.toml
+++ b/cumulus/primitives/timestamp/Cargo.toml
@@ -10,7 +10,7 @@ license = "Apache-2.0"
 workspace = true

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 futures = "0.3.28"

 # Substrate
diff --git a/cumulus/primitives/utility/Cargo.toml b/cumulus/primitives/utility/Cargo.toml
index 1e2c300b9ba..85e3ac2f760 100644
--- a/cumulus/primitives/utility/Cargo.toml
+++ b/cumulus/primitives/utility/Cargo.toml
@@ -10,7 +10,7 @@ description = "Helper datatypes for Cumulus"
 workspace = true

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 log = { workspace = true }

 # Substrate
diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml
index 254361e8542..120983eb939 100644
--- a/cumulus/test/client/Cargo.toml
+++ b/cumulus/test/client/Cargo.toml
@@ -9,7 +9,7 @@ publish = false
 workspace = true

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }

 # Substrate
 sc-service = { path = "../../../substrate/client/service" }
diff --git a/cumulus/test/relay-sproof-builder/Cargo.toml b/cumulus/test/relay-sproof-builder/Cargo.toml
index ff5c4bd66b9..d775c61f780 100644
--- a/cumulus/test/relay-sproof-builder/Cargo.toml
+++ b/cumulus/test/relay-sproof-builder/Cargo.toml
@@ -10,7 +10,7 @@ description = "Mocked relay state proof builder for testing Cumulus."
 workspace = true

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }

 # Substrate
 sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false }
diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml
index 1969045640e..eb160bd3355 100644
--- a/cumulus/test/runtime/Cargo.toml
+++ b/cumulus/test/runtime/Cargo.toml
@@ -9,7 +9,7 @@ publish = false
 workspace = true

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }

 # Substrate
diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml
index 18213b2f632..c54e19d0238 100644
--- a/cumulus/test/service/Cargo.toml
+++ b/cumulus/test/service/Cargo.toml
@@ -15,7 +15,7 @@ path = "src/main.rs"
 [dependencies]
 async-trait = "0.1.79"
 clap = { version = "4.5.3", features = ["derive"] }
-codec = { package = "parity-scale-codec", version = "3.0.0" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 criterion = { version = "0.5.1", features = ["async_tokio"] }
 jsonrpsee = { version = "0.22", features = ["server"] }
 rand = "0.8.5"
diff --git a/cumulus/xcm/xcm-emulator/Cargo.toml b/cumulus/xcm/xcm-emulator/Cargo.toml
index 6b45770a8e3..0ed77bf5b70 100644
--- a/cumulus/xcm/xcm-emulator/Cargo.toml
+++ b/cumulus/xcm/xcm-emulator/Cargo.toml
@@ -10,7 +10,7 @@ license = "Apache-2.0"
 workspace = true

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 paste = "1.0.14"
 log = { workspace = true }
 lazy_static = "1.4.0"
diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml
index fe53845d849..269ed4d012c 100644
--- a/docs/sdk/Cargo.toml
+++ b/docs/sdk/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true

 [dependencies]
 # Needed for all FRAME-based code
-parity-scale-codec = { version = "3.0.0", default-features = false }
+parity-scale-codec = { version = "3.6.12", default-features = false }
 scale-info = { version = "2.6.0", default-features = false }
 frame = { package = "polkadot-sdk-frame", path = "../../substrate/frame", features = [
 	"experimental",
diff --git a/polkadot/core-primitives/Cargo.toml b/polkadot/core-primitives/Cargo.toml
index 8dfa0b87328..9794f8286ac 100644
--- a/polkadot/core-primitives/Cargo.toml
+++ b/polkadot/core-primitives/Cargo.toml
@@ -14,7 +14,7 @@ sp-core = { path = "../../substrate/primitives/core", default-features = false }
 sp-std = { path = "../../substrate/primitives/std", default-features = false }
 sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
-parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
+parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] }

 [features]
 default = ["std"]
diff --git a/polkadot/erasure-coding/Cargo.toml b/polkadot/erasure-coding/Cargo.toml
index db5967e20f5..b230631f72b 100644
--- a/polkadot/erasure-coding/Cargo.toml
+++ b/polkadot/erasure-coding/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
 polkadot-primitives = { path = "../primitives" }
 polkadot-node-primitives = { package = "polkadot-node-primitives", path = "../node/primitives" }
 novelpoly = { package = "reed-solomon-novelpoly", version = "2.0.0" }
-parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "std"] }
+parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive", "std"] }
 sp-core = { path = "../../substrate/primitives/core" }
 sp-trie = { path = "../../substrate/primitives/trie" }
 thiserror = { workspace = true }
diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml
index ebc53a9e01b..0a28c3a830d 100644
--- a/polkadot/node/collation-generation/Cargo.toml
+++ b/polkadot/node/collation-generation/Cargo.toml
@@ -20,7 +20,7 @@ polkadot-primitives = { path = "../../primitives" }
 sp-core = { path = "../../../substrate/primitives/core" }
 sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" }
 thiserror = { workspace = true }
-parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] }
+parity-scale-codec = { version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] }

 [dev-dependencies]
 polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" }
diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml
index 5139d6c6a3f..5bf80d59ede 100644
--- a/polkadot/node/core/approval-voting/Cargo.toml
+++ b/polkadot/node/core/approval-voting/Cargo.toml
@@ -12,7 +12,7 @@ workspace = true
 [dependencies]
 futures = "0.3.30"
 futures-timer = "3.0.2"
-parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] }
+parity-scale-codec = { version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] }
 gum = { package = "tracing-gum", path = "../../gum" }
 bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
 schnellru = "0.2.1"
diff --git a/polkadot/node/core/av-store/Cargo.toml b/polkadot/node/core/av-store/Cargo.toml
index bc9b979228a..c5b3c382011 100644
--- a/polkadot/node/core/av-store/Cargo.toml
+++ b/polkadot/node/core/av-store/Cargo.toml
@@ -17,7 +17,7 @@ thiserror = { workspace = true }
 gum = { package = "tracing-gum", path = "../../gum" }
 bitvec = "1.0.0"

-parity-scale-codec = { version = "3.6.1", features = ["derive"] }
+parity-scale-codec = { version = "3.6.12", features = ["derive"] }
 erasure = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" }
 polkadot-node-subsystem = { path = "../../subsystem" }
 polkadot-node-subsystem-util = { path = "../../subsystem-util" }
diff --git a/polkadot/node/core/candidate-validation/Cargo.toml b/polkadot/node/core/candidate-validation/Cargo.toml
index 0cf4707aad2..e79b3a734b8 100644
--- a/polkadot/node/core/candidate-validation/Cargo.toml
+++ b/polkadot/node/core/candidate-validation/Cargo.toml
@@ -16,7 +16,7 @@ futures-timer = "3.0.2"
 gum = { package = "tracing-gum", path = "../../gum" }

 sp-maybe-compressed-blob = { package = "sp-maybe-compressed-blob", path = "../../../../substrate/primitives/maybe-compressed-blob" }
-parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] }
+parity-scale-codec = { version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] }
 polkadot-primitives = { path = "../../../primitives" }
 polkadot-parachain-primitives = { path = "../../../parachain" }

diff --git a/polkadot/node/core/chain-api/Cargo.toml b/polkadot/node/core/chain-api/Cargo.toml
index f4d02d3f47b..bd8531c2078 100644
--- a/polkadot/node/core/chain-api/Cargo.toml
+++ b/polkadot/node/core/chain-api/Cargo.toml
@@ -21,7 +21,7 @@ sc-consensus-babe = { path = "../../../../substrate/client/consensus/babe" }
 [dev-dependencies]
 futures = { version = "0.3.30", features = ["thread-pool"] }
 maplit = "1.0.2"
-parity-scale-codec = "3.6.1"
+parity-scale-codec = "3.6.12"
 polkadot-node-primitives = { path = "../../primitives" }
 polkadot-primitives = { path = "../../../primitives" }
 polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
diff --git a/polkadot/node/core/chain-selection/Cargo.toml b/polkadot/node/core/chain-selection/Cargo.toml
index 318f27a4308..b58053b5417 100644
--- a/polkadot/node/core/chain-selection/Cargo.toml
+++ b/polkadot/node/core/chain-selection/Cargo.toml
@@ -19,7 +19,7 @@ polkadot-node-subsystem = { path = "../../subsystem" }
 polkadot-node-subsystem-util = { path = "../../subsystem-util" }
 kvdb = "0.13.0"
 thiserror = { workspace = true }
-parity-scale-codec = "3.6.1"
+parity-scale-codec = "3.6.12"

 [dev-dependencies]
 polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml
index dba3bcdd643..8bd510697c9 100644
--- a/polkadot/node/core/dispute-coordinator/Cargo.toml
+++ b/polkadot/node/core/dispute-coordinator/Cargo.toml
@@ -12,7 +12,7 @@ workspace = true
 [dependencies]
 futures = "0.3.30"
 gum = { package = "tracing-gum", path = "../../gum" }
-parity-scale-codec = "3.6.1"
+parity-scale-codec = "3.6.12"
 kvdb = "0.13.0"
 thiserror = { workspace = true }
 schnellru = "0.2.1"
diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml
index d38a23c3fda..5b4f12a5fbd 100644
--- a/polkadot/node/core/prospective-parachains/Cargo.toml
+++ b/polkadot/node/core/prospective-parachains/Cargo.toml
@@ -12,7 +12,7 @@ workspace = true
 [dependencies]
 futures = "0.3.30"
 gum = { package = "tracing-gum", path = "../../gum" }
-parity-scale-codec = "3.6.4"
+parity-scale-codec = "3.6.12"
 thiserror = { workspace = true }
 fatality = "0.1.1"
 bitvec = "1"
diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml
index 9666206b1e7..ba9954a1066 100644
--- a/polkadot/node/core/pvf/Cargo.toml
+++ b/polkadot/node/core/pvf/Cargo.toml
@@ -25,7 +25,7 @@ tempfile = "3.3.0"
 thiserror = { workspace = true }
 tokio = { version = "1.24.2", features = ["fs", "process"] }

-parity-scale-codec = { version = "3.6.1", default-features = false, features = [
+parity-scale-codec = { version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }

diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml
index adf353fe2e4..5ad7409cc6c 100644
--- a/polkadot/node/core/pvf/common/Cargo.toml
+++ b/polkadot/node/core/pvf/common/Cargo.toml
@@ -17,7 +17,7 @@ libc = "0.2.152"
 nix = { version = "0.28.0", features = ["resource", "sched"] }
 thiserror = { workspace = true }

-parity-scale-codec = { version = "3.6.1", default-features = false, features = [
+parity-scale-codec = { version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }

diff --git a/polkadot/node/core/pvf/execute-worker/Cargo.toml b/polkadot/node/core/pvf/execute-worker/Cargo.toml
index 3480264d1da..ac90fac4d57 100644
--- a/polkadot/node/core/pvf/execute-worker/Cargo.toml
+++ b/polkadot/node/core/pvf/execute-worker/Cargo.toml
@@ -16,7 +16,7 @@ cfg-if = "1.0"
 nix = { version = "0.28.0", features = ["process", "resource", "sched"] }
 libc = "0.2.152"

-parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
+parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] }

 polkadot-node-core-pvf-common = { path = "../common" }
 polkadot-parachain-primitives = { path = "../../../../parachain" }
diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml
index 12628565e3a..1850a204890 100644
--- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml
+++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml
@@ -20,7 +20,7 @@ tikv-jemalloc-ctl = { version = "0.5.0", optional = true }
 tikv-jemallocator = { version = "0.5.0", optional = true }
 nix = { version = "0.28.0", features = ["process", "resource", "sched"] }

-parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
+parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] }

 polkadot-node-core-pvf-common = { path = "../common" }
 polkadot-primitives = { path = "../../../../primitives" }
diff --git a/polkadot/node/jaeger/Cargo.toml b/polkadot/node/jaeger/Cargo.toml
index bee725c0876..f879f9550d0 100644
--- a/polkadot/node/jaeger/Cargo.toml
+++ b/polkadot/node/jaeger/Cargo.toml
@@ -21,4 +21,4 @@ sp-core = { path = "../../../substrate/primitives/core" }
 thiserror = { workspace = true }
 tokio = "1.37"
 log = { workspace = true, default-features = true }
-parity-scale-codec = { version = "3.6.1", default-features = false }
+parity-scale-codec = { version = "3.6.12", default-features = false }
diff --git a/polkadot/node/metrics/Cargo.toml b/polkadot/node/metrics/Cargo.toml
index fbf0abf829e..e3a53cc6df1 100644
--- a/polkadot/node/metrics/Cargo.toml
+++ b/polkadot/node/metrics/Cargo.toml
@@ -21,7 +21,7 @@ sc-cli = { path = "../../../substrate/client/cli" }
 substrate-prometheus-endpoint = { path = "../../../substrate/utils/prometheus" }
 sc-tracing = { path = "../../../substrate/client/tracing" }

-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 primitives = { package = "polkadot-primitives", path = "../../primitives" }
 bs58 = { version = "0.5.0", features = ["alloc"] }
 log = { workspace = true, default-features = true }
diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml
index ff352944908..39e2985a88c 100644
--- a/polkadot/node/network/availability-distribution/Cargo.toml
+++ b/polkadot/node/network/availability-distribution/Cargo.toml
@@ -12,7 +12,7 @@ workspace = true
 [dependencies]
 futures = "0.3.30"
 gum = { package = "tracing-gum", path = "../../gum" }
-parity-scale-codec = { version = "3.6.1", features = ["std"] }
+parity-scale-codec = { version = "3.6.12", features = ["std"] }
 polkadot-primitives = { path = "../../../primitives" }
 polkadot-erasure-coding = { path = "../../../erasure-coding" }
 polkadot-node-network-protocol = { path = "../protocol" }
diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml
index d12c1b1cff9..eb503f502b2 100644
--- a/polkadot/node/network/availability-recovery/Cargo.toml
+++ b/polkadot/node/network/availability-recovery/Cargo.toml
@@ -25,7 +25,7 @@ polkadot-node-primitives = { path = "../../primitives" }
 polkadot-node-subsystem = { path = "../../subsystem" }
 polkadot-node-subsystem-util = { path = "../../subsystem-util" }
 polkadot-node-network-protocol = { path = "../protocol" }
-parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
+parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] }
 sc-network = { path = "../../../../substrate/client/network" }

 [dev-dependencies]
diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml
index 4bb49baba92..b609fb1e071 100644
--- a/polkadot/node/network/bridge/Cargo.toml
+++ b/polkadot/node/network/bridge/Cargo.toml
@@ -15,7 +15,7 @@ async-trait = "0.1.79"
 futures = "0.3.30"
 gum = { package = "tracing-gum", path = "../../gum" }
 polkadot-primitives = { path = "../../../primitives" }
-parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
+parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] }
 sc-network = { path = "../../../../substrate/client/network" }
 sp-consensus = { path = "../../../../substrate/primitives/consensus/common" }
 polkadot-node-metrics = { path = "../../metrics" }
diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml
index aa60c0166d2..c02999a59b5 100644
--- a/polkadot/node/network/collator-protocol/Cargo.toml
+++ b/polkadot/node/network/collator-protocol/Cargo.toml
@@ -38,7 +38,7 @@ sp-core = { path = "../../../../substrate/primitives/core", features = ["std"] }
 sp-keyring = { path = "../../../../substrate/primitives/keyring" }
 sc-keystore = { path = "../../../../substrate/client/keystore" }
 sc-network = { path = "../../../../substrate/client/network" }
-parity-scale-codec = { version = "3.6.1", features = ["std"] }
+parity-scale-codec = { version = "3.6.12", features = ["std"] }
 polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
 polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" }

diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml
index eb8a7606304..dff285590d9 100644
--- a/polkadot/node/network/dispute-distribution/Cargo.toml
+++ b/polkadot/node/network/dispute-distribution/Cargo.toml
@@ -14,7 +14,7 @@ futures = "0.3.30"
 futures-timer = "3.0.2"
 gum = { package = "tracing-gum", path = "../../gum" }
 derive_more = "0.99.17"
-parity-scale-codec = { version = "3.6.1", features = ["std"] }
+parity-scale-codec = { version = "3.6.12", features = ["std"] }
 polkadot-primitives = { path = "../../../primitives" }
 polkadot-erasure-coding = { path = "../../../erasure-coding" }
 polkadot-node-subsystem = { path = "../../subsystem" }
diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml
index 2b741051b4f..c5015b8c645 100644
--- a/polkadot/node/network/protocol/Cargo.toml
+++ b/polkadot/node/network/protocol/Cargo.toml
@@ -16,7 +16,7 @@ hex = "0.4.3"
 polkadot-primitives = { path = "../../../primitives" }
 polkadot-node-primitives = { path = "../../primitives" }
 polkadot-node-jaeger = { path = "../../jaeger" }
-parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
+parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } sc-network = { path = "../../../../substrate/client/network" } sc-network-types = { path = "../../../../substrate/client/network/types" } sc-authority-discovery = { path = "../../../../substrate/client/authority-discovery" } diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml index da8c91a0a29..1fe761bd0e3 100644 --- a/polkadot/node/network/statement-distribution/Cargo.toml +++ b/polkadot/node/network/statement-distribution/Cargo.toml @@ -22,7 +22,7 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-node-network-protocol = { path = "../protocol" } arrayvec = "0.7.4" indexmap = "2.0.0" -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } thiserror = { workspace = true } fatality = "0.1.1" bitvec = "1" diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml index a4bbd824e67..526d4e480bb 100644 --- a/polkadot/node/primitives/Cargo.toml +++ b/polkadot/node/primitives/Cargo.toml @@ -13,7 +13,7 @@ workspace = true bounded-vec = "0.7" futures = "0.3.30" polkadot-primitives = { path = "../../primitives" } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } sp-core = { path = "../../../substrate/primitives/core" } sp-application-crypto = { path = "../../../substrate/primitives/application-crypto" } sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 7c010778d50..b3f1c22d0e7 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -90,7 +90,7 @@ thiserror = { workspace = true } kvdb = "0.13.0" kvdb-rocksdb = { version = "0.19.0", optional = true } parity-db = { version = "0.4.12", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } parking_lot = "0.12.1" bitvec = { version = "1.0.1", optional = true } diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml index e56efbf8254..37c6681b273 100644 --- a/polkadot/node/subsystem-bench/Cargo.toml +++ b/polkadot/node/subsystem-bench/Cargo.toml @@ -55,7 +55,7 @@ rand_distr = "0.4.3" bitvec = "1.0.1" kvdb-memorydb = "0.13.0" -parity-scale-codec = { version = "3.6.1", features = ["derive", "std"] } +parity-scale-codec = { version = "3.6.12", features = ["derive", "std"] } tokio = { version = "1.24.2", features = ["parking_lot", "rt-multi-thread"] } clap-num = "1.0.2" polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml index 492a9847c96..219ea4d3f57 100644 --- a/polkadot/node/subsystem-util/Cargo.toml +++ b/polkadot/node/subsystem-util/Cargo.toml @@ -14,7 +14,7 @@ async-trait = "0.1.79" futures = "0.3.30" futures-channel = "0.3.23" itertools = "0.11" -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } parking_lot = "0.12.1" pin-project = "1.0.9" rand 
= "0.8.5" diff --git a/polkadot/node/test/client/Cargo.toml b/polkadot/node/test/client/Cargo.toml index 7db00404eb8..55d4d81d1c2 100644 --- a/polkadot/node/test/client/Cargo.toml +++ b/polkadot/node/test/client/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } # Polkadot dependencies polkadot-test-runtime = { path = "../../../runtime/test-runtime" } diff --git a/polkadot/node/zombienet-backchannel/Cargo.toml b/polkadot/node/zombienet-backchannel/Cargo.toml index 9139c6a4e5e..a0233bb46e5 100644 --- a/polkadot/node/zombienet-backchannel/Cargo.toml +++ b/polkadot/node/zombienet-backchannel/Cargo.toml @@ -17,7 +17,7 @@ url = "2.3.1" tokio-tungstenite = "0.20.1" futures-util = "0.3.30" lazy_static = "1.4.0" -parity-scale-codec = { version = "3.6.1", features = ["derive"] } +parity-scale-codec = { version = "3.6.12", features = ["derive"] } reqwest = { version = "0.11", features = ["rustls-tls"], default-features = false } thiserror = { workspace = true } gum = { package = "tracing-gum", path = "../gum" } diff --git a/polkadot/parachain/Cargo.toml b/polkadot/parachain/Cargo.toml index 15eea2addc8..1344baac64b 100644 --- a/polkadot/parachain/Cargo.toml +++ b/polkadot/parachain/Cargo.toml @@ -13,7 +13,7 @@ workspace = true # note: special care is taken to avoid inclusion of `sp-io` externals when compiling # this crate for WASM. This is critical to avoid forcing all parachain WASM into implementing # various unnecessary Substrate-specific endpoints. -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } sp-std = { path = "../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false, features = ["serde"] } diff --git a/polkadot/parachain/test-parachains/Cargo.toml b/polkadot/parachain/test-parachains/Cargo.toml index 6acdedf67ff..22f3d2942e0 100644 --- a/polkadot/parachain/test-parachains/Cargo.toml +++ b/polkadot/parachain/test-parachains/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] tiny-keccak = { version = "2.0.2", features = ["keccak"] } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } adder = { package = "test-parachain-adder", path = "adder" } halt = { package = "test-parachain-halt", path = "halt" } diff --git a/polkadot/parachain/test-parachains/adder/Cargo.toml b/polkadot/parachain/test-parachains/adder/Cargo.toml index eec19ef788a..273fa93a50f 100644 --- a/polkadot/parachain/test-parachains/adder/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] parachain = { package = "polkadot-parachain-primitives", path = "../..", default-features = false, features = ["wasm-api"] } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } sp-std = { path = "../../../../substrate/primitives/std", default-features = false } tiny-keccak 
= { version = "2.0.2", features = ["keccak"] } dlmalloc = { version = "0.2.4", features = ["global"] } diff --git a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml index 5a2b5405741..dbc8507d599 100644 --- a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml @@ -15,7 +15,7 @@ name = "adder-collator" path = "src/main.rs" [dependencies] -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } clap = { version = "4.5.3", features = ["derive"] } futures = "0.3.30" futures-timer = "3.0.2" diff --git a/polkadot/parachain/test-parachains/undying/Cargo.toml b/polkadot/parachain/test-parachains/undying/Cargo.toml index 82ceebcf4ee..f2067a2c3b9 100644 --- a/polkadot/parachain/test-parachains/undying/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] parachain = { package = "polkadot-parachain-primitives", path = "../..", default-features = false, features = ["wasm-api"] } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } sp-std = { path = "../../../../substrate/primitives/std", default-features = false } tiny-keccak = { version = "2.0.2", features = ["keccak"] } dlmalloc = { version = "0.2.4", features = ["global"] } diff --git a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml index cacf7304f90..28efdbbf242 100644 --- a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml @@ -15,7 +15,7 @@ name = "undying-collator" path = "src/main.rs" [dependencies] -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } clap = { version = "4.5.3", features = ["derive"] } futures = "0.3.30" futures-timer = "3.0.2" diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index 99800afc37f..603d08b8fee 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] bitvec = { version = "1.0.0", default-features = false, features = ["alloc", "serde"] } hex-literal = "0.4.1" -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive", "serde"] } log = { workspace = true, default-features = false } serde = { features = ["alloc", "derive"], workspace = true } diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index 4219a7e7b0d..3a641488176 100644 --- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] impl-trait-for-tuples = "0.2.2" bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { 
version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } rustc-hex = { version = "2.1.0", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/polkadot/runtime/common/slot_range_helper/Cargo.toml b/polkadot/runtime/common/slot_range_helper/Cargo.toml index cacafd8ed3b..314e101ad22 100644 --- a/polkadot/runtime/common/slot_range_helper/Cargo.toml +++ b/polkadot/runtime/common/slot_range_helper/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] paste = "1.0" enumn = "0.1.12" -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } sp-std = { package = "sp-std", path = "../../../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } diff --git a/polkadot/runtime/metrics/Cargo.toml b/polkadot/runtime/metrics/Cargo.toml index 48162754286..76c1d134fa1 100644 --- a/polkadot/runtime/metrics/Cargo.toml +++ b/polkadot/runtime/metrics/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false } -parity-scale-codec = { version = "3.6.1", default-features = false } +parity-scale-codec = { version = "3.6.12", default-features = false } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index 402c6e487a1..d00a19c6ddb 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] impl-trait-for-tuples = "0.2.2" bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } log = { workspace = true } rustc-hex = { version = "2.1.0", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index f4d8fb51b3f..4765de08c1a 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -11,7 +11,7 @@ license.workspace = true workspace = true [dependencies] -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } serde = { workspace = true } diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index 6552ed4ef8a..596cc974c82 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -11,7 +11,7 @@ license.workspace = true workspace = true [dependencies] -parity-scale-codec = { version 
= "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { workspace = true } diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index f02cae0e9d4..6a919dd00a9 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } rustc-hex = { version = "2.1.0", default-features = false } diff --git a/polkadot/statement-table/Cargo.toml b/polkadot/statement-table/Cargo.toml index 37b8a99d640..ad4a053fa3f 100644 --- a/polkadot/statement-table/Cargo.toml +++ b/polkadot/statement-table/Cargo.toml @@ -10,7 +10,7 @@ description = "Stores messages other authorities issue about candidates in Polka workspace = true [dependencies] -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } sp-core = { path = "../../substrate/primitives/core" } primitives = { package = "polkadot-primitives", path = "../primitives" } gum = { package = "tracing-gum", path = "../node/gum" } diff --git a/polkadot/xcm/Cargo.toml b/polkadot/xcm/Cargo.toml index f10f45b0b4f..2cd8e822ae1 100644 --- a/polkadot/xcm/Cargo.toml +++ b/polkadot/xcm/Cargo.toml @@ -15,7 +15,7 @@ bounded-collections = { version = "0.2.0", default-features = false, features = derivative = { version = "2.2.0", default-features = false, features = ["use_core"] } impl-trait-for-tuples = "0.2.2" log = { workspace = true } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } sp-weights = { path = "../../substrate/primitives/weights", default-features = false, features = ["serde"] } serde = { features = ["alloc", "derive", "rc"], workspace = true } diff --git a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml index 9691ddd4816..8bf3b9abf66 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml +++ b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml @@ -13,7 +13,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../substrate/frame/system", default-features = false } diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index fc4d23426fb..6f9b389ab6f 100644 --- 
a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] bounded-collections = { version = "0.2.0", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } log = { workspace = true } diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml index 997ca99fb12..707e4aac796 100644 --- a/polkadot/xcm/xcm-builder/Cargo.toml +++ b/polkadot/xcm/xcm-builder/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] impl-trait-for-tuples = "0.2.1" -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } xcm = { package = "staging-xcm", path = "..", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } diff --git a/polkadot/xcm/xcm-executor/Cargo.toml b/polkadot/xcm/xcm-executor/Cargo.toml index aebc768bb90..64b2d405b90 100644 --- a/polkadot/xcm/xcm-executor/Cargo.toml +++ b/polkadot/xcm/xcm-executor/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] impl-trait-for-tuples = "0.2.2" environmental = { version = "1.1.4", default-features = false } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } xcm = { package = "staging-xcm", path = "..", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } diff --git a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml index 9c9c53f0ee1..37c2117e7b0 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml +++ b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml @@ -11,7 +11,7 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } frame-support = { path = "../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../substrate/frame/system" } futures = "0.3.30" diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml b/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml index cec76e7327e..6fa0236dfb4 100644 --- a/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml +++ b/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml @@ -11,7 +11,7 @@ description = "XCM fee payment runtime API" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } diff --git a/polkadot/xcm/xcm-simulator/Cargo.toml b/polkadot/xcm/xcm-simulator/Cargo.toml index c1c48b6d4c5..9324359d365 100644 --- 
a/polkadot/xcm/xcm-simulator/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } paste = "1.0.7" frame-support = { path = "../../../substrate/frame/support" } diff --git a/polkadot/xcm/xcm-simulator/example/Cargo.toml b/polkadot/xcm/xcm-simulator/example/Cargo.toml index 0e13a10a141..8b04170e303 100644 --- a/polkadot/xcm/xcm-simulator/example/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/example/Cargo.toml @@ -10,7 +10,7 @@ version = "7.0.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } scale-info = { version = "2.11.1", features = ["derive"] } log = { workspace = true } diff --git a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml index ca794a07bfb..6b3b4018d9f 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml @@ -11,7 +11,7 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } honggfuzz = "0.5.55" arbitrary = "1.3.2" scale-info = { version = "2.11.1", features = ["derive"] } diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index a77e197cf6f..ec9d6c306b5 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -42,7 +42,7 @@ crate-type = ["cdylib", "rlib"] # third-party dependencies array-bytes = "6.2.2" clap = { version = "4.5.3", features = ["derive"], optional = true } -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } serde = { features = ["derive"], workspace = true, default-features = true } jsonrpsee = { version = "0.22", features = ["server"] } futures = "0.3.30" diff --git a/substrate/bin/node/inspect/Cargo.toml b/substrate/bin/node/inspect/Cargo.toml index 8453aa3cdeb..5e4488903bf 100644 --- a/substrate/bin/node/inspect/Cargo.toml +++ b/substrate/bin/node/inspect/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] clap = { version = "4.5.3", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } thiserror = { workspace = true } sc-cli = { path = "../../../client/cli" } sc-client-api = { path = "../../../client/api" } diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index 00eab9b75f6..98b8c0ae6bf 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # third-party dependencies -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", "max-encoded-len", ] } diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index fa3f90193ba..09db10563fb 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = 
"parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } fs_extra = "1" futures = "0.3.30" log = { workspace = true, default-features = true } diff --git a/substrate/client/api/Cargo.toml b/substrate/client/api/Cargo.toml index fb650c5b532..147ea2bfbf5 100644 --- a/substrate/client/api/Cargo.toml +++ b/substrate/client/api/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } fnv = "1.0.6" diff --git a/substrate/client/authority-discovery/Cargo.toml b/substrate/client/authority-discovery/Cargo.toml index ac4537d5ba0..435ca88a800 100644 --- a/substrate/client/authority-discovery/Cargo.toml +++ b/substrate/client/authority-discovery/Cargo.toml @@ -20,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.12.4" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } futures = "0.3.30" futures-timer = "3.0.1" ip_network = "0.4.1" diff --git a/substrate/client/basic-authorship/Cargo.toml b/substrate/client/basic-authorship/Cargo.toml index 4890b66c9b2..b75cb463b1a 100644 --- a/substrate/client/basic-authorship/Cargo.toml +++ b/substrate/client/basic-authorship/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } futures = "0.3.30" futures-timer = "3.0.1" log = { workspace = true, default-features = true } diff --git a/substrate/client/block-builder/Cargo.toml b/substrate/client/block-builder/Cargo.toml index e74d587d9b4..62efe977e98 100644 --- a/substrate/client/block-builder/Cargo.toml +++ b/substrate/client/block-builder/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", features = [ "derive", ] } sp-api = { path = "../../primitives/api" } diff --git a/substrate/client/chain-spec/Cargo.toml b/substrate/client/chain-spec/Cargo.toml index dd7bb3598c2..84ef89783ad 100644 --- a/substrate/client/chain-spec/Cargo.toml +++ b/substrate/client/chain-spec/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } memmap2 = "0.9.3" serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml index 317a344cf58..1f3bce799b2 100644 --- a/substrate/client/cli/Cargo.toml +++ b/substrate/client/cli/Cargo.toml @@ -25,7 +25,7 @@ itertools = "0.11" libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } log = { workspace = true, default-features = true } names = { version = "0.14.0", default-features = false } -parity-scale-codec = "3.6.1" +parity-scale-codec = "3.6.12" rand 
= "0.8.5" regex = "1.6.0" rpassword = "7.0.0" diff --git a/substrate/client/consensus/aura/Cargo.toml b/substrate/client/consensus/aura/Cargo.toml index 64e2d16cd91..d1460c45356 100644 --- a/substrate/client/consensus/aura/Cargo.toml +++ b/substrate/client/consensus/aura/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } futures = "0.3.30" log = { workspace = true, default-features = true } thiserror = { workspace = true } diff --git a/substrate/client/consensus/babe/Cargo.toml b/substrate/client/consensus/babe/Cargo.toml index b001e3d117a..c51082a018b 100644 --- a/substrate/client/consensus/babe/Cargo.toml +++ b/substrate/client/consensus/babe/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } futures = "0.3.30" log = { workspace = true, default-features = true } num-bigint = "0.4.3" diff --git a/substrate/client/consensus/beefy/Cargo.toml b/substrate/client/consensus/beefy/Cargo.toml index 9336841146e..193acbe52a1 100644 --- a/substrate/client/consensus/beefy/Cargo.toml +++ b/substrate/client/consensus/beefy/Cargo.toml @@ -15,7 +15,7 @@ workspace = true array-bytes = "6.2.2" async-channel = "1.8.0" async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } fnv = "1.0.6" futures = "0.3.30" log = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/beefy/rpc/Cargo.toml b/substrate/client/consensus/beefy/rpc/Cargo.toml index 0959424ba86..07e46dbda15 100644 --- a/substrate/client/consensus/beefy/rpc/Cargo.toml +++ b/substrate/client/consensus/beefy/rpc/Cargo.toml @@ -12,7 +12,7 @@ homepage = "https://substrate.io" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } futures = "0.3.30" jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } log = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/epochs/Cargo.toml b/substrate/client/consensus/epochs/Cargo.toml index ff6bf86a6a4..e409e171e47 100644 --- a/substrate/client/consensus/epochs/Cargo.toml +++ b/substrate/client/consensus/epochs/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } fork-tree = { path = "../../../utils/fork-tree" } sc-client-api = { path = "../../api" } sc-consensus = { path = "../common" } diff --git a/substrate/client/consensus/grandpa/Cargo.toml b/substrate/client/consensus/grandpa/Cargo.toml index 235017d20ce..9099761fbce 100644 --- a/substrate/client/consensus/grandpa/Cargo.toml +++ b/substrate/client/consensus/grandpa/Cargo.toml @@ -25,7 +25,7 @@ finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } futures = "0.3.30" futures-timer = "3.0.1" log = { workspace = true, default-features = true } 
-parity-scale-codec = { version = "3.6.1", features = ["derive"] }
+parity-scale-codec = { version = "3.6.12", features = ["derive"] }
 parking_lot = "0.12.1"
 rand = "0.8.5"
 serde_json = { workspace = true, default-features = true }
diff --git a/substrate/client/consensus/grandpa/rpc/Cargo.toml b/substrate/client/consensus/grandpa/rpc/Cargo.toml
index 9b73418c958..d4e72baef3e 100644
--- a/substrate/client/consensus/grandpa/rpc/Cargo.toml
+++ b/substrate/client/consensus/grandpa/rpc/Cargo.toml
@@ -17,7 +17,7 @@ finality-grandpa = { version = "0.16.2", features = ["derive-codec"] }
 futures = "0.3.30"
 jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] }
 log = { workspace = true, default-features = true }
-parity-scale-codec = { version = "3.6.1", features = ["derive"] }
+parity-scale-codec = { version = "3.6.12", features = ["derive"] }
 serde = { features = ["derive"], workspace = true, default-features = true }
 thiserror = { workspace = true }
 sc-client-api = { path = "../../../api" }
diff --git a/substrate/client/consensus/manual-seal/Cargo.toml b/substrate/client/consensus/manual-seal/Cargo.toml
index 7aa8df248b7..33f5bf1f8c1 100644
--- a/substrate/client/consensus/manual-seal/Cargo.toml
+++ b/substrate/client/consensus/manual-seal/Cargo.toml
@@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] }
 assert_matches = "1.3.0"
 async-trait = "0.1.79"
-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 futures = "0.3.30"
 futures-timer = "3.0.1"
 log = { workspace = true, default-features = true }
diff --git a/substrate/client/consensus/pow/Cargo.toml b/substrate/client/consensus/pow/Cargo.toml
index ecfa29aa194..51a2be1b6cf 100644
--- a/substrate/client/consensus/pow/Cargo.toml
+++ b/substrate/client/consensus/pow/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 async-trait = "0.1.79"
-codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] }
 futures = "0.3.30"
 futures-timer = "3.0.1"
 log = { workspace = true, default-features = true }
diff --git a/substrate/client/consensus/slots/Cargo.toml b/substrate/client/consensus/slots/Cargo.toml
index 4ac6ce90713..8e88ee68d7d 100644
--- a/substrate/client/consensus/slots/Cargo.toml
+++ b/substrate/client/consensus/slots/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 async-trait = "0.1.79"
-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 futures = "0.3.30"
 futures-timer = "3.0.1"
 log = { workspace = true, default-features = true }
diff --git a/substrate/client/db/Cargo.toml b/substrate/client/db/Cargo.toml
index f67a662949a..b10c42d50f0 100644
--- a/substrate/client/db/Cargo.toml
+++ b/substrate/client/db/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", features = [
 	"derive",
 ] }
 hash-db = "0.16.0"
diff --git a/substrate/client/executor/Cargo.toml b/substrate/client/executor/Cargo.toml
index c08a7f5af34..1f54b82030f 100644
--- a/substrate/client/executor/Cargo.toml
+++ b/substrate/client/executor/Cargo.toml
@@ -21,7 +21,7 @@ parking_lot = "0.12.1"
 schnellru = "0.2.1"
 tracing = "0.1.29"
 
-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 sc-executor-common = { path = "common" }
 sc-executor-polkavm = { path = "polkavm" }
 sc-executor-wasmtime = { path = "wasmtime" }
diff --git a/substrate/client/executor/wasmtime/Cargo.toml b/substrate/client/executor/wasmtime/Cargo.toml
index f3fef404691..d3d670650db 100644
--- a/substrate/client/executor/wasmtime/Cargo.toml
+++ b/substrate/client/executor/wasmtime/Cargo.toml
@@ -50,5 +50,5 @@ sc-runtime-test = { path = "../runtime-test" }
 sp-io = { path = "../../../primitives/io" }
 tempfile = "3.3.0"
 paste = "1.0"
-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 cargo_metadata = "0.15.4"
diff --git a/substrate/client/merkle-mountain-range/Cargo.toml b/substrate/client/merkle-mountain-range/Cargo.toml
index 46b7a1011c4..3cf3cdd15da 100644
--- a/substrate/client/merkle-mountain-range/Cargo.toml
+++ b/substrate/client/merkle-mountain-range/Cargo.toml
@@ -14,7 +14,7 @@ workspace = true
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 futures = "0.3.30"
 log = { workspace = true, default-features = true }
 sp-api = { path = "../../primitives/api" }
diff --git a/substrate/client/merkle-mountain-range/rpc/Cargo.toml b/substrate/client/merkle-mountain-range/rpc/Cargo.toml
index ec790790678..25e6e316a8b 100644
--- a/substrate/client/merkle-mountain-range/rpc/Cargo.toml
+++ b/substrate/client/merkle-mountain-range/rpc/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] }
 serde = { features = ["derive"], workspace = true, default-features = true }
 sp-api = { path = "../../../primitives/api" }
diff --git a/substrate/client/mixnet/Cargo.toml b/substrate/client/mixnet/Cargo.toml
index 2ea152221ac..16263056394 100644
--- a/substrate/client/mixnet/Cargo.toml
+++ b/substrate/client/mixnet/Cargo.toml
@@ -20,7 +20,7 @@ array-bytes = "6.2.2"
 arrayvec = "0.7.2"
 blake2 = "0.10.4"
 bytes = "1"
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 futures = "0.3.30"
 futures-timer = "3.0.2"
 log = { workspace = true, default-features = true }
diff --git a/substrate/client/network-gossip/Cargo.toml b/substrate/client/network-gossip/Cargo.toml
index ad81381edea..3eeea665118 100644
--- a/substrate/client/network-gossip/Cargo.toml
+++ b/substrate/client/network-gossip/Cargo.toml
@@ -34,6 +34,6 @@ sp-runtime = { path = "../../primitives/runtime" }
 [dev-dependencies]
 tokio = "1.37"
 async-trait = "0.1.79"
-codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] }
 quickcheck = { version = "1.0.3", default-features = false }
 substrate-test-runtime-client = { path = "../../test-utils/runtime/client" }
diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml
index f5f6479c41f..5a469469539 100644
--- a/substrate/client/network/Cargo.toml
+++ b/substrate/client/network/Cargo.toml
@@ -26,7 +26,7 @@ async-trait = "0.1.79"
 asynchronous-codec = "0.6"
 bytes = "1"
 cid = "0.9.0"
-codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] }
 either = "1.5.3"
 fnv = "1.0.6"
 futures = "0.3.30"
diff --git a/substrate/client/network/common/Cargo.toml b/substrate/client/network/common/Cargo.toml
index ca510a2ae70..9a1bf5b88ea 100644
--- a/substrate/client/network/common/Cargo.toml
+++ b/substrate/client/network/common/Cargo.toml
@@ -21,7 +21,7 @@ prost-build = "0.12.4"
 [dependencies]
 async-trait = "0.1.79"
 bitflags = "1.3.2"
-codec = { package = "parity-scale-codec", version = "3.6.1", features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", features = [
 	"derive",
 ] }
 futures = "0.3.30"
diff --git a/substrate/client/network/light/Cargo.toml b/substrate/client/network/light/Cargo.toml
index 2abefd4f8e2..baaed578b88 100644
--- a/substrate/client/network/light/Cargo.toml
+++ b/substrate/client/network/light/Cargo.toml
@@ -21,7 +21,7 @@ prost-build = "0.12.4"
 [dependencies]
 async-channel = "1.8.0"
 array-bytes = "6.2.2"
-codec = { package = "parity-scale-codec", version = "3.6.1", features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", features = [
 	"derive",
 ] }
 futures = "0.3.30"
diff --git a/substrate/client/network/statement/Cargo.toml b/substrate/client/network/statement/Cargo.toml
index bcfcf24864c..0dfaa491b65 100644
--- a/substrate/client/network/statement/Cargo.toml
+++ b/substrate/client/network/statement/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 array-bytes = "6.2.2"
 async-channel = "1.8.0"
-codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] }
 futures = "0.3.30"
 libp2p = "0.51.4"
 log = { workspace = true, default-features = true }
diff --git a/substrate/client/network/sync/Cargo.toml b/substrate/client/network/sync/Cargo.toml
index b25a3657b6a..964090444b2 100644
--- a/substrate/client/network/sync/Cargo.toml
+++ b/substrate/client/network/sync/Cargo.toml
@@ -22,7 +22,7 @@ prost-build = "0.12.4"
 array-bytes = "6.2.2"
 async-channel = "1.8.0"
 async-trait = "0.1.79"
-codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] }
 futures = "0.3.30"
 futures-timer = "3.0.2"
 libp2p = "0.51.4"
diff --git a/substrate/client/network/transactions/Cargo.toml b/substrate/client/network/transactions/Cargo.toml
index 7510db808f4..d871b59b37b 100644
--- a/substrate/client/network/transactions/Cargo.toml
+++ b/substrate/client/network/transactions/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 array-bytes = "6.2.2"
-codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] }
 futures = "0.3.30"
 libp2p = "0.51.4"
 log = { workspace = true, default-features = true }
diff --git a/substrate/client/offchain/Cargo.toml b/substrate/client/offchain/Cargo.toml
index c4d07ceec1a..2944ff7f4f4 100644
--- a/substrate/client/offchain/Cargo.toml
+++ b/substrate/client/offchain/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 array-bytes = "6.2.2"
 bytes = "1.1"
-codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] }
 fnv = "1.0.6"
 futures = "0.3.30"
 futures-timer = "3.0.2"
diff --git a/substrate/client/rpc-api/Cargo.toml b/substrate/client/rpc-api/Cargo.toml
index c5613662b9f..d8f833e2b8d 100644
--- a/substrate/client/rpc-api/Cargo.toml
+++ b/substrate/client/rpc-api/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 serde = { features = ["derive"], workspace = true, default-features = true }
 serde_json = { workspace = true, default-features = true }
diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml
index e1f799e3372..8977c842d03 100644
--- a/substrate/client/rpc-spec-v2/Cargo.toml
+++ b/substrate/client/rpc-spec-v2/Cargo.toml
@@ -30,7 +30,7 @@ sp-version = { path = "../../primitives/version" }
 sc-client-api = { path = "../api" }
 sc-utils = { path = "../utils" }
 sc-rpc = { path = "../rpc" }
-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 thiserror = { workspace = true }
 serde = { workspace = true, default-features = true }
 hex = "0.4"
diff --git a/substrate/client/rpc/Cargo.toml b/substrate/client/rpc/Cargo.toml
index dff34215b02..7dd46b2ab4c 100644
--- a/substrate/client/rpc/Cargo.toml
+++ b/substrate/client/rpc/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 futures = "0.3.30"
 jsonrpsee = { version = "0.22", features = ["server"] }
 log = { workspace = true, default-features = true }
diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml
index b93196e86f1..dfdd485f15c 100644
--- a/substrate/client/service/Cargo.toml
+++ b/substrate/client/service/Cargo.toml
@@ -63,7 +63,7 @@ sc-chain-spec = { path = "../chain-spec" }
 sc-client-api = { path = "../api" }
 sp-api = { path = "../../primitives/api" }
 sc-client-db = { path = "../db", default-features = false }
-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 sc-executor = { path = "../executor" }
 sc-transaction-pool = { path = "../transaction-pool" }
 sp-transaction-pool = { path = "../../primitives/transaction-pool" }
diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml
index 8766868cede..e95e06cee26 100644
--- a/substrate/client/service/test/Cargo.toml
+++ b/substrate/client/service/test/Cargo.toml
@@ -20,7 +20,7 @@ array-bytes = "6.2.2"
 fdlimit = "0.3.0"
 futures = "0.3.30"
 log = { workspace = true, default-features = true }
-parity-scale-codec = "3.6.1"
+parity-scale-codec = "3.6.12"
 parking_lot = "0.12.1"
 tempfile = "3.1.0"
 tokio = { version = "1.22.0", features = ["time"] }
diff --git a/substrate/client/state-db/Cargo.toml b/substrate/client/state-db/Cargo.toml
index 400dda20c22..e203eb5a328 100644
--- a/substrate/client/state-db/Cargo.toml
+++ b/substrate/client/state-db/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] }
 log = { workspace = true, default-features = true }
 parking_lot = "0.12.1"
 sp-core = { path = "../../primitives/core" }
diff --git a/substrate/client/sync-state-rpc/Cargo.toml b/substrate/client/sync-state-rpc/Cargo.toml
index fd053d326e9..d5bdc920f7c 100644
--- a/substrate/client/sync-state-rpc/Cargo.toml
+++ b/substrate/client/sync-state-rpc/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] }
 serde = { features = ["derive"], workspace = true, default-features = true }
 serde_json = { workspace = true, default-features = true }
diff --git a/substrate/client/tracing/Cargo.toml b/substrate/client/tracing/Cargo.toml
index cad59ef91e4..df674d24c6d 100644
--- a/substrate/client/tracing/Cargo.toml
+++ b/substrate/client/tracing/Cargo.toml
@@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 ansi_term = "0.12.1"
 is-terminal = "0.4.9"
 chrono = "0.4.31"
-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 lazy_static = "1.4.0"
 libc = "0.2.152"
 log = { workspace = true, default-features = true }
diff --git a/substrate/client/transaction-pool/Cargo.toml b/substrate/client/transaction-pool/Cargo.toml
index 5f0b90ffe5d..351650297ff 100644
--- a/substrate/client/transaction-pool/Cargo.toml
+++ b/substrate/client/transaction-pool/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 async-trait = "0.1.79"
-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 futures = "0.3.30"
 futures-timer = "3.0.2"
 linked-hash-map = "0.5.4"
diff --git a/substrate/client/transaction-pool/api/Cargo.toml b/substrate/client/transaction-pool/api/Cargo.toml
index 1bb72ef5544..be80a7706b3 100644
--- a/substrate/client/transaction-pool/api/Cargo.toml
+++ b/substrate/client/transaction-pool/api/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
 
 [dependencies]
 async-trait = "0.1.79"
-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 futures = "0.3.30"
 log = { workspace = true, default-features = true }
 serde = { features = ["derive"], workspace = true, default-features = true }
diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml
index 44e8d681b01..3942f06ce6e 100644
--- a/substrate/frame/Cargo.toml
+++ b/substrate/frame/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 # external deps
-parity-scale-codec = { version = "3.2.2", default-features = false, features = [
+parity-scale-codec = { version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }
 scale-info = { version = "2.11.1", default-features = false, features = [
diff --git a/substrate/frame/alliance/Cargo.toml b/substrate/frame/alliance/Cargo.toml
index cd91ea79796..10e2feba623 100644
--- a/substrate/frame/alliance/Cargo.toml
+++ b/substrate/frame/alliance/Cargo.toml
@@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 array-bytes = { version = "6.2.2", optional = true }
 log = { workspace = true }
 
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
 sp-std = { path = "../../primitives/std", default-features = false }
diff --git a/substrate/frame/asset-conversion/Cargo.toml b/substrate/frame/asset-conversion/Cargo.toml
index cf50d7b22af..bfcda2299d5 100644
--- a/substrate/frame/asset-conversion/Cargo.toml
+++ b/substrate/frame/asset-conversion/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 log = { version = "0.4.20", default-features = false }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/asset-conversion/ops/Cargo.toml b/substrate/frame/asset-conversion/ops/Cargo.toml
index e421e904a3a..c5efbf9f6f4 100644
--- a/substrate/frame/asset-conversion/ops/Cargo.toml
+++ b/substrate/frame/asset-conversion/ops/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 log = { version = "0.4.20", default-features = false }
 frame-support = { path = "../../support", default-features = false }
 frame-system = { path = "../../system", default-features = false }
diff --git a/substrate/frame/asset-rate/Cargo.toml b/substrate/frame/asset-rate/Cargo.toml
index cd502148a8d..4662469e46c 100644
--- a/substrate/frame/asset-rate/Cargo.toml
+++ b/substrate/frame/asset-rate/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
diff --git a/substrate/frame/assets/Cargo.toml b/substrate/frame/assets/Cargo.toml
index ed6df77e152..9647ae4db6b 100644
--- a/substrate/frame/assets/Cargo.toml
+++ b/substrate/frame/assets/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 sp-std = { path = "../../primitives/std", default-features = false }
diff --git a/substrate/frame/atomic-swap/Cargo.toml b/substrate/frame/atomic-swap/Cargo.toml
index c641071df90..8083c12d4b3 100644
--- a/substrate/frame/atomic-swap/Cargo.toml
+++ b/substrate/frame/atomic-swap/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/aura/Cargo.toml b/substrate/frame/aura/Cargo.toml
index 92ff3a0c565..9264d2f4a64 100644
--- a/substrate/frame/aura/Cargo.toml
+++ b/substrate/frame/aura/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../support", default-features = false }
diff --git a/substrate/frame/authority-discovery/Cargo.toml b/substrate/frame/authority-discovery/Cargo.toml
index a7aba711a56..c21f9b5c904 100644
--- a/substrate/frame/authority-discovery/Cargo.toml
+++ b/substrate/frame/authority-discovery/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
diff --git a/substrate/frame/authorship/Cargo.toml b/substrate/frame/authorship/Cargo.toml
index 2bfd59a48e1..dd78e3404ef 100644
--- a/substrate/frame/authorship/Cargo.toml
+++ b/substrate/frame/authorship/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }
 impl-trait-for-tuples = "0.2.2"
diff --git a/substrate/frame/babe/Cargo.toml b/substrate/frame/babe/Cargo.toml
index 9f6ef2bc05e..d06b7f74546 100644
--- a/substrate/frame/babe/Cargo.toml
+++ b/substrate/frame/babe/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
diff --git a/substrate/frame/bags-list/Cargo.toml b/substrate/frame/bags-list/Cargo.toml
index 5deb504d0a4..3429d2f28a6 100644
--- a/substrate/frame/bags-list/Cargo.toml
+++ b/substrate/frame/bags-list/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 # parity
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }
 scale-info = { version = "2.11.1", default-features = false, features = [
diff --git a/substrate/frame/balances/Cargo.toml b/substrate/frame/balances/Cargo.toml
index 1cc9ac5d8fd..4da14aea128 100644
--- a/substrate/frame/balances/Cargo.toml
+++ b/substrate/frame/balances/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
diff --git a/substrate/frame/beefy-mmr/Cargo.toml b/substrate/frame/beefy-mmr/Cargo.toml
index bfdf91c091b..51abc306265 100644
--- a/substrate/frame/beefy-mmr/Cargo.toml
+++ b/substrate/frame/beefy-mmr/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
 
 [dependencies]
 array-bytes = { version = "6.2.2", optional = true }
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 serde = { optional = true, workspace = true, default-features = true }
diff --git a/substrate/frame/beefy/Cargo.toml b/substrate/frame/beefy/Cargo.toml
index f181f4d41cd..890ac1399b9 100644
--- a/substrate/frame/beefy/Cargo.toml
+++ b/substrate/frame/beefy/Cargo.toml
@@ -12,7 +12,7 @@ homepage = "https://substrate.io"
 workspace = true
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] }
 serde = { optional = true, workspace = true, default-features = true }
diff --git a/substrate/frame/benchmarking/Cargo.toml b/substrate/frame/benchmarking/Cargo.toml
index 04a10314a95..b5824ab2ec2 100644
--- a/substrate/frame/benchmarking/Cargo.toml
+++ b/substrate/frame/benchmarking/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 linregress = { version = "0.5.1", optional = true }
 log = { workspace = true }
 paste = "1.0"
diff --git a/substrate/frame/benchmarking/pov/Cargo.toml b/substrate/frame/benchmarking/pov/Cargo.toml
index 5d3aaa78904..e4f3c272a63 100644
--- a/substrate/frame/benchmarking/pov/Cargo.toml
+++ b/substrate/frame/benchmarking/pov/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "..", default-features = false }
 frame-support = { path = "../../support", default-features = false }
diff --git a/substrate/frame/bounties/Cargo.toml b/substrate/frame/bounties/Cargo.toml
index 3307e47e981..fac00543590 100644
--- a/substrate/frame/bounties/Cargo.toml
+++ b/substrate/frame/bounties/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }
 log = { workspace = true }
diff --git a/substrate/frame/broker/Cargo.toml b/substrate/frame/broker/Cargo.toml
index ce8d4153045..8f3f30ec58e 100644
--- a/substrate/frame/broker/Cargo.toml
+++ b/substrate/frame/broker/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 log = { workspace = true }
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 bitvec = { version = "1.0.0", default-features = false }
 sp-api = { path = "../../primitives/api", default-features = false }
diff --git a/substrate/frame/child-bounties/Cargo.toml b/substrate/frame/child-bounties/Cargo.toml
index 14a5e25e13d..09271632df5 100644
--- a/substrate/frame/child-bounties/Cargo.toml
+++ b/substrate/frame/child-bounties/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }
 log = { workspace = true }
diff --git a/substrate/frame/collective/Cargo.toml b/substrate/frame/collective/Cargo.toml
index 850390409ab..d966370238b 100644
--- a/substrate/frame/collective/Cargo.toml
+++ b/substrate/frame/collective/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml
index 52c8fceb504..bd4ded1a117 100644
--- a/substrate/frame/contracts/Cargo.toml
+++ b/substrate/frame/contracts/Cargo.toml
@@ -20,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 paste = { version = "1.0", default-features = false }
 bitflags = "1.3"
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 	"max-encoded-len",
 ] }
diff --git a/substrate/frame/contracts/mock-network/Cargo.toml b/substrate/frame/contracts/mock-network/Cargo.toml
index 387c3ca39d0..a348b7308d1 100644
--- a/substrate/frame/contracts/mock-network/Cargo.toml
+++ b/substrate/frame/contracts/mock-network/Cargo.toml
@@ -12,7 +12,7 @@ description = "A mock network for testing pallet-contracts"
 workspace = true
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] }
 
 frame-support = { path = "../../support", default-features = false }
 frame-system = { path = "../../system", default-features = false }
diff --git a/substrate/frame/contracts/uapi/Cargo.toml b/substrate/frame/contracts/uapi/Cargo.toml
index d9a5ee14f05..80de7a1d5d6 100644
--- a/substrate/frame/contracts/uapi/Cargo.toml
+++ b/substrate/frame/contracts/uapi/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 paste = { version = "1.0", default-features = false }
 bitflags = "1.0"
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"], optional = true }
-scale = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+scale = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 	"max-encoded-len",
 ], optional = true }
diff --git a/substrate/frame/conviction-voting/Cargo.toml b/substrate/frame/conviction-voting/Cargo.toml
index ffb5122ed7f..20de4d858ad 100644
--- a/substrate/frame/conviction-voting/Cargo.toml
+++ b/substrate/frame/conviction-voting/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 assert_matches = "1.3.0"
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 	"max-encoded-len",
 ] }
diff --git a/substrate/frame/core-fellowship/Cargo.toml b/substrate/frame/core-fellowship/Cargo.toml
index b4258281b70..8773a124cd0 100644
--- a/substrate/frame/core-fellowship/Cargo.toml
+++ b/substrate/frame/core-fellowship/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
diff --git a/substrate/frame/delegated-staking/Cargo.toml b/substrate/frame/delegated-staking/Cargo.toml
index a9cbd758ed0..4a489882711 100644
--- a/substrate/frame/delegated-staking/Cargo.toml
+++ b/substrate/frame/delegated-staking/Cargo.toml
@@ -12,7 +12,7 @@ description = "FRAME delegated staking pallet"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
diff --git a/substrate/frame/democracy/Cargo.toml b/substrate/frame/democracy/Cargo.toml
index edd2d742b50..7f182447ead 100644
--- a/substrate/frame/democracy/Cargo.toml
+++ b/substrate/frame/democracy/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
diff --git a/substrate/frame/election-provider-multi-phase/Cargo.toml b/substrate/frame/election-provider-multi-phase/Cargo.toml
index 2074b51f50f..43e3e7079d2 100644
--- a/substrate/frame/election-provider-multi-phase/Cargo.toml
+++ b/substrate/frame/election-provider-multi-phase/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }
 scale-info = { version = "2.11.1", default-features = false, features = [
diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml
index 25c280921f8..fc696e04d68 100644
--- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml
+++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dev-dependencies]
 parking_lot = "0.12.1"
-codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] }
 scale-info = { version = "2.11.1", features = ["derive"] }
 log = { workspace = true }
 
diff --git a/substrate/frame/election-provider-support/Cargo.toml b/substrate/frame/election-provider-support/Cargo.toml
index 0d9748ee34e..1c63f90720f 100644
--- a/substrate/frame/election-provider-support/Cargo.toml
+++ b/substrate/frame/election-provider-support/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-election-provider-solution-type = { path = "solution-type" }
 frame-support = { path = "../support", default-features = false }
diff --git a/substrate/frame/election-provider-support/benchmarking/Cargo.toml b/substrate/frame/election-provider-support/benchmarking/Cargo.toml
index 6e13f17bec1..c2e644cfefa 100644
--- a/substrate/frame/election-provider-support/benchmarking/Cargo.toml
+++ b/substrate/frame/election-provider-support/benchmarking/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }
 frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true }
diff --git a/substrate/frame/election-provider-support/solution-type/Cargo.toml b/substrate/frame/election-provider-support/solution-type/Cargo.toml
index 09c6a492dd0..3f8893dad6f 100644
--- a/substrate/frame/election-provider-support/solution-type/Cargo.toml
+++ b/substrate/frame/election-provider-support/solution-type/Cargo.toml
@@ -24,7 +24,7 @@ proc-macro2 = "1.0.56"
 proc-macro-crate = "3.0.0"
 
 [dev-dependencies]
-parity-scale-codec = "3.6.1"
+parity-scale-codec = "3.6.12"
 scale-info = "2.11.1"
 sp-arithmetic = { path = "../../../primitives/arithmetic" }
 # used by generate_solution_type:
diff --git a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml
index 1fb9e2387ed..98da507384f 100644
--- a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml
+++ b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml
@@ -20,7 +20,7 @@ clap = { version = "4.5.3", features = ["derive"] }
 honggfuzz = "0.5"
 rand = { version = "0.8", features = ["small_rng", "std"] }
 
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-election-provider-solution-type = { path = ".." }
 frame-election-provider-support = { path = "../.." }
diff --git a/substrate/frame/elections-phragmen/Cargo.toml b/substrate/frame/elections-phragmen/Cargo.toml
index 81dc48476a0..dbcb740518b 100644
--- a/substrate/frame/elections-phragmen/Cargo.toml
+++ b/substrate/frame/elections-phragmen/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }
 log = { workspace = true }
diff --git a/substrate/frame/examples/basic/Cargo.toml b/substrate/frame/examples/basic/Cargo.toml
index 43b37c6beba..ba9f9eca27d 100644
--- a/substrate/frame/examples/basic/Cargo.toml
+++ b/substrate/frame/examples/basic/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true }
diff --git a/substrate/frame/examples/default-config/Cargo.toml b/substrate/frame/examples/default-config/Cargo.toml
index 2aa062ee6c1..0ad5b56cb6f 100644
--- a/substrate/frame/examples/default-config/Cargo.toml
+++ b/substrate/frame/examples/default-config/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../../support", default-features = false }
diff --git a/substrate/frame/examples/dev-mode/Cargo.toml b/substrate/frame/examples/dev-mode/Cargo.toml
index 71b97796ecd..d7570f57094 100644
--- a/substrate/frame/examples/dev-mode/Cargo.toml
+++ b/substrate/frame/examples/dev-mode/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../../support", default-features = false }
diff --git a/substrate/frame/examples/frame-crate/Cargo.toml b/substrate/frame/examples/frame-crate/Cargo.toml
index 48cb25f9094..29984bab3e0 100644
--- a/substrate/frame/examples/frame-crate/Cargo.toml
+++ b/substrate/frame/examples/frame-crate/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
 frame = { package = "polkadot-sdk-frame", path = "../..", default-features = false, features = ["experimental", "runtime"] }
diff --git a/substrate/frame/examples/kitchensink/Cargo.toml b/substrate/frame/examples/kitchensink/Cargo.toml
index d8311897c6e..db3e22daa01 100644
--- a/substrate/frame/examples/kitchensink/Cargo.toml
+++ b/substrate/frame/examples/kitchensink/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
diff --git a/substrate/frame/examples/multi-block-migrations/Cargo.toml b/substrate/frame/examples/multi-block-migrations/Cargo.toml
index 28eca857715..61bb2bc61b4 100644
--- a/substrate/frame/examples/multi-block-migrations/Cargo.toml
+++ b/substrate/frame/examples/multi-block-migrations/Cargo.toml
@@ -13,7 +13,7 @@ publish = false
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.5", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 pallet-migrations = { path = "../../migrations", default-features = false }
 frame-support = { path = "../../support", default-features = false }
 frame-system = { path = "../../system", default-features = false }
diff --git a/substrate/frame/examples/offchain-worker/Cargo.toml b/substrate/frame/examples/offchain-worker/Cargo.toml
index 9363f753352..23ce79c3440 100644
--- a/substrate/frame/examples/offchain-worker/Cargo.toml
+++ b/substrate/frame/examples/offchain-worker/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 lite-json = { version = "0.2.0", default-features = false }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
diff --git a/substrate/frame/examples/single-block-migrations/Cargo.toml b/substrate/frame/examples/single-block-migrations/Cargo.toml
index b1d560a85f3..080500f6296 100644
--- a/substrate/frame/examples/single-block-migrations/Cargo.toml
+++ b/substrate/frame/examples/single-block-migrations/Cargo.toml
@@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 docify = "0.2.8"
 log = { version = "0.4.21", default-features = false }
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../../support", default-features = false }
 frame-executive = { path = "../../executive", default-features = false }
diff --git a/substrate/frame/examples/split/Cargo.toml b/substrate/frame/examples/split/Cargo.toml
index 1ef3521e060..6cb4d7ddd6c 100644
--- a/substrate/frame/examples/split/Cargo.toml
+++ b/substrate/frame/examples/split/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
diff --git a/substrate/frame/examples/tasks/Cargo.toml b/substrate/frame/examples/tasks/Cargo.toml
index 3f59d57ea0f..95246ef3f66 100644
--- a/substrate/frame/examples/tasks/Cargo.toml
+++ b/substrate/frame/examples/tasks/Cargo.toml
@@ -14,7 +14,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
diff --git a/substrate/frame/executive/Cargo.toml b/substrate/frame/executive/Cargo.toml
index 22fcaa993ab..4cce0fa9f95 100644
--- a/substrate/frame/executive/Cargo.toml
+++ b/substrate/frame/executive/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 aquamarine = "0.5.0"
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }
 log = { workspace = true }
diff --git a/substrate/frame/fast-unstake/Cargo.toml b/substrate/frame/fast-unstake/Cargo.toml
index f05f22f7641..5b7121e2eae 100644
--- a/substrate/frame/fast-unstake/Cargo.toml
+++ b/substrate/frame/fast-unstake/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
diff --git a/substrate/frame/glutton/Cargo.toml b/substrate/frame/glutton/Cargo.toml
index 5ce010f1c26..730c4e70935 100644
--- a/substrate/frame/glutton/Cargo.toml
+++ b/substrate/frame/glutton/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 blake2 = { version = "0.10.4", default-features = false }
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
diff --git a/substrate/frame/grandpa/Cargo.toml b/substrate/frame/grandpa/Cargo.toml
index f4dd92129f3..302ce327aed 100644
--- a/substrate/frame/grandpa/Cargo.toml
+++ b/substrate/frame/grandpa/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
diff --git a/substrate/frame/identity/Cargo.toml b/substrate/frame/identity/Cargo.toml
index 8c0052004ae..e0bce8a77bd 100644
--- a/substrate/frame/identity/Cargo.toml
+++ b/substrate/frame/identity/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] }
 enumflags2 = { version = "0.7.7" }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
diff --git a/substrate/frame/im-online/Cargo.toml b/substrate/frame/im-online/Cargo.toml
index 46b416f0f9a..78192a81d7b 100644
--- a/substrate/frame/im-online/Cargo.toml
+++ b/substrate/frame/im-online/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
diff --git a/substrate/frame/indices/Cargo.toml b/substrate/frame/indices/Cargo.toml
index 8684f347270..248bae003ed 100644
--- a/substrate/frame/indices/Cargo.toml
+++ b/substrate/frame/indices/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
diff --git a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml
index f4d65d9e560..c2ec14cb4bc 100644
--- a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml
+++ b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 safe-mix = { version = "1.0", default-features = false }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-support = { path = "../support", default-features = false }
diff --git a/substrate/frame/lottery/Cargo.toml b/substrate/frame/lottery/Cargo.toml
index 5f79704445f..be59e5ec893 100644
--- a/substrate/frame/lottery/Cargo.toml
+++ b/substrate/frame/lottery/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
diff --git a/substrate/frame/membership/Cargo.toml b/substrate/frame/membership/Cargo.toml
index 6f67db0ae70..9f19c409736 100644
--- a/substrate/frame/membership/Cargo.toml
+++ b/substrate/frame/membership/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
diff --git a/substrate/frame/merkle-mountain-range/Cargo.toml b/substrate/frame/merkle-mountain-range/Cargo.toml
index 8a301387ae6..0d73c567cf4 100644
--- a/substrate/frame/merkle-mountain-range/Cargo.toml
+++ b/substrate/frame/merkle-mountain-range/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
diff --git a/substrate/frame/message-queue/Cargo.toml b/substrate/frame/message-queue/Cargo.toml
index f263c41831b..e44cbeb1550 100644
--- a/substrate/frame/message-queue/Cargo.toml
+++ b/substrate/frame/message-queue/Cargo.toml
@@ -12,7 +12,7 @@ description = "FRAME pallet to queue and process messages"
 workspace = true
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 serde = { optional = true, features = ["derive"], workspace = true, default-features = true }
 log = { workspace = true }
diff --git a/substrate/frame/migrations/Cargo.toml b/substrate/frame/migrations/Cargo.toml
index 4726ac5c521..69e910a4e4f 100644
--- a/substrate/frame/migrations/Cargo.toml
+++ b/substrate/frame/migrations/Cargo.toml
@@ -11,7 +11,7 @@ repository.workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 docify = "0.2.8"
 impl-trait-for-tuples = "0.2.2"
 log = "0.4.21"
diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml
index 964d6acb889..44a567d668f 100644
--- a/substrate/frame/mixnet/Cargo.toml
+++ b/substrate/frame/mixnet/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] }
 frame-benchmarking = { default-features = false, optional = true, path = "../benchmarking" }
 frame-support = { default-features = false, path = "../support" }
 frame-system = { default-features = false, path = "../system" }
diff --git a/substrate/frame/multisig/Cargo.toml b/substrate/frame/multisig/Cargo.toml
index 2437acbc2e2..649a7100325 100644
--- a/substrate/frame/multisig/Cargo.toml
+++ b/substrate/frame/multisig/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
diff --git a/substrate/frame/nft-fractionalization/Cargo.toml b/substrate/frame/nft-fractionalization/Cargo.toml
index b5a929468f7..e2a7e34c637 100644
--- a/substrate/frame/nft-fractionalization/Cargo.toml
+++ b/substrate/frame/nft-fractionalization/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
diff --git a/substrate/frame/nfts/Cargo.toml b/substrate/frame/nfts/Cargo.toml
index 4f818ea3e08..5c5c011c94e 100644
--- a/substrate/frame/nfts/Cargo.toml
+++ b/substrate/frame/nfts/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 enumflags2 = { version = "0.7.7" }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
diff --git a/substrate/frame/nfts/runtime-api/Cargo.toml b/substrate/frame/nfts/runtime-api/Cargo.toml
index 84cbd1f51c9..6bee98fb51e 100644
--- a/substrate/frame/nfts/runtime-api/Cargo.toml
+++ b/substrate/frame/nfts/runtime-api/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } pallet-nfts = { path = "..", default-features = false } sp-api = { path = "../../../primitives/api", default-features = false } sp-std = { path = "../../../primitives/std", default-features = false } diff --git a/substrate/frame/nis/Cargo.toml b/substrate/frame/nis/Cargo.toml index d0ba74a9273..1e3a0609c46 100644 --- a/substrate/frame/nis/Cargo.toml +++ b/substrate/frame/nis/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/node-authorization/Cargo.toml b/substrate/frame/node-authorization/Cargo.toml index 63376163cdc..17ed16d2623 100644 --- a/substrate/frame/node-authorization/Cargo.toml +++ b/substrate/frame/node-authorization/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/nomination-pools/Cargo.toml b/substrate/frame/nomination-pools/Cargo.toml index eddcc8e4e1d..bf4e01a3184 100644 --- a/substrate/frame/nomination-pools/Cargo.toml +++ b/substrate/frame/nomination-pools/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # parity -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } scale-info = { version = "2.11.1", default-features = false, features = [ diff --git a/substrate/frame/nomination-pools/benchmarking/Cargo.toml b/substrate/frame/nomination-pools/benchmarking/Cargo.toml index 4985d7acbec..3186bce5164 100644 --- a/substrate/frame/nomination-pools/benchmarking/Cargo.toml +++ b/substrate/frame/nomination-pools/benchmarking/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # parity -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # FRAME diff --git a/substrate/frame/nomination-pools/runtime-api/Cargo.toml b/substrate/frame/nomination-pools/runtime-api/Cargo.toml index 7828f26fe6f..a0ddac9e045 100644 --- a/substrate/frame/nomination-pools/runtime-api/Cargo.toml +++ b/substrate/frame/nomination-pools/runtime-api/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", 
default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } sp-api = { path = "../../../primitives/api", default-features = false } sp-std = { path = "../../../primitives/std", default-features = false } pallet-nomination-pools = { path = "..", default-features = false } diff --git a/substrate/frame/nomination-pools/test-staking/Cargo.toml b/substrate/frame/nomination-pools/test-staking/Cargo.toml index 130a27752bf..ada52db6de5 100644 --- a/substrate/frame/nomination-pools/test-staking/Cargo.toml +++ b/substrate/frame/nomination-pools/test-staking/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } scale-info = { version = "2.11.1", features = ["derive"] } sp-runtime = { path = "../../../primitives/runtime" } diff --git a/substrate/frame/offences/Cargo.toml b/substrate/frame/offences/Cargo.toml index f8efc88bafc..a59ef9334f0 100644 --- a/substrate/frame/offences/Cargo.toml +++ b/substrate/frame/offences/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } diff --git a/substrate/frame/offences/benchmarking/Cargo.toml b/substrate/frame/offences/benchmarking/Cargo.toml index 07905a1e0aa..bbd918a2883 100644 --- a/substrate/frame/offences/benchmarking/Cargo.toml +++ b/substrate/frame/offences/benchmarking/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../benchmarking", default-features = false } frame-election-provider-support = { path = "../../election-provider-support", default-features = false } diff --git a/substrate/frame/paged-list/Cargo.toml b/substrate/frame/paged-list/Cargo.toml index 26f3d7e48ce..f550e694349 100644 --- a/substrate/frame/paged-list/Cargo.toml +++ b/substrate/frame/paged-list/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } docify = "0.2.8" scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/frame/parameters/Cargo.toml b/substrate/frame/parameters/Cargo.toml index b718b391019..c4d6d189d2d 100644 --- a/substrate/frame/parameters/Cargo.toml +++ b/substrate/frame/parameters/Cargo.toml @@ -8,7 +8,7 @@ authors = ["Acala Developers", "Parity Technologies "] edition.workspace = true [dependencies] -codec = { package = "parity-scale-codec", 
version = "3.0.0", default-features = false, features = ["max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["max-encoded-len"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } paste = { version = "1.0.14", default-features = false } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } diff --git a/substrate/frame/preimage/Cargo.toml b/substrate/frame/preimage/Cargo.toml index d67fc7bead0..d420accbd6d 100644 --- a/substrate/frame/preimage/Cargo.toml +++ b/substrate/frame/preimage/Cargo.toml @@ -12,7 +12,7 @@ description = "FRAME pallet for storing preimages of hashes" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/proxy/Cargo.toml b/substrate/frame/proxy/Cargo.toml index 0a3b39e471d..fcebbb5f3e8 100644 --- a/substrate/frame/proxy/Cargo.toml +++ b/substrate/frame/proxy/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["max-encoded-len"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/ranked-collective/Cargo.toml b/substrate/frame/ranked-collective/Cargo.toml index 0a659580775..05ce76cad2b 100644 --- a/substrate/frame/ranked-collective/Cargo.toml +++ b/substrate/frame/ranked-collective/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/recovery/Cargo.toml b/substrate/frame/recovery/Cargo.toml index 43608de37fc..2fd63597da9 100644 --- a/substrate/frame/recovery/Cargo.toml +++ b/substrate/frame/recovery/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git 
a/substrate/frame/referenda/Cargo.toml b/substrate/frame/referenda/Cargo.toml index f4e0171443a..dde522ff89b 100644 --- a/substrate/frame/referenda/Cargo.toml +++ b/substrate/frame/referenda/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] assert_matches = { version = "1.5", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/frame/remark/Cargo.toml b/substrate/frame/remark/Cargo.toml index e746b0382ae..d251aacfb5b 100644 --- a/substrate/frame/remark/Cargo.toml +++ b/substrate/frame/remark/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/root-offences/Cargo.toml b/substrate/frame/root-offences/Cargo.toml index f4d83c237b9..e7317d737fa 100644 --- a/substrate/frame/root-offences/Cargo.toml +++ b/substrate/frame/root-offences/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } pallet-session = { path = "../session", default-features = false, features = ["historical"] } diff --git a/substrate/frame/root-testing/Cargo.toml b/substrate/frame/root-testing/Cargo.toml index bf14516ee32..74a3b8f479f 100644 --- a/substrate/frame/root-testing/Cargo.toml +++ b/substrate/frame/root-testing/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/safe-mode/Cargo.toml b/substrate/frame/safe-mode/Cargo.toml index b6b7e5a67e4..7ecbdb6eeda 100644 --- a/substrate/frame/safe-mode/Cargo.toml +++ b/substrate/frame/safe-mode/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } docify = "0.2.8" frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git 
a/substrate/frame/salary/Cargo.toml b/substrate/frame/salary/Cargo.toml index 8c77edcb173..25911269a95 100644 --- a/substrate/frame/salary/Cargo.toml +++ b/substrate/frame/salary/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/sassafras/Cargo.toml b/substrate/frame/sassafras/Cargo.toml index c9a70a730d4..82fb9a1d8c5 100644 --- a/substrate/frame/sassafras/Cargo.toml +++ b/substrate/frame/sassafras/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -scale-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +scale-codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/scheduler/Cargo.toml b/substrate/frame/scheduler/Cargo.toml index 40a71736447..e851f876112 100644 --- a/substrate/frame/scheduler/Cargo.toml +++ b/substrate/frame/scheduler/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/scored-pool/Cargo.toml b/substrate/frame/scored-pool/Cargo.toml index 92b70e01b9a..f25bd1f1769 100644 --- a/substrate/frame/scored-pool/Cargo.toml +++ b/substrate/frame/scored-pool/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/session/Cargo.toml b/substrate/frame/session/Cargo.toml index 86814f8276e..42ea957ac15 100644 --- a/substrate/frame/session/Cargo.toml +++ b/substrate/frame/session/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" log = { workspace = true } scale-info = { 
version = "2.11.1", default-features = false, features = ["derive", "serde"] } diff --git a/substrate/frame/session/benchmarking/Cargo.toml b/substrate/frame/session/benchmarking/Cargo.toml index a00fbd8f6fd..a306f9015c0 100644 --- a/substrate/frame/session/benchmarking/Cargo.toml +++ b/substrate/frame/session/benchmarking/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } rand = { version = "0.8.5", default-features = false, features = ["std_rng"] } frame-benchmarking = { path = "../../benchmarking", default-features = false } frame-support = { path = "../../support", default-features = false } @@ -28,7 +28,7 @@ sp-session = { path = "../../../primitives/session", default-features = false } sp-std = { path = "../../../primitives/std", default-features = false } [dev-dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } scale-info = "2.11.1" frame-election-provider-support = { path = "../../election-provider-support" } pallet-balances = { path = "../../balances" } diff --git a/substrate/frame/society/Cargo.toml b/substrate/frame/society/Cargo.toml index df71f79a29f..ed7fea523bf 100644 --- a/substrate/frame/society/Cargo.toml +++ b/substrate/frame/society/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] log = { workspace = true } rand_chacha = { version = "0.3.1", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } sp-std = { path = "../../primitives/std", default-features = false } sp-io = { path = "../../primitives/io", default-features = false } diff --git a/substrate/frame/staking/Cargo.toml b/substrate/frame/staking/Cargo.toml index 996e1abb6a6..22df746d667 100644 --- a/substrate/frame/staking/Cargo.toml +++ b/substrate/frame/staking/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { features = ["alloc", "derive"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } diff --git a/substrate/frame/staking/runtime-api/Cargo.toml b/substrate/frame/staking/runtime-api/Cargo.toml index 50a19be92da..19da2f24ff0 100644 --- a/substrate/frame/staking/runtime-api/Cargo.toml +++ b/substrate/frame/staking/runtime-api/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } sp-api = { default-features = false, path = "../../../primitives/api" } sp-staking = { default-features = false, path = "../../../primitives/staking" } diff --git 
a/substrate/frame/state-trie-migration/Cargo.toml b/substrate/frame/state-trie-migration/Cargo.toml index 613308c308e..0870989d81f 100644 --- a/substrate/frame/state-trie-migration/Cargo.toml +++ b/substrate/frame/state-trie-migration/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } diff --git a/substrate/frame/statement/Cargo.toml b/substrate/frame/statement/Cargo.toml index 92bc32191ab..989f0c330fc 100644 --- a/substrate/frame/statement/Cargo.toml +++ b/substrate/frame/statement/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/sudo/Cargo.toml b/substrate/frame/sudo/Cargo.toml index 805f46a77f2..fcbb00087e2 100644 --- a/substrate/frame/sudo/Cargo.toml +++ b/substrate/frame/sudo/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index 9c977125673..a6c4fd6ee30 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = { version = "6.2.2", default-features = false } serde = { features = ["alloc", "derive"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", "max-encoded-len", ] } diff --git a/substrate/frame/support/test/Cargo.toml b/substrate/frame/support/test/Cargo.toml index 88124e0a43b..6e861ad769c 100644 --- a/substrate/frame/support/test/Cargo.toml +++ b/substrate/frame/support/test/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] static_assertions = "1.1.0" serde = { features = ["derive"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-metadata = { version = "16.0.0", 
default-features = false, features = ["current"] } sp-api = { path = "../../../primitives/api", default-features = false } diff --git a/substrate/frame/support/test/compile_pass/Cargo.toml b/substrate/frame/support/test/compile_pass/Cargo.toml index 3f52b4664b1..37c069247e1 100644 --- a/substrate/frame/support/test/compile_pass/Cargo.toml +++ b/substrate/frame/support/test/compile_pass/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } renamed-frame-support = { package = "frame-support", path = "../..", default-features = false } renamed-frame-system = { package = "frame-system", path = "../../../system", default-features = false } diff --git a/substrate/frame/support/test/pallet/Cargo.toml b/substrate/frame/support/test/pallet/Cargo.toml index 7a20c3f2730..8607339a2b0 100644 --- a/substrate/frame/support/test/pallet/Cargo.toml +++ b/substrate/frame/support/test/pallet/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], workspace = true } frame-support = { path = "../..", default-features = false } diff --git a/substrate/frame/support/test/stg_frame_crate/Cargo.toml b/substrate/frame/support/test/stg_frame_crate/Cargo.toml index 554c81ab43d..5b97db60c00 100644 --- a/substrate/frame/support/test/stg_frame_crate/Cargo.toml +++ b/substrate/frame/support/test/stg_frame_crate/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } frame = { package = "polkadot-sdk-frame", path = "../../..", default-features = false, features = ["experimental", "runtime"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr index 10418b915e3..b28cae2ddef 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr @@ -107,7 +107,7 @@ note: required because it appears within the type `RuntimeEvent` 28 | | } | |_^ note: required by a bound in `EncodeLike` - --> $CARGO/parity-scale-codec-3.6.11/src/encode_like.rs + --> $CARGO/parity-scale-codec-3.6.12/src/encode_like.rs | | pub trait EncodeLike: Sized + Encode {} | ^^^^^ required by this bound in `EncodeLike` @@ -137,7 +137,7 @@ note: required because it appears within the type `RuntimeEvent` 28 | | } | |_^ note: required by a bound in `Decode` - --> $CARGO/parity-scale-codec-3.6.11/src/codec.rs + --> 
$CARGO/parity-scale-codec-3.6.12/src/codec.rs | | pub trait Decode: Sized { | ^^^^^ required by this bound in `Decode` @@ -286,7 +286,7 @@ note: required because it appears within the type `RuntimeCall` 28 | | } | |_^ note: required by a bound in `EncodeLike` - --> $CARGO/parity-scale-codec-3.6.11/src/encode_like.rs + --> $CARGO/parity-scale-codec-3.6.12/src/encode_like.rs | | pub trait EncodeLike: Sized + Encode {} | ^^^^^ required by this bound in `EncodeLike` @@ -317,7 +317,7 @@ note: required because it appears within the type `RuntimeCall` 28 | | } | |_^ note: required by a bound in `Decode` - --> $CARGO/parity-scale-codec-3.6.11/src/codec.rs + --> $CARGO/parity-scale-codec-3.6.12/src/codec.rs | | pub trait Decode: Sized { | ^^^^^ required by this bound in `Decode` diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml index 346aa054159..a2a8970814b 100644 --- a/substrate/frame/system/Cargo.toml +++ b/substrate/frame/system/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] cfg-if = "1.0" -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } serde = { features = ["alloc", "derive"], workspace = true } diff --git a/substrate/frame/system/benchmarking/Cargo.toml b/substrate/frame/system/benchmarking/Cargo.toml index 473a6bb132d..022f0ffce6b 100644 --- a/substrate/frame/system/benchmarking/Cargo.toml +++ b/substrate/frame/system/benchmarking/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../benchmarking", default-features = false } frame-support = { path = "../../support", default-features = false } diff --git a/substrate/frame/system/rpc/runtime-api/Cargo.toml b/substrate/frame/system/rpc/runtime-api/Cargo.toml index 70e66769a8b..b134cc3b617 100644 --- a/substrate/frame/system/rpc/runtime-api/Cargo.toml +++ b/substrate/frame/system/rpc/runtime-api/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } sp-api = { path = "../../../../primitives/api", default-features = false } [features] diff --git a/substrate/frame/timestamp/Cargo.toml b/substrate/frame/timestamp/Cargo.toml index da49b29c89b..93ce09611b5 100644 --- a/substrate/frame/timestamp/Cargo.toml +++ b/substrate/frame/timestamp/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path 
= "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/tips/Cargo.toml b/substrate/frame/tips/Cargo.toml index a2acf0638ff..bcd54461406 100644 --- a/substrate/frame/tips/Cargo.toml +++ b/substrate/frame/tips/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } diff --git a/substrate/frame/transaction-payment/Cargo.toml b/substrate/frame/transaction-payment/Cargo.toml index 24e5a714f0f..4f7da9ae46f 100644 --- a/substrate/frame/transaction-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml index fef9afdee05..177621d9adb 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml @@ -23,7 +23,7 @@ frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } pallet-asset-conversion = { path = "../../asset-conversion", default-features = false } pallet-transaction-payment = { path = "..", default-features = false } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } [dev-dependencies] diff --git a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml index fc4f1aecc15..a4a8efad869 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -28,7 +28,7 @@ pallet-transaction-payment = { path = "..", default-features = false } frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } # Other dependencies -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } diff --git a/substrate/frame/transaction-payment/rpc/Cargo.toml b/substrate/frame/transaction-payment/rpc/Cargo.toml index 7f5e0d0b466..2c9f814460f 100644 --- a/substrate/frame/transaction-payment/rpc/Cargo.toml +++ 
b/substrate/frame/transaction-payment/rpc/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } pallet-transaction-payment-rpc-runtime-api = { path = "runtime-api" } sp-api = { path = "../../../primitives/api" } diff --git a/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml b/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml index 913dccc05c4..6c0241ec5c0 100644 --- a/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml +++ b/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } pallet-transaction-payment = { path = "../..", default-features = false } sp-api = { path = "../../../../primitives/api", default-features = false } sp-runtime = { path = "../../../../primitives/runtime", default-features = false } diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml b/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml index 8a6ee09f8dd..4d32a5123cf 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml @@ -22,7 +22,7 @@ frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } # Other dependencies -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } [features] diff --git a/substrate/frame/transaction-storage/Cargo.toml b/substrate/frame/transaction-storage/Cargo.toml index f5a964207ea..bf647ca13ec 100644 --- a/substrate/frame/transaction-storage/Cargo.toml +++ b/substrate/frame/transaction-storage/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = { version = "6.2.2", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/treasury/Cargo.toml b/substrate/frame/treasury/Cargo.toml index 34037338a52..c93272af11d 100644 --- a/substrate/frame/treasury/Cargo.toml +++ b/substrate/frame/treasury/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", "max-encoded-len", ] } diff --git 
a/substrate/frame/try-runtime/Cargo.toml b/substrate/frame/try-runtime/Cargo.toml index 15c8ca5d27a..e4e5f1940b2 100644 --- a/substrate/frame/try-runtime/Cargo.toml +++ b/substrate/frame/try-runtime/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } sp-api = { path = "../../primitives/api", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false } diff --git a/substrate/frame/tx-pause/Cargo.toml b/substrate/frame/tx-pause/Cargo.toml index 5f028179037..e44bb90dd7f 100644 --- a/substrate/frame/tx-pause/Cargo.toml +++ b/substrate/frame/tx-pause/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } docify = "0.2.8" frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/uniques/Cargo.toml b/substrate/frame/uniques/Cargo.toml index ee6af191d33..65b727b40b2 100644 --- a/substrate/frame/uniques/Cargo.toml +++ b/substrate/frame/uniques/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/utility/Cargo.toml b/substrate/frame/utility/Cargo.toml index 2ad575ed51f..00e8be75a3d 100644 --- a/substrate/frame/utility/Cargo.toml +++ b/substrate/frame/utility/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/vesting/Cargo.toml b/substrate/frame/vesting/Cargo.toml index e71731e3977..7372b842403 100644 --- a/substrate/frame/vesting/Cargo.toml +++ b/substrate/frame/vesting/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } log = { workspace = true } diff --git a/substrate/frame/whitelist/Cargo.toml b/substrate/frame/whitelist/Cargo.toml index 5c28fe29142..61bbb278019 100644 --- a/substrate/frame/whitelist/Cargo.toml +++ 
b/substrate/frame/whitelist/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/primitives/api/Cargo.toml b/substrate/primitives/api/Cargo.toml index 2f553819b1b..f48480f398d 100644 --- a/substrate/primitives/api/Cargo.toml +++ b/substrate/primitives/api/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } sp-api-proc-macro = { path = "proc-macro", default-features = false } sp-core = { path = "../core", default-features = false } sp-std = { path = "../std", default-features = false } diff --git a/substrate/primitives/api/test/Cargo.toml b/substrate/primitives/api/test/Cargo.toml index a4af08c4b89..b49f774161f 100644 --- a/substrate/primitives/api/test/Cargo.toml +++ b/substrate/primitives/api/test/Cargo.toml @@ -22,7 +22,7 @@ sp-tracing = { path = "../../tracing" } sp-runtime = { path = "../../runtime" } sp-consensus = { path = "../../consensus/common" } sc-block-builder = { path = "../../../client/block-builder" } -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } sp-state-machine = { path = "../../state-machine" } trybuild = "1.0.88" rustversion = "1.0.6" diff --git a/substrate/primitives/application-crypto/Cargo.toml b/substrate/primitives/application-crypto/Cargo.toml index 20e2be4d155..cbb9f213357 100644 --- a/substrate/primitives/application-crypto/Cargo.toml +++ b/substrate/primitives/application-crypto/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-core = { path = "../core", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["alloc", "derive"], workspace = true } sp-std = { path = "../std", default-features = false } diff --git a/substrate/primitives/arithmetic/Cargo.toml b/substrate/primitives/arithmetic/Cargo.toml index 8acb1e1992c..a9f2b80156f 100644 --- a/substrate/primitives/arithmetic/Cargo.toml +++ b/substrate/primitives/arithmetic/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", "max-encoded-len", ] } diff --git a/substrate/primitives/authority-discovery/Cargo.toml b/substrate/primitives/authority-discovery/Cargo.toml index 88d93f40059..72a8bb7fc47 100644 --- 
a/substrate/primitives/authority-discovery/Cargo.toml +++ b/substrate/primitives/authority-discovery/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { path = "../api", default-features = false } sp-application-crypto = { path = "../application-crypto", default-features = false } diff --git a/substrate/primitives/blockchain/Cargo.toml b/substrate/primitives/blockchain/Cargo.toml index e716b61bfeb..5e51a2d06ed 100644 --- a/substrate/primitives/blockchain/Cargo.toml +++ b/substrate/primitives/blockchain/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } futures = "0.3.30" log = { workspace = true, default-features = true } parking_lot = "0.12.1" diff --git a/substrate/primitives/consensus/aura/Cargo.toml b/substrate/primitives/consensus/aura/Cargo.toml index b689c84f158..a5449917817 100644 --- a/substrate/primitives/consensus/aura/Cargo.toml +++ b/substrate/primitives/consensus/aura/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } diff --git a/substrate/primitives/consensus/babe/Cargo.toml b/substrate/primitives/consensus/babe/Cargo.toml index 799d474aebe..46c032ba61a 100644 --- a/substrate/primitives/consensus/babe/Cargo.toml +++ b/substrate/primitives/consensus/babe/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-api = { path = "../../api", default-features = false } diff --git a/substrate/primitives/consensus/beefy/Cargo.toml b/substrate/primitives/consensus/beefy/Cargo.toml index c38d004cf9b..a682939a02f 100644 --- a/substrate/primitives/consensus/beefy/Cargo.toml +++ b/substrate/primitives/consensus/beefy/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["alloc", "derive"], workspace = true } 
sp-api = { path = "../../api", default-features = false } diff --git a/substrate/primitives/consensus/grandpa/Cargo.toml b/substrate/primitives/consensus/grandpa/Cargo.toml index 6c228383d00..f63f5f3122f 100644 --- a/substrate/primitives/consensus/grandpa/Cargo.toml +++ b/substrate/primitives/consensus/grandpa/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } grandpa = { package = "finality-grandpa", version = "0.16.2", default-features = false, features = ["derive-codec"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/primitives/consensus/pow/Cargo.toml b/substrate/primitives/consensus/pow/Cargo.toml index 7a884f865fb..0700e2c4f8b 100644 --- a/substrate/primitives/consensus/pow/Cargo.toml +++ b/substrate/primitives/consensus/pow/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } sp-api = { path = "../../api", default-features = false } sp-core = { path = "../../core", default-features = false } sp-runtime = { path = "../../runtime", default-features = false } diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml index 50348054da0..c8eb9b76b93 100644 --- a/substrate/primitives/consensus/sassafras/Cargo.toml +++ b/substrate/primitives/consensus/sassafras/Cargo.toml @@ -18,7 +18,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -scale-codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +scale-codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true } sp-api = { path = "../../api", default-features = false } diff --git a/substrate/primitives/consensus/slots/Cargo.toml b/substrate/primitives/consensus/slots/Cargo.toml index a8b12900617..dd519eab464 100644 --- a/substrate/primitives/consensus/slots/Cargo.toml +++ b/substrate/primitives/consensus/slots/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-timestamp = { path = "../../timestamp", default-features = false } diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index b7f3a999765..f931faf8bd0 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = 
"parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } log = { workspace = true } serde = { optional = true, features = ["alloc", "derive"], workspace = true } diff --git a/substrate/primitives/externalities/Cargo.toml b/substrate/primitives/externalities/Cargo.toml index 20fa3e3e397..3a0d0315e91 100644 --- a/substrate/primitives/externalities/Cargo.toml +++ b/substrate/primitives/externalities/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } environmental = { version = "1.1.3", default-features = false } sp-storage = { path = "../storage", default-features = false } diff --git a/substrate/primitives/genesis-builder/Cargo.toml b/substrate/primitives/genesis-builder/Cargo.toml index 96e99553294..4fc8a0416fb 100644 --- a/substrate/primitives/genesis-builder/Cargo.toml +++ b/substrate/primitives/genesis-builder/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bytes"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bytes"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } sp-api = { path = "../api", default-features = false } diff --git a/substrate/primitives/inherents/Cargo.toml b/substrate/primitives/inherents/Cargo.toml index c08ac459de5..c63aca801a0 100644 --- a/substrate/primitives/inherents/Cargo.toml +++ b/substrate/primitives/inherents/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" thiserror = { optional = true, workspace = true } diff --git a/substrate/primitives/io/Cargo.toml b/substrate/primitives/io/Cargo.toml index dddea4ffa23..abb16d163da 100644 --- a/substrate/primitives/io/Cargo.toml +++ b/substrate/primitives/io/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bytes = { version = "1.1.0", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bytes"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bytes"] } sp-core = { path = "../core", default-features = false } sp-crypto-hashing = { path = "../crypto/hashing", default-features = false } sp-keystore = { path = "../keystore", default-features = false, optional = true } diff --git a/substrate/primitives/keystore/Cargo.toml b/substrate/primitives/keystore/Cargo.toml index 3f1a71b62ac..313b9e1c005 100644 --- a/substrate/primitives/keystore/Cargo.toml +++ b/substrate/primitives/keystore/Cargo.toml @@ -16,7 +16,7 @@ 
workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } parking_lot = { version = "0.12.1", default-features = false, optional = true } sp-core = { path = "../core", default-features = false } sp-externalities = { path = "../externalities", default-features = false } diff --git a/substrate/primitives/merkle-mountain-range/Cargo.toml b/substrate/primitives/merkle-mountain-range/Cargo.toml index 65d8bd79e5a..23efc1b687c 100644 --- a/substrate/primitives/merkle-mountain-range/Cargo.toml +++ b/substrate/primitives/merkle-mountain-range/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } mmr-lib = { package = "ckb-merkle-mountain-range", git = "https://github.com/paritytech/merkle-mountain-range.git", branch = "master", default-features = false } diff --git a/substrate/primitives/metadata-ir/Cargo.toml b/substrate/primitives/metadata-ir/Cargo.toml index ca8408d0ad9..90ecd1dfb13 100644 --- a/substrate/primitives/metadata-ir/Cargo.toml +++ b/substrate/primitives/metadata-ir/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/primitives/mixnet/Cargo.toml b/substrate/primitives/mixnet/Cargo.toml index 166609ad922..ef32503000d 100644 --- a/substrate/primitives/mixnet/Cargo.toml +++ b/substrate/primitives/mixnet/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { default-features = false, path = "../api" } sp-application-crypto = { default-features = false, path = "../application-crypto" } diff --git a/substrate/primitives/npos-elections/Cargo.toml b/substrate/primitives/npos-elections/Cargo.toml index afa59af64d6..2da74429a48 100644 --- a/substrate/primitives/npos-elections/Cargo.toml +++ b/substrate/primitives/npos-elections/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-arithmetic = { path = "../arithmetic", 
default-features = false } diff --git a/substrate/primitives/runtime-interface/Cargo.toml b/substrate/primitives/runtime-interface/Cargo.toml index b4fab17eeb7..f853a532515 100644 --- a/substrate/primitives/runtime-interface/Cargo.toml +++ b/substrate/primitives/runtime-interface/Cargo.toml @@ -23,7 +23,7 @@ sp-std = { path = "../std", default-features = false } sp-tracing = { path = "../tracing", default-features = false } sp-runtime-interface-proc-macro = { path = "proc-macro" } sp-externalities = { path = "../externalities", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bytes"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bytes"] } static_assertions = "1.0.0" primitive-types = { version = "0.12.0", default-features = false } sp-storage = { path = "../storage", default-features = false } diff --git a/substrate/primitives/runtime/Cargo.toml b/substrate/primitives/runtime/Cargo.toml index 0389c9f5b2f..4d298b7ce5e 100644 --- a/substrate/primitives/runtime/Cargo.toml +++ b/substrate/primitives/runtime/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } either = { version = "1.5", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } impl-trait-for-tuples = "0.2.2" diff --git a/substrate/primitives/session/Cargo.toml b/substrate/primitives/session/Cargo.toml index 5314ccd6d96..9355ab42010 100644 --- a/substrate/primitives/session/Cargo.toml +++ b/substrate/primitives/session/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { path = "../api", default-features = false } sp-core = { path = "../core", default-features = false } diff --git a/substrate/primitives/staking/Cargo.toml b/substrate/primitives/staking/Cargo.toml index e380abb6a8c..6e3ce4bca10 100644 --- a/substrate/primitives/staking/Cargo.toml +++ b/substrate/primitives/staking/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { features = ["alloc", "derive"], optional = true, workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" diff --git a/substrate/primitives/state-machine/Cargo.toml b/substrate/primitives/state-machine/Cargo.toml index e00ff5c27dd..c383a17cb00 100644 --- a/substrate/primitives/state-machine/Cargo.toml +++ b/substrate/primitives/state-machine/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false 
} +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } hash-db = { version = "0.16.0", default-features = false } log = { workspace = true } parking_lot = { version = "0.12.1", optional = true } diff --git a/substrate/primitives/statement-store/Cargo.toml b/substrate/primitives/statement-store/Cargo.toml index b36bff69a00..bb893b25dc4 100644 --- a/substrate/primitives/statement-store/Cargo.toml +++ b/substrate/primitives/statement-store/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-core = { path = "../core", default-features = false } sp-crypto-hashing = { path = "../crypto/hashing", default-features = false } diff --git a/substrate/primitives/storage/Cargo.toml b/substrate/primitives/storage/Cargo.toml index acedc8d0004..c3318943d0d 100644 --- a/substrate/primitives/storage/Cargo.toml +++ b/substrate/primitives/storage/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } impl-serde = { version = "0.4.0", optional = true, default-features = false } ref-cast = "1.0.0" serde = { features = ["alloc", "derive"], optional = true, workspace = true } diff --git a/substrate/primitives/test-primitives/Cargo.toml b/substrate/primitives/test-primitives/Cargo.toml index 05135554315..b7be6148609 100644 --- a/substrate/primitives/test-primitives/Cargo.toml +++ b/substrate/primitives/test-primitives/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true } sp-application-crypto = { path = "../application-crypto", default-features = false } diff --git a/substrate/primitives/timestamp/Cargo.toml b/substrate/primitives/timestamp/Cargo.toml index 5a1d4fcc985..c1bf9b3255e 100644 --- a/substrate/primitives/timestamp/Cargo.toml +++ b/substrate/primitives/timestamp/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } thiserror = { optional = true, workspace = true } sp-inherents = { path = "../inherents", default-features = false } sp-runtime = { path = "../runtime", default-features = false } diff --git a/substrate/primitives/tracing/Cargo.toml b/substrate/primitives/tracing/Cargo.toml index ce30302d4bb..8adec1670dc 100644 --- a/substrate/primitives/tracing/Cargo.toml +++ b/substrate/primitives/tracing/Cargo.toml @@ -21,7 +21,7 @@ 
features = ["with-tracing"] targets = ["wasm32-unknown-unknown", "x86_64-unknown-linux-gnu"] [dependencies] -codec = { version = "3.6.1", package = "parity-scale-codec", default-features = false, features = [ +codec = { version = "3.6.12", package = "parity-scale-codec", default-features = false, features = [ "derive", ] } tracing = { version = "0.1.29", default-features = false } diff --git a/substrate/primitives/transaction-storage-proof/Cargo.toml b/substrate/primitives/transaction-storage-proof/Cargo.toml index 6cce469d3f9..1e874c3595a 100644 --- a/substrate/primitives/transaction-storage-proof/Cargo.toml +++ b/substrate/primitives/transaction-storage-proof/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-core = { path = "../core", optional = true, default-features = false } sp-inherents = { path = "../inherents", default-features = false } diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index 29c3c787087..45459c180d4 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -22,7 +22,7 @@ harness = false [dependencies] ahash = { version = "0.8.2", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } hash-db = { version = "0.16.0", default-features = false } lazy_static = { version = "1.4.0", optional = true } memory-db = { version = "0.32.0", default-features = false } diff --git a/substrate/primitives/version/Cargo.toml b/substrate/primitives/version/Cargo.toml index d686b0c7551..f8ef8f66c53 100644 --- a/substrate/primitives/version/Cargo.toml +++ b/substrate/primitives/version/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } impl-serde = { version = "0.4.0", default-features = false, optional = true } parity-wasm = { version = "0.45", optional = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/primitives/version/proc-macro/Cargo.toml b/substrate/primitives/version/proc-macro/Cargo.toml index f7abf88c9a6..3abd5c09106 100644 --- a/substrate/primitives/version/proc-macro/Cargo.toml +++ b/substrate/primitives/version/proc-macro/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } proc-macro2 = "1.0.56" quote = { workspace = true } syn = { features = ["extra-traits", "fold", "full", "visit"], workspace = true } diff --git a/substrate/primitives/wasm-interface/Cargo.toml b/substrate/primitives/wasm-interface/Cargo.toml index 15a20fab5e5..a0c8342d2d3 100644 --- a/substrate/primitives/wasm-interface/Cargo.toml +++ 
b/substrate/primitives/wasm-interface/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" log = { optional = true, workspace = true, default-features = true } wasmtime = { version = "8.0.1", default-features = false, optional = true } diff --git a/substrate/primitives/weights/Cargo.toml b/substrate/primitives/weights/Cargo.toml index e73d4a702b4..d2d72a7cb01 100644 --- a/substrate/primitives/weights/Cargo.toml +++ b/substrate/primitives/weights/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bounded-collections = { version = "0.2.0", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["alloc", "derive"], workspace = true } smallvec = "1.11.0" diff --git a/substrate/test-utils/client/Cargo.toml b/substrate/test-utils/client/Cargo.toml index a5f000057de..5871f1bf5b4 100644 --- a/substrate/test-utils/client/Cargo.toml +++ b/substrate/test-utils/client/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.2.2" async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } futures = "0.3.30" serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index 1568ee500bd..038076e10c5 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -21,7 +21,7 @@ sp-consensus-aura = { path = "../../primitives/consensus/aura", default-features sp-consensus-babe = { path = "../../primitives/consensus/babe", default-features = false, features = ["serde"] } sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features = false } sp-block-builder = { path = "../../primitives/block-builder", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-inherents = { path = "../../primitives/inherents", default-features = false } sp-keyring = { path = "../../primitives/keyring", default-features = false } diff --git a/substrate/test-utils/runtime/transaction-pool/Cargo.toml b/substrate/test-utils/runtime/transaction-pool/Cargo.toml index 9b52706c739..360e2b7b810 100644 --- a/substrate/test-utils/runtime/transaction-pool/Cargo.toml +++ b/substrate/test-utils/runtime/transaction-pool/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } futures = "0.3.30" parking_lot = "0.12.1" 
thiserror = { workspace = true } diff --git a/substrate/utils/fork-tree/Cargo.toml b/substrate/utils/fork-tree/Cargo.toml index 87135ef2afb..275f44623bd 100644 --- a/substrate/utils/fork-tree/Cargo.toml +++ b/substrate/utils/fork-tree/Cargo.toml @@ -17,4 +17,4 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } diff --git a/substrate/utils/frame/benchmarking-cli/Cargo.toml b/substrate/utils/frame/benchmarking-cli/Cargo.toml index fa270759c91..7cfacdc2e5e 100644 --- a/substrate/utils/frame/benchmarking-cli/Cargo.toml +++ b/substrate/utils/frame/benchmarking-cli/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] array-bytes = "6.2.2" chrono = "0.4" clap = { version = "4.5.3", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } comfy-table = { version = "7.1.0", default-features = false } handlebars = "5.1.0" Inflector = "0.11.4" diff --git a/substrate/utils/frame/remote-externalities/Cargo.toml b/substrate/utils/frame/remote-externalities/Cargo.toml index 82b01915483..2911d5eef65 100644 --- a/substrate/utils/frame/remote-externalities/Cargo.toml +++ b/substrate/utils/frame/remote-externalities/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { version = "0.22", features = ["http-client"] } -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } log = { workspace = true, default-features = true } serde = { workspace = true, default-features = true } sp-core = { path = "../../../primitives/core" } diff --git a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml index 3673b2790c5..ee3bf5eb68d 100644 --- a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml +++ b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } serde = { features = ["derive"], workspace = true, default-features = true } sp-core = { path = "../../../../primitives/core" } diff --git a/substrate/utils/frame/rpc/support/Cargo.toml b/substrate/utils/frame/rpc/support/Cargo.toml index 84db06da7b0..bf566f909ec 100644 --- a/substrate/utils/frame/rpc/support/Cargo.toml +++ b/substrate/utils/frame/rpc/support/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } jsonrpsee = { version = "0.22", features = ["jsonrpsee-types"] } serde = { workspace = true, default-features = true } frame-support = { path = "../../../../frame/support" } diff --git a/substrate/utils/frame/rpc/system/Cargo.toml b/substrate/utils/frame/rpc/system/Cargo.toml index 3e623daa14b..6829d753ed7 100644 --- a/substrate/utils/frame/rpc/system/Cargo.toml +++ b/substrate/utils/frame/rpc/system/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = 
"parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } futures = "0.3.30" log = { workspace = true, default-features = true } diff --git a/templates/minimal/pallets/template/Cargo.toml b/templates/minimal/pallets/template/Cargo.toml index 909ba034454..e6fe43abc09 100644 --- a/templates/minimal/pallets/template/Cargo.toml +++ b/templates/minimal/pallets/template/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", features = [ "derive", ], default-features = false } scale-info = { version = "2.11.1", default-features = false, features = [ diff --git a/templates/minimal/runtime/Cargo.toml b/templates/minimal/runtime/Cargo.toml index ceac8a49853..99559308e5b 100644 --- a/templates/minimal/runtime/Cargo.toml +++ b/templates/minimal/runtime/Cargo.toml @@ -13,7 +13,7 @@ publish = false workspace = true [dependencies] -parity-scale-codec = { version = "3.0.0", default-features = false } +parity-scale-codec = { version = "3.6.12", default-features = false } scale-info = { version = "2.6.0", default-features = false } # this is a frame-based runtime, thus importing `frame` with runtime feature enabled. diff --git a/templates/parachain/node/Cargo.toml b/templates/parachain/node/Cargo.toml index ed857b4e4b9..6f715082982 100644 --- a/templates/parachain/node/Cargo.toml +++ b/templates/parachain/node/Cargo.toml @@ -19,7 +19,7 @@ workspace = true [dependencies] clap = { version = "4.5.3", features = ["derive"] } log = { workspace = true, default-features = true } -codec = { package = "parity-scale-codec", version = "3.0.0" } +codec = { package = "parity-scale-codec", version = "3.6.12" } serde = { features = ["derive"], workspace = true, default-features = true } jsonrpsee = { version = "0.22", features = ["server"] } futures = "0.3.28" diff --git a/templates/parachain/pallets/template/Cargo.toml b/templates/parachain/pallets/template/Cargo.toml index 199da2f12d2..c5334e871fa 100644 --- a/templates/parachain/pallets/template/Cargo.toml +++ b/templates/parachain/pallets/template/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } scale-info = { version = "2.11.1", default-features = false, features = [ diff --git a/templates/parachain/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml index d15ff2807a6..74b82f06e3a 100644 --- a/templates/parachain/runtime/Cargo.toml +++ b/templates/parachain/runtime/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } hex-literal = { version = "0.4.1", optional = true } diff --git a/templates/solochain/pallets/template/Cargo.toml b/templates/solochain/pallets/template/Cargo.toml index 24519f1d22e..1a122bd82d4 100644 --- 
a/templates/solochain/pallets/template/Cargo.toml +++ b/templates/solochain/pallets/template/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } scale-info = { version = "2.11.1", default-features = false, features = [ diff --git a/templates/solochain/runtime/Cargo.toml b/templates/solochain/runtime/Cargo.toml index 7a81f192043..b4a543826e7 100644 --- a/templates/solochain/runtime/Cargo.toml +++ b/templates/solochain/runtime/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } scale-info = { version = "2.11.1", default-features = false, features = [ -- GitLab From 23c5bbc86dd540a7e5b430edd59f16a992035f4c Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Wed, 15 May 2024 18:07:36 +0200 Subject: [PATCH 014/106] [ci] Use default gh runners for small workloads (#4473) `ubuntu-latest` runners are free for public repos, so it's better (and cheaper) to use them for small jobs --- .github/workflows/check-licenses.yml | 2 +- .github/workflows/checks-quick.yml | 19 ++++++++----------- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/.github/workflows/check-licenses.yml b/.github/workflows/check-licenses.yml index 31716b1a04a..3bc95305f74 100644 --- a/.github/workflows/check-licenses.yml +++ b/.github/workflows/check-licenses.yml @@ -9,7 +9,7 @@ permissions: jobs: check-licenses: - runs-on: arc-runners-polkadot-sdk + runs-on: ubuntu-latest timeout-minutes: 10 env: LICENSES: "'Apache-2.0' 'GPL-3.0-only' 'GPL-3.0-or-later WITH Classpath-exception-2.0'" diff --git a/.github/workflows/checks-quick.yml b/.github/workflows/checks-quick.yml index 1fbf83e3465..217adf40a39 100644 --- a/.github/workflows/checks-quick.yml +++ b/.github/workflows/checks-quick.yml @@ -19,7 +19,7 @@ jobs: # GitHub Actions allows using 'env' in a container context. # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322 # This workaround sets the container image for each job using 'set-image' job output.
- runs-on: arc-runners-polkadot-sdk + runs-on: ubuntu-latest timeout-minutes: 10 outputs: IMAGE: ${{ steps.set_image.outputs.IMAGE }} @@ -29,7 +29,7 @@ jobs: - id: set_image run: cat .github/env >> $GITHUB_OUTPUT fmt: - runs-on: arc-runners-polkadot-sdk + runs-on: ubuntu-latest timeout-minutes: 10 needs: [set-image] container: @@ -39,11 +39,8 @@ jobs: - name: Cargo fmt run: cargo +nightly fmt --all -- --check check-dependency-rules: - runs-on: arc-runners-polkadot-sdk + runs-on: ubuntu-latest timeout-minutes: 10 - # needs: [set-image] - # container: - # image: ${{ needs.set-image.outputs.IMAGE }} steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: check dependency rules @@ -51,7 +48,7 @@ jobs: cd substrate/ ../.gitlab/ensure-deps.sh check-rust-feature-propagation: - runs-on: arc-runners-polkadot-sdk + runs-on: ubuntu-latest timeout-minutes: 10 needs: [set-image] container: @@ -61,7 +58,7 @@ jobs: - name: run zepter run: zepter run check test-rust-features: - runs-on: arc-runners-polkadot-sdk + runs-on: ubuntu-latest timeout-minutes: 10 needs: [set-image] container: @@ -71,7 +68,7 @@ jobs: - name: run rust features run: bash .gitlab/rust-features.sh . check-toml-format: - runs-on: arc-runners-polkadot-sdk + runs-on: ubuntu-latest timeout-minutes: 10 needs: [set-image] container: @@ -83,7 +80,7 @@ jobs: taplo format --check --config .config/taplo.toml echo "Please run `taplo format --config .config/taplo.toml` to fix any toml formatting issues" check-workspace: - runs-on: arc-runners-polkadot-sdk + runs-on: ubuntu-latest timeout-minutes: 10 steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.0 (22. Sep 2023) @@ -98,7 +95,7 @@ jobs: "substrate/frame/contracts/fixtures/build" "substrate/frame/contracts/fixtures/contracts/common" check-markdown: - runs-on: arc-runners-polkadot-sdk + runs-on: ubuntu-latest timeout-minutes: 10 steps: - name: Checkout sources -- GitLab From 3a20232337410c36eeb9929136f96b9a703ccdf8 Mon Sep 17 00:00:00 2001 From: Evgeny Snitko Date: Wed, 15 May 2024 21:59:09 +0400 Subject: [PATCH 015/106] Include RUNTIME_METADATA_HASH in cache key (#4476) Add RUNTIME_METADATA_HASH variable to [`cache.extraEnv`](https://github.com/paritytech/forklift/tree/0.12.4?tab=readme-ov-file#cache) in config.toml (setting available from forklift 0.12.4) --- .forklift/config.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.forklift/config.toml b/.forklift/config.toml index 403a452aa03..ab3b2729a46 100644 --- a/.forklift/config.toml +++ b/.forklift/config.toml @@ -10,6 +10,9 @@ jobsBlackList = [] logLevel = "warn" threadsCount = 6 +[cache] +extraEnv = ["RUNTIME_METADATA_HASH"] + [metrics] enabled = true pushEndpoint = "placeholder" -- GitLab From d237adfb13ceab8724fabee74469788e19e0e6af Mon Sep 17 00:00:00 2001 From: Milos Kriz <82968568+miloskriz@users.noreply.github.com> Date: Wed, 15 May 2024 21:47:26 +0100 Subject: [PATCH 016/106] Add IBP bootnodes to `coretime-westend` and `coretime-kusama` and others (#4276) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Dear team, hello, Please consider this request to add bootnode endpoints to the chainspecs of `coretime-westend` and `coretime-kusama`. These bootnodes are operated by the Infrastructure Builders Programme as part of the commitments of their members. All these endpoints have been tested according to the procedure indicated [here](https://wiki.ibp.network/docs/consumers/bootnodes#testing-a-bootnode). 
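For reference, a minimal sketch of that test: start a fresh node that is allowed to peer *only* with the candidate bootnode and check that it discovers peers and starts syncing. The flags are the standard Substrate networking options and the chain name is an assumption based on the usual `polkadot-parachain` CLI; the linked page remains the authoritative procedure:

```sh
# Illustrative sketch only -- see the linked IBP procedure for the exact steps.
# The multiaddr is one of the coretime-westend endpoints added in this PR.
# If the node reports peers and begins to sync, the bootnode is working.
polkadot-parachain --chain coretime-westend \
  --base-path /tmp/bootnode-test \
  --reserved-only \
  --reserved-nodes "/dns/boot.gatotech.network/tcp/33350/p2p/12D3KooWN6FJDaZvWbtX1pSc6UdHgyF2UZtYxPp3UkXQZa8ko7uS"
```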
Additionally, please also consider some maintenance changes to Gatotech's bootnode endpoint in several other chains. Many thanks!! Best regards! **_Milos_** --------- Co-authored-by: hitchhooker Co-authored-by: Bastian Köcher --- .../chain-specs/asset-hub-kusama.json | 4 ++-- .../chain-specs/asset-hub-polkadot.json | 4 ++-- .../chain-specs/asset-hub-westend.json | 4 ++-- .../chain-specs/bridge-hub-kusama.json | 4 ++-- .../chain-specs/bridge-hub-polkadot.json | 6 ++++-- .../chain-specs/bridge-hub-westend.json | 6 ++++-- .../chain-specs/collectives-polkadot.json | 4 ++-- .../chain-specs/collectives-westend.json | 4 ++-- .../chain-specs/coretime-kusama.json | 18 +++++++++++++++++- .../chain-specs/coretime-westend.json | 14 +++++++++++++- .../parachains/chain-specs/people-westend.json | 2 ++ polkadot/node/service/chain-specs/kusama.json | 4 ++-- polkadot/node/service/chain-specs/paseo.json | 2 ++ .../node/service/chain-specs/polkadot.json | 4 ++-- polkadot/node/service/chain-specs/westend.json | 4 ++-- 15 files changed, 60 insertions(+), 24 deletions(-) diff --git a/cumulus/parachains/chain-specs/asset-hub-kusama.json b/cumulus/parachains/chain-specs/asset-hub-kusama.json index 66a705a4086..00e342381ee 100644 --- a/cumulus/parachains/chain-specs/asset-hub-kusama.json +++ b/cumulus/parachains/chain-specs/asset-hub-kusama.json @@ -11,8 +11,8 @@ "/dns/boot.stake.plus/tcp/34334/wss/p2p/12D3KooWAzSSZ7jLqMw1WPomYEKCYANQaKemXQ8BKoFvNEvfmdqR", "/dns/boot.metaspan.io/tcp/26052/p2p/12D3KooW9z9hKqe3mqYAp5UJMhZiCqhkTHyiR43fegnGmTJ3JAba", "/dns/boot.metaspan.io/tcp/26056/wss/p2p/12D3KooW9z9hKqe3mqYAp5UJMhZiCqhkTHyiR43fegnGmTJ3JAba", - "/dns/boot-cr.gatotech.network/tcp/33210/p2p/12D3KooWRMUYeWMPkadDG8baX9j1e95fspfp8MhPGym5BQza7Fm5", - "/dns/boot-cr.gatotech.network/tcp/35210/wss/p2p/12D3KooWRMUYeWMPkadDG8baX9j1e95fspfp8MhPGym5BQza7Fm5", + "/dns/boot.gatotech.network/tcp/33210/p2p/12D3KooWRMUYeWMPkadDG8baX9j1e95fspfp8MhPGym5BQza7Fm5", + "/dns/boot.gatotech.network/tcp/35210/wss/p2p/12D3KooWRMUYeWMPkadDG8baX9j1e95fspfp8MhPGym5BQza7Fm5", "/dns/statemine-bootnode.turboflakes.io/tcp/30320/p2p/12D3KooWN2Qqvp5wWgjbBMpbqhKgvSibSHfomP5VWVD9VCn3VrV4", "/dns/statemine-bootnode.turboflakes.io/tcp/30420/wss/p2p/12D3KooWN2Qqvp5wWgjbBMpbqhKgvSibSHfomP5VWVD9VCn3VrV4", "/dns/boot-node.helikon.io/tcp/10210/p2p/12D3KooWFXRQce3aMgZMn5SxvHtYH4PsR63TZLf8LrnBsEVTyzdr", diff --git a/cumulus/parachains/chain-specs/asset-hub-polkadot.json b/cumulus/parachains/chain-specs/asset-hub-polkadot.json index 16caa52ba91..22b11757b66 100644 --- a/cumulus/parachains/chain-specs/asset-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/asset-hub-polkadot.json @@ -11,8 +11,8 @@ "/dns/boot.stake.plus/tcp/35334/wss/p2p/12D3KooWFrQjYaPZSSLLxEVmoaHFcrF6VoY4awG4KRSLaqy3JCdQ", "/dns/boot.metaspan.io/tcp/16052/p2p/12D3KooWLwiJuvqQUB4kYaSjLenFKH9dWZhGZ4qi7pSb3sUYU651", "/dns/boot.metaspan.io/tcp/16056/wss/p2p/12D3KooWLwiJuvqQUB4kYaSjLenFKH9dWZhGZ4qi7pSb3sUYU651", - "/dns/boot-cr.gatotech.network/tcp/33110/p2p/12D3KooWKgwQfAeDoJARdtxFNNWfbYmcu6s4yUuSifnNoDgzHZgm", - "/dns/boot-cr.gatotech.network/tcp/35110/wss/p2p/12D3KooWKgwQfAeDoJARdtxFNNWfbYmcu6s4yUuSifnNoDgzHZgm", + "/dns/boot.gatotech.network/tcp/33110/p2p/12D3KooWKgwQfAeDoJARdtxFNNWfbYmcu6s4yUuSifnNoDgzHZgm", + "/dns/boot.gatotech.network/tcp/35110/wss/p2p/12D3KooWKgwQfAeDoJARdtxFNNWfbYmcu6s4yUuSifnNoDgzHZgm", "/dns/statemint-bootnode.turboflakes.io/tcp/30315/p2p/12D3KooWL8CyLww3m3pRySQGGYGNJhWDMqko3j5xi67ckP7hDUvo",
"/dns/statemint-bootnode.turboflakes.io/tcp/30415/wss/p2p/12D3KooWL8CyLww3m3pRySQGGYGNJhWDMqko3j5xi67ckP7hDUvo", "/dns/boot-node.helikon.io/tcp/10220/p2p/12D3KooW9uybhguhDjVJc3U3kgZC3i8rWmAnSpbnJkmuR7C6ZsRW", diff --git a/cumulus/parachains/chain-specs/asset-hub-westend.json b/cumulus/parachains/chain-specs/asset-hub-westend.json index 3752213e702..830eb2c5918 100644 --- a/cumulus/parachains/chain-specs/asset-hub-westend.json +++ b/cumulus/parachains/chain-specs/asset-hub-westend.json @@ -13,8 +13,8 @@ "/dns/boot.stake.plus/tcp/33334/wss/p2p/12D3KooWNiB27rpXX7EYongoWWUeRKzLQxWGms6MQU2B9LX7Ztzo", "/dns/boot.metaspan.io/tcp/36052/p2p/12D3KooWBCqfNb6Y39DXTr4UBWXyjuS3hcZM1qTbHhDXxF6HkAJJ", "/dns/boot.metaspan.io/tcp/36056/wss/p2p/12D3KooWBCqfNb6Y39DXTr4UBWXyjuS3hcZM1qTbHhDXxF6HkAJJ", - "/dns/boot-cr.gatotech.network/tcp/33310/p2p/12D3KooWMSW6hr8KcNBhGFN1bg8kYC76o67PnuDEbxRhxacW6dui", - "/dns/boot-cr.gatotech.network/tcp/35310/wss/p2p/12D3KooWMSW6hr8KcNBhGFN1bg8kYC76o67PnuDEbxRhxacW6dui", + "/dns/boot.gatotech.network/tcp/33310/p2p/12D3KooWMSW6hr8KcNBhGFN1bg8kYC76o67PnuDEbxRhxacW6dui", + "/dns/boot.gatotech.network/tcp/35310/wss/p2p/12D3KooWMSW6hr8KcNBhGFN1bg8kYC76o67PnuDEbxRhxacW6dui", "/dns/westmint-bootnode.turboflakes.io/tcp/30325/p2p/12D3KooWHU4qqSyqKdbXdrCTMXUJxxueaZjqpqSaQqYiFPw6XqEx", "/dns/westmint-bootnode.turboflakes.io/tcp/30425/wss/p2p/12D3KooWHU4qqSyqKdbXdrCTMXUJxxueaZjqpqSaQqYiFPw6XqEx", "/dns/boot-node.helikon.io/tcp/10200/p2p/12D3KooWMRY8wb7rMT81LLuivvsy6ahUxKHQgYJw4zm1hC1uYLxb", diff --git a/cumulus/parachains/chain-specs/bridge-hub-kusama.json b/cumulus/parachains/chain-specs/bridge-hub-kusama.json index 6644ea41ab7..46b33ed44c1 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-kusama.json +++ b/cumulus/parachains/chain-specs/bridge-hub-kusama.json @@ -11,8 +11,8 @@ "/dns/boot.stake.plus/tcp/41334/wss/p2p/12D3KooWBzbs2jsXjG5dipktGPKaUm9XWvkmeJFsEAGkVt946Aa7", "/dns/boot.metaspan.io/tcp/26032/p2p/12D3KooWKfuSaZrLNz43PDgM4inMALXRHTSh2WBuqQtZRq8zmT1Z", "/dns/boot.metaspan.io/tcp/26036/wss/p2p/12D3KooWKfuSaZrLNz43PDgM4inMALXRHTSh2WBuqQtZRq8zmT1Z", - "/dns/boot-cr.gatotech.network/tcp/33230/p2p/12D3KooWFQFmg8UqAYLDNc2onySB6o5LLvpbx3eXZVqz9YFxAmXs", - "/dns/boot-cr.gatotech.network/tcp/35230/wss/p2p/12D3KooWFQFmg8UqAYLDNc2onySB6o5LLvpbx3eXZVqz9YFxAmXs", + "/dns/boot.gatotech.network/tcp/33230/p2p/12D3KooWFQFmg8UqAYLDNc2onySB6o5LLvpbx3eXZVqz9YFxAmXs", + "/dns/boot.gatotech.network/tcp/35230/wss/p2p/12D3KooWFQFmg8UqAYLDNc2onySB6o5LLvpbx3eXZVqz9YFxAmXs", "/dns/bridge-hub-kusama-bootnode.turboflakes.io/tcp/30615/p2p/12D3KooWE3dJXbwA5SQqbDNxHfj7BXJRcy2KiXWjJY4VUMKoa7S2", "/dns/bridge-hub-kusama-bootnode.turboflakes.io/tcp/30715/wss/p2p/12D3KooWE3dJXbwA5SQqbDNxHfj7BXJRcy2KiXWjJY4VUMKoa7S2", "/dns/boot-node.helikon.io/tcp/10250/p2p/12D3KooWDJLkhqQdXcVKWX7CqJHnpAY6PzrPc4ZG2CUWnARbmguy", diff --git a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json index c51c5eff89b..0a642caddb7 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json @@ -9,8 +9,10 @@ "/dns/polkadot-bridge-hub-connect-a-1.polkadot.io/tcp/443/wss/p2p/12D3KooWG4ypDHLKGCv4BZ6PuaGUwQHKAH6p2D6arR2uQ1eiR1T3", "/dns/polkadot-bridge-hub-boot-ng.dwellir.com/tcp/30339/p2p/12D3KooWPZ38PL3PhRVcUVYDNn7nRcZF8MykmWWLBKeDV2yna1vV", "/dns/polkadot-bridge-hub-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWPZ38PL3PhRVcUVYDNn7nRcZF8MykmWWLBKeDV2yna1vV", - 
"/dns/boot-cr.gatotech.network/tcp/33130/p2p/12D3KooWCnFzfEdd7MwUNrrDv66FuS2DM5MGuiaB4y48XS7qNjF6", - "/dns/boot-cr.gatotech.network/tcp/35130/wss/p2p/12D3KooWCnFzfEdd7MwUNrrDv66FuS2DM5MGuiaB4y48XS7qNjF6", + "/dns/boot.gatotech.network/tcp/33130/p2p/12D3KooWCnFzfEdd7MwUNrrDv66FuS2DM5MGuiaB4y48XS7qNjF6", + "/dns/boot.gatotech.network/tcp/35130/wss/p2p/12D3KooWCnFzfEdd7MwUNrrDv66FuS2DM5MGuiaB4y48XS7qNjF6", + "/dns/boot.stake.plus/tcp/42333/p2p/12D3KooWEoTCu22Uab6prbfcD1FPpPZmfhkAVeMZQJ3fHnkCVmJz", + "/dns/boot.stake.plus/tcp/42334/wss/p2p/12D3KooWEoTCu22Uab6prbfcD1FPpPZmfhkAVeMZQJ3fHnkCVmJz", "/dns/bridge-hub-polkadot-bootnode.turboflakes.io/tcp/30610/p2p/12D3KooWNEgaQRQHJHvGDh8Rg4RyLmDCCz3yAf2gAdHZZJAUUD8Q", "/dns/bridge-hub-polkadot-bootnode.turboflakes.io/tcp/30710/wss/p2p/12D3KooWNEgaQRQHJHvGDh8Rg4RyLmDCCz3yAf2gAdHZZJAUUD8Q", "/dns/boot.metaspan.io/tcp/16032/p2p/12D3KooWQTfRnrK3FfbrotpSP5RVJbjBHVBSu8VSzhj9qcvjaqnZ", diff --git a/cumulus/parachains/chain-specs/bridge-hub-westend.json b/cumulus/parachains/chain-specs/bridge-hub-westend.json index 5140071ec44..c07857894f7 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-westend.json +++ b/cumulus/parachains/chain-specs/bridge-hub-westend.json @@ -11,8 +11,10 @@ "/dns/westend-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWBpvudthz61XC4oP2YYFFJdhWohBeQ1ffn1BMSGWhapjd", "/dns/westend-bridge-hub-boot-ng.dwellir.com/tcp/30338/p2p/12D3KooWJWWRYTAwBLqYkh7iMBGDr5ouJ3MHj7M3fZ7zWS4zEk6F", "/dns/westend-bridge-hub-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWJWWRYTAwBLqYkh7iMBGDr5ouJ3MHj7M3fZ7zWS4zEk6F", - "/dns/boot-cr.gatotech.network/tcp/33330/p2p/12D3KooWJHG6qznPzTSEbuujHNcvyzBZcR9zNRPFcXWUaoVWZBEw", - "/dns/boot-cr.gatotech.network/tcp/35330/wss/p2p/12D3KooWJHG6qznPzTSEbuujHNcvyzBZcR9zNRPFcXWUaoVWZBEw", + "/dns/boot.stake.plus/tcp/40333/p2p/12D3KooWPGMsGPdGJx6HrByiKUyz91wgUHmjG5UXTmkJ9tUphAQn", + "/dns/boot.stake.plus/tcp/40334/wss/p2p/12D3KooWPGMsGPdGJx6HrByiKUyz91wgUHmjG5UXTmkJ9tUphAQn", + "/dns/boot.gatotech.network/tcp/33330/p2p/12D3KooWJHG6qznPzTSEbuujHNcvyzBZcR9zNRPFcXWUaoVWZBEw", + "/dns/boot.gatotech.network/tcp/35330/wss/p2p/12D3KooWJHG6qznPzTSEbuujHNcvyzBZcR9zNRPFcXWUaoVWZBEw", "/dns/bridge-hub-westend-bootnode.turboflakes.io/tcp/30620/p2p/12D3KooWLeExhPWCDUjcxCdzxTP5TpPbNBVG5t9MPvk1dZUM5naU", "/dns/bridge-hub-westend-bootnode.turboflakes.io/tcp/30720/wss/p2p/12D3KooWLeExhPWCDUjcxCdzxTP5TpPbNBVG5t9MPvk1dZUM5naU", "/dns/boot.metaspan.io/tcp/36032/p2p/12D3KooWPaLsu3buByBnGFQnp5UP4q1S652dGVft92TFeChizFir", diff --git a/cumulus/parachains/chain-specs/collectives-polkadot.json b/cumulus/parachains/chain-specs/collectives-polkadot.json index ce80e21ae62..b2f3ff812d0 100644 --- a/cumulus/parachains/chain-specs/collectives-polkadot.json +++ b/cumulus/parachains/chain-specs/collectives-polkadot.json @@ -11,8 +11,8 @@ "/dns/boot.stake.plus/tcp/37334/wss/p2p/12D3KooWRgFfEtwPo3xorKGYALRHRteKNgF37iN9q8xTLPYc34LA", "/dns/boot.metaspan.io/tcp/16072/p2p/12D3KooWJWTTu2t2yg5bFRH6tjEpfzKwZir5R9JRRjQpgFPXdDfp", "/dns/boot.metaspan.io/tcp/16076/wss/p2p/12D3KooWJWTTu2t2yg5bFRH6tjEpfzKwZir5R9JRRjQpgFPXdDfp", - "/dns/boot-cr.gatotech.network/tcp/33120/p2p/12D3KooWGZsa9tSeLQ1VeC996e1YsCPuyRYMipHQuXikPjcKcpVQ", - "/dns/boot-cr.gatotech.network/tcp/35120/wss/p2p/12D3KooWGZsa9tSeLQ1VeC996e1YsCPuyRYMipHQuXikPjcKcpVQ", + "/dns/boot.gatotech.network/tcp/33120/p2p/12D3KooWGZsa9tSeLQ1VeC996e1YsCPuyRYMipHQuXikPjcKcpVQ", + "/dns/boot.gatotech.network/tcp/35120/wss/p2p/12D3KooWGZsa9tSeLQ1VeC996e1YsCPuyRYMipHQuXikPjcKcpVQ", 
"/dns/collectives-polkadot-bootnode.turboflakes.io/tcp/30605/p2p/12D3KooWPyzM7eX64J4aG8uRfSARakDVtiEtthEM8FUjrLWAg2sC", "/dns/collectives-polkadot-bootnode.turboflakes.io/tcp/30705/wss/p2p/12D3KooWPyzM7eX64J4aG8uRfSARakDVtiEtthEM8FUjrLWAg2sC", "/dns/boot-node.helikon.io/tcp/10230/p2p/12D3KooWS8CBz4P5CBny9aBy2EQUvAExFo9PUVT57X8r3zWMFkXT", diff --git a/cumulus/parachains/chain-specs/collectives-westend.json b/cumulus/parachains/chain-specs/collectives-westend.json index fdd6348f02a..8680e3a7671 100644 --- a/cumulus/parachains/chain-specs/collectives-westend.json +++ b/cumulus/parachains/chain-specs/collectives-westend.json @@ -13,8 +13,8 @@ "/dns/boot.stake.plus/tcp/38334/wss/p2p/12D3KooWQoVsFCfgu21iu6kdtQsU9T6dPn1wsyLn1U34yPerR6zQ", "/dns/boot.metaspan.io/tcp/36072/p2p/12D3KooWEf2QXWq5pAbFJLfbnexA7KYtRRDSPkqTP64n1KtdsdV2", "/dns/boot.metaspan.io/tcp/36076/wss/p2p/12D3KooWEf2QXWq5pAbFJLfbnexA7KYtRRDSPkqTP64n1KtdsdV2", - "/dns/boot-cr.gatotech.network/tcp/33320/p2p/12D3KooWMedtdBGiSn7HLZusHwafXkZAdmWD18ciGQBfS4X1fv9K", - "/dns/boot-cr.gatotech.network/tcp/35320/wss/p2p/12D3KooWMedtdBGiSn7HLZusHwafXkZAdmWD18ciGQBfS4X1fv9K", + "/dns/boot.gatotech.network/tcp/33320/p2p/12D3KooWMedtdBGiSn7HLZusHwafXkZAdmWD18ciGQBfS4X1fv9K", + "/dns/boot.gatotech.network/tcp/35320/wss/p2p/12D3KooWMedtdBGiSn7HLZusHwafXkZAdmWD18ciGQBfS4X1fv9K", "/dns/collectives-westend-bootnode.turboflakes.io/tcp/30600/p2p/12D3KooWAe9CFXp6je3TAPQJE135KRemTLSqEqQBZMFwJontrThZ", "/dns/collectives-westend-bootnode.turboflakes.io/tcp/30700/wss/p2p/12D3KooWAe9CFXp6je3TAPQJE135KRemTLSqEqQBZMFwJontrThZ", "/dns/boot-node.helikon.io/tcp/10260/p2p/12D3KooWMzfnt29VAmrJHQcJU6Vfn4RsMbqPqgyWHqt9VTTAbSrL", diff --git a/cumulus/parachains/chain-specs/coretime-kusama.json b/cumulus/parachains/chain-specs/coretime-kusama.json index c22daf54db2..4ebaab82e75 100644 --- a/cumulus/parachains/chain-specs/coretime-kusama.json +++ b/cumulus/parachains/chain-specs/coretime-kusama.json @@ -8,7 +8,23 @@ "/dns/kusama-coretime-connect-a-0.polkadot.io/tcp/443/wss/p2p/12D3KooWR7Biy6nPgQFhk2eYP62pAkcFA6he9RUFURTDh7ewTjpo", "/dns/kusama-coretime-connect-a-1.polkadot.io/tcp/443/wss/p2p/12D3KooWAGFiMZDF9RxdacrkenzGdo8nhfSe9EXofHc5mHeJ9vGX", "/dns/boot.metaspan.io/tcp/33024/p2p/12D3KooWPmwMhG54ixDv2b3sCfYEJ1DWDrjaduBCBwqFFdqvVsmS", - "/dns/boot.metaspan.io/tcp/33026/wss/p2p/12D3KooWPmwMhG54ixDv2b3sCfYEJ1DWDrjaduBCBwqFFdqvVsmS" + "/dns/boot.metaspan.io/tcp/33026/wss/p2p/12D3KooWPmwMhG54ixDv2b3sCfYEJ1DWDrjaduBCBwqFFdqvVsmS", + "/dns/boot.stake.plus/tcp/47333/p2p/12D3KooWKKKoyywqdkkpZzCzVWt5VXEk5PbS9tUm635L5ohyf8bU", + "/dns/boot.stake.plus/tcp/47334/wss/p2p/12D3KooWKKKoyywqdkkpZzCzVWt5VXEk5PbS9tUm635L5ohyf8bU", + "/dns/coretime-kusama-boot-ng.dwellir.com/tcp/30358/p2p/12D3KooWSoPisbYQTAj79Dtsxx1qAiEFTouvXCfNJ1A3SQWQzuct", + "/dns/coretime-kusama-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWSoPisbYQTAj79Dtsxx1qAiEFTouvXCfNJ1A3SQWQzuct", + "/dns/boot.gatotech.network/tcp/33250/p2p/12D3KooWMpgcWr5pb7em7rWaQV4J6P2kn3YCjCeP1ESMsJPffn1a", + "/dns/boot.gatotech.network/tcp/35250/wss/p2p/12D3KooWMpgcWr5pb7em7rWaQV4J6P2kn3YCjCeP1ESMsJPffn1a", + "/dns/kcore16.rotko.net/tcp/33726/p2p/12D3KooWCyPSkk5cq2eEdw1qHizfa6UT4QggSarCEtcvNXpnod8B", + "/dns/kcore16.rotko.net/tcp/35726/wss/p2p/12D3KooWCyPSkk5cq2eEdw1qHizfa6UT4QggSarCEtcvNXpnod8B", + "/dns/coretime-kusama-bootnode.turboflakes.io/tcp/30660/p2p/12D3KooWHTr9GLvJEnGYKCu3FHC3DwqBiFg9MQUWsjPCP4YH5xyf", + "/dns/coretime-kusama-bootnode.turboflakes.io/tcp/30760/wss/p2p/12D3KooWHTr9GLvJEnGYKCu3FHC3DwqBiFg9MQUWsjPCP4YH5xyf", + 
"/dns/coretime-kusama.bootnodes.polkadotters.com/tcp/30371/p2p/12D3KooWHy7TAuK6EoVij2tfaeh3KkaEJxhTmumbEom3HfRnSEsp", + "/dns/coretime-kusama.bootnodes.polkadotters.com/tcp/30373/wss/p2p/12D3KooWHy7TAuK6EoVij2tfaeh3KkaEJxhTmumbEom3HfRnSEsp", + "/dns/boot-node.helikon.io/tcp/7420/p2p/12D3KooWK4eKFpYftyuLdBdXrkdJXHKt7KZcNLb92Ufkvo17B9T2", + "/dns/boot-node.helikon.io/tcp/7422/wss/p2p/12D3KooWK4eKFpYftyuLdBdXrkdJXHKt7KZcNLb92Ufkvo17B9T2", + "/dns/coretime-kusama-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWFzW9AgxNfkVNCepVByS7URDCRDAA5p3XzBLVptqZvWoL", + "/dns/coretime-kusama-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWFzW9AgxNfkVNCepVByS7URDCRDAA5p3XzBLVptqZvWoL" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/coretime-westend.json b/cumulus/parachains/chain-specs/coretime-westend.json index 74edd5b2cd9..586879b9abc 100644 --- a/cumulus/parachains/chain-specs/coretime-westend.json +++ b/cumulus/parachains/chain-specs/coretime-westend.json @@ -16,7 +16,19 @@ "/dns/boot-node.helikon.io/tcp/9420/p2p/12D3KooWFBPartM873MNm1AmVK3etUz34cAE9A9rwPztPno2epQ3", "/dns/boot-node.helikon.io/tcp/9422/wss/p2p/12D3KooWFBPartM873MNm1AmVK3etUz34cAE9A9rwPztPno2epQ3", "/dns/coretime-westend-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWHewSFwJueRprNZNfkncdjud9DrGzvP1qfmgPd7VK66gw", - "/dns/coretime-westend-boot-ng.dwellir.com/tcp/30356/p2p/12D3KooWHewSFwJueRprNZNfkncdjud9DrGzvP1qfmgPd7VK66gw" + "/dns/coretime-westend-boot-ng.dwellir.com/tcp/30356/p2p/12D3KooWHewSFwJueRprNZNfkncdjud9DrGzvP1qfmgPd7VK66gw", + "/dns/boot.stake.plus/tcp/45333/p2p/12D3KooWEFQapPJXNyZMt892qXZ8YgDuHWt2vhLeRvny98oUjEto", + "/dns/boot.stake.plus/tcp/45334/wss/p2p/12D3KooWEFQapPJXNyZMt892qXZ8YgDuHWt2vhLeRvny98oUjEto", + "/dns/coretime-westend-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWK7Zj1mCPg6h3eMp7v6akJ1o6AocRr59NLusDwBXQgrhw", + "/dns/coretime-westend-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWK7Zj1mCPg6h3eMp7v6akJ1o6AocRr59NLusDwBXQgrhw", + "/dns/ibp-boot-westend-coretime.luckyfriday.io/tcp/443/wss/p2p/12D3KooWBzfzNhvyRVTb9KtNYpkRf26yTRHorBZR2LmYhH5ArCey", + "/dns/ibp-boot-westend-coretime.luckyfriday.io/tcp/30340/p2p/12D3KooWBzfzNhvyRVTb9KtNYpkRf26yTRHorBZR2LmYhH5ArCey", + "/dns/wcore16.rotko.net/tcp/33736/p2p/12D3KooWFmGg7EGzxGDawuJ9EfyEznCrZfMJgGa4eHpMWjcJmg85", + "/dns/wcore16.rotko.net/tcp/35736/wss/p2p/12D3KooWFmGg7EGzxGDawuJ9EfyEznCrZfMJgGa4eHpMWjcJmg85", + "/dns/boot.gatotech.network/tcp/33350/p2p/12D3KooWN6FJDaZvWbtX1pSc6UdHgyF2UZtYxPp3UkXQZa8ko7uS", + "/dns/boot.gatotech.network/tcp/35350/wss/p2p/12D3KooWN6FJDaZvWbtX1pSc6UdHgyF2UZtYxPp3UkXQZa8ko7uS", + "/dns/coretime-westend.bootnodes.polkadotters.com/tcp/30358/wss/p2p/12D3KooWDc9T2vQ8rHvX7hAt9eLWktD9Q89NDTcLm5STkuNbzUGf", + "/dns/coretime-westend.bootnodes.polkadotters.com/tcp/30356/p2p/12D3KooWDc9T2vQ8rHvX7hAt9eLWktD9Q89NDTcLm5STkuNbzUGf" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/people-westend.json b/cumulus/parachains/chain-specs/people-westend.json index 93b8c064113..26e165b4839 100644 --- a/cumulus/parachains/chain-specs/people-westend.json +++ b/cumulus/parachains/chain-specs/people-westend.json @@ -25,6 +25,8 @@ "/dns/people-westend.bootnode.amforc.com/tcp/30346/p2p/12D3KooWQrMQFAXxJJJCtVr8nViBR6EDsuT1RyqU3eoCMebRQxTf", "/dns/people-westend-bootnode.turboflakes.io/tcp/30650/p2p/12D3KooWQEhmZg3uMkuxVUx3jbsD84zEX4dUKtvHfmCoBWMhybKW", "/dns/people-westend-bootnode.turboflakes.io/tcp/30750/wss/p2p/12D3KooWQEhmZg3uMkuxVUx3jbsD84zEX4dUKtvHfmCoBWMhybKW", + 
"/dns/wppl16.rotko.net/tcp/33766/p2p/12D3KooWHwUXBUo2WRMUBwPLC2ttVbnEk1KvDyESYAeKcNoCn7WS", + "/dns/wppl16.rotko.net/tcp/35766/wss/p2p/12D3KooWHwUXBUo2WRMUBwPLC2ttVbnEk1KvDyESYAeKcNoCn7WS", "/dns/people-westend-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWBdCpCabhgBpLn67LWcXE2JJCCTMhuJHrfDNiTiCCr3KX", "/dns/people-westend-boot-ng.dwellir.com/tcp/30355/p2p/12D3KooWBdCpCabhgBpLn67LWcXE2JJCCTMhuJHrfDNiTiCCr3KX" ], diff --git a/polkadot/node/service/chain-specs/kusama.json b/polkadot/node/service/chain-specs/kusama.json index 23094360866..899b302155f 100644 --- a/polkadot/node/service/chain-specs/kusama.json +++ b/polkadot/node/service/chain-specs/kusama.json @@ -18,8 +18,8 @@ "/dns/kusama.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWLx6nsj6Fpd8biP1VDyuCUjazvRiGWyBam8PsqRJkbUb9", "/dns/kusama.bootnodes.polkadotters.com/tcp/30311/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", "/dns/kusama.bootnodes.polkadotters.com/tcp/30313/wss/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", - "/dns/boot-cr.gatotech.network/tcp/33200/p2p/12D3KooWRNZXf99BfzQDE1C8YhuBbuy7Sj18UEf7FNpD8egbURYD", - "/dns/boot-cr.gatotech.network/tcp/35200/wss/p2p/12D3KooWRNZXf99BfzQDE1C8YhuBbuy7Sj18UEf7FNpD8egbURYD", + "/dns/boot.gatotech.network/tcp/33200/p2p/12D3KooWRNZXf99BfzQDE1C8YhuBbuy7Sj18UEf7FNpD8egbURYD", + "/dns/boot.gatotech.network/tcp/35200/wss/p2p/12D3KooWRNZXf99BfzQDE1C8YhuBbuy7Sj18UEf7FNpD8egbURYD", "/dns/boot.metaspan.io/tcp/23012/p2p/12D3KooWE1tq9ZL9AAxMiUBBqy1ENmh5pwfWabnoBPMo8gFPXhn6", "/dns/boot.metaspan.io/tcp/23015/ws/p2p/12D3KooWE1tq9ZL9AAxMiUBBqy1ENmh5pwfWabnoBPMo8gFPXhn6", "/dns/boot.metaspan.io/tcp/23016/wss/p2p/12D3KooWE1tq9ZL9AAxMiUBBqy1ENmh5pwfWabnoBPMo8gFPXhn6", diff --git a/polkadot/node/service/chain-specs/paseo.json b/polkadot/node/service/chain-specs/paseo.json index 19eefd32899..5a67ddcd4c4 100644 --- a/polkadot/node/service/chain-specs/paseo.json +++ b/polkadot/node/service/chain-specs/paseo.json @@ -17,6 +17,8 @@ "/dns/boot.gatotech.network/tcp/35400/wss/p2p/12D3KooWEvz5Ygv3MhCUNTVQbUTVhzhvf4KKcNoe5M5YbVLPBeeW", "/dns/paseo-bootnode.turboflakes.io/tcp/30630/p2p/12D3KooWMjCN2CrnN71hAdehn6M2iYKeGdGbZ1A3SKhf4hxrgG9e", "/dns/paseo-bootnode.turboflakes.io/tcp/30730/wss/p2p/12D3KooWMjCN2CrnN71hAdehn6M2iYKeGdGbZ1A3SKhf4hxrgG9e", + "/dns/pso16.rotko.net/tcp/33246/p2p/12D3KooWRH8eBMhw8c7bucy6pJfy94q4dKpLkF3pmeGohHmemdRu", + "/dns/pso16.rotko.net/tcp/35246/wss/p2p/12D3KooWRH8eBMhw8c7bucy6pJfy94q4dKpLkF3pmeGohHmemdRu", "/dns/paseo-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWBLLFKDGBxCwq3QmU3YwWKXUx953WwprRshJQicYu4Cfr", "/dns/paseo-boot-ng.dwellir.com/tcp/30354/p2p/12D3KooWBLLFKDGBxCwq3QmU3YwWKXUx953WwprRshJQicYu4Cfr" ], diff --git a/polkadot/node/service/chain-specs/polkadot.json b/polkadot/node/service/chain-specs/polkadot.json index bf0599f0bdc..04def54f794 100644 --- a/polkadot/node/service/chain-specs/polkadot.json +++ b/polkadot/node/service/chain-specs/polkadot.json @@ -19,8 +19,8 @@ "/dns/polkadot.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWAsuCEVCzUVUrtib8W82Yne3jgVGhQZN3hizko5FTnDg3", "/dns/polkadot.bootnodes.polkadotters.com/tcp/30314/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", "/dns/polkadot.bootnodes.polkadotters.com/tcp/30316/wss/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", - "/dns/boot-cr.gatotech.network/tcp/33100/p2p/12D3KooWK4E16jKk9nRhvC4RfrDVgcZzExg8Q3Q2G7ABUUitks1w", - "/dns/boot-cr.gatotech.network/tcp/35100/wss/p2p/12D3KooWK4E16jKk9nRhvC4RfrDVgcZzExg8Q3Q2G7ABUUitks1w", + 
"/dns/boot.gatotech.network/tcp/33100/p2p/12D3KooWK4E16jKk9nRhvC4RfrDVgcZzExg8Q3Q2G7ABUUitks1w", + "/dns/boot.gatotech.network/tcp/35100/wss/p2p/12D3KooWK4E16jKk9nRhvC4RfrDVgcZzExg8Q3Q2G7ABUUitks1w", "/dns/boot.metaspan.io/tcp/13012/p2p/12D3KooWRjHFApinuqSBjoaDjQHvxwubQSpEVy5hrgC9Smvh92WF", "/dns/boot.metaspan.io/tcp/13015/ws/p2p/12D3KooWRjHFApinuqSBjoaDjQHvxwubQSpEVy5hrgC9Smvh92WF", "/dns/boot.metaspan.io/tcp/13016/wss/p2p/12D3KooWRjHFApinuqSBjoaDjQHvxwubQSpEVy5hrgC9Smvh92WF", diff --git a/polkadot/node/service/chain-specs/westend.json b/polkadot/node/service/chain-specs/westend.json index 9dfc715df46..16bc7ff07b0 100644 --- a/polkadot/node/service/chain-specs/westend.json +++ b/polkadot/node/service/chain-specs/westend.json @@ -16,8 +16,8 @@ "/dns/westend.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWJ5y9ZgVepBQNW4aabrxgmnrApdVnscqgKWiUu4BNJbC8", "/dns/westend.bootnodes.polkadotters.com/tcp/30308/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", "/dns/westend.bootnodes.polkadotters.com/tcp/30310/wss/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", - "/dns/boot-cr.gatotech.network/tcp/33300/p2p/12D3KooWQGR1vUhoy6mvQorFp3bZFn6NNezhQZ6NWnVV7tpFgoPd", - "/dns/boot-cr.gatotech.network/tcp/35300/wss/p2p/12D3KooWQGR1vUhoy6mvQorFp3bZFn6NNezhQZ6NWnVV7tpFgoPd", + "/dns/boot.gatotech.network/tcp/33300/p2p/12D3KooWQGR1vUhoy6mvQorFp3bZFn6NNezhQZ6NWnVV7tpFgoPd", + "/dns/boot.gatotech.network/tcp/35300/wss/p2p/12D3KooWQGR1vUhoy6mvQorFp3bZFn6NNezhQZ6NWnVV7tpFgoPd", "/dns/boot.metaspan.io/tcp/33012/p2p/12D3KooWNTau7iG4G9cUJSwwt2QJP1W88pUf2SgqsHjRU2RL8pfa", "/dns/boot.metaspan.io/tcp/33015/ws/p2p/12D3KooWNTau7iG4G9cUJSwwt2QJP1W88pUf2SgqsHjRU2RL8pfa", "/dns/boot.metaspan.io/tcp/33016/wss/p2p/12D3KooWNTau7iG4G9cUJSwwt2QJP1W88pUf2SgqsHjRU2RL8pfa", -- GitLab From 289f5bbf7a45dc0380904a435464b15ec711ed03 Mon Sep 17 00:00:00 2001 From: Francisco Aguirre Date: Thu, 16 May 2024 02:01:49 +0200 Subject: [PATCH 017/106] XCM Cookbook (#2633) # Context XCM docs are currently an md book hosted with github pages: https://paritytech.github.io/xcm-docs/. While that's fine, it's not in line with the work being done in the polkadot-sdk docs. # Main addition This PR aims to fix that by bringing the docs back to this repo. This does not have all the information currently present in the mdbook xcm-docs but aims to be a good chunk of it and fully replace it over time. I also added the sections `guides` and `cookbook` which will be very useful for users wanting to get into XCM. For now I only added one example to the cookbook, but have ideas for guides and more examples. Having this docs be in rust docs is very useful for the cookbook. # TODO - [x] Use `FungibleAdapter` - [x] Improve and relocate mock message queue - [x] Fix license issue. Why does docs/sdk/ not have this problem? 
(Just added the licenses) # Next steps - More examples in the cookbook - End-to-end XCM guide with zombienet testing --------- Co-authored-by: command-bot <> Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- Cargo.lock | 62 +++++- Cargo.toml | 1 + polkadot/xcm/docs/Cargo.toml | 39 ++++ .../xcm/docs/mermaid/location_hierarchy.mmd | 9 + polkadot/xcm/docs/mermaid/structure.mmd | 4 + .../xcm/docs/mermaid/transport_protocols.mmd | 6 + .../xcm/docs/mermaid/universal_location.mmd | 3 + polkadot/xcm/docs/mermaid/usdt_location.mmd | 6 + polkadot/xcm/docs/src/cookbook/mod.rs | 27 +++ .../cookbook/relay_token_transactor/mod.rs | 51 +++++ .../relay_token_transactor/network.rs | 90 +++++++++ .../relay_token_transactor/parachain/mod.rs | 56 ++++++ .../parachain/xcm_config.rs | 189 ++++++++++++++++++ .../relay_token_transactor/relay_chain/mod.rs | 103 ++++++++++ .../relay_chain/xcm_config.rs | 163 +++++++++++++++ .../cookbook/relay_token_transactor/tests.rs | 128 ++++++++++++ polkadot/xcm/docs/src/fundamentals.rs | 177 ++++++++++++++++ polkadot/xcm/docs/src/glossary.rs | 123 ++++++++++++ polkadot/xcm/docs/src/guides/mod.rs | 25 +++ polkadot/xcm/docs/src/lib.rs | 63 ++++++ polkadot/xcm/xcm-simulator/Cargo.toml | 4 + .../example/src/parachain/mod.rs | 6 +- .../src/parachain/xcm_config/constants.rs | 5 +- .../xcm/xcm-simulator/example/src/tests.rs | 6 +- polkadot/xcm/xcm-simulator/src/lib.rs | 4 + .../mock_message_queue.rs} | 79 ++++---- 26 files changed, 1381 insertions(+), 48 deletions(-) create mode 100644 polkadot/xcm/docs/Cargo.toml create mode 100644 polkadot/xcm/docs/mermaid/location_hierarchy.mmd create mode 100644 polkadot/xcm/docs/mermaid/structure.mmd create mode 100644 polkadot/xcm/docs/mermaid/transport_protocols.mmd create mode 100644 polkadot/xcm/docs/mermaid/universal_location.mmd create mode 100644 polkadot/xcm/docs/mermaid/usdt_location.mmd create mode 100644 polkadot/xcm/docs/src/cookbook/mod.rs create mode 100644 polkadot/xcm/docs/src/cookbook/relay_token_transactor/mod.rs create mode 100644 polkadot/xcm/docs/src/cookbook/relay_token_transactor/network.rs create mode 100644 polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/mod.rs create mode 100644 polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs create mode 100644 polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs create mode 100644 polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs create mode 100644 polkadot/xcm/docs/src/cookbook/relay_token_transactor/tests.rs create mode 100644 polkadot/xcm/docs/src/fundamentals.rs create mode 100644 polkadot/xcm/docs/src/glossary.rs create mode 100644 polkadot/xcm/docs/src/guides/mod.rs create mode 100644 polkadot/xcm/docs/src/lib.rs rename polkadot/xcm/xcm-simulator/{example/src/parachain/mock_msg_queue.rs => src/mock_message_queue.rs} (72%) diff --git a/Cargo.lock b/Cargo.lock index ded2cc53293..642fe88db00 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8409,7 +8409,7 @@ dependencies = [ "pallet-minimal-template", "polkadot-sdk-docs", "polkadot-sdk-frame", - "simple-mermaid", + "simple-mermaid 0.1.1", ] [[package]] @@ -13900,7 +13900,7 @@ dependencies = [ "sc-rpc-api", "sc-service", "scale-info", - "simple-mermaid", + "simple-mermaid 0.1.1", "sp-api", "sp-arithmetic", "sp-core", @@ -18353,6 +18353,11 @@ dependencies = [ "bitflags 2.4.0", ] +[[package]] +name = "simple-mermaid" +version = "0.1.0" +source = 
"git+https://github.com/kianenigma/simple-mermaid.git?branch=main#e48b187bcfd5cc75111acd9d241f1bd36604344b" + [[package]] name = "simple-mermaid" version = "0.1.1" @@ -19645,7 +19650,7 @@ dependencies = [ "scale-info", "serde", "serde_json", - "simple-mermaid", + "simple-mermaid 0.1.1", "sp-api", "sp-application-crypto", "sp-arithmetic", @@ -21068,6 +21073,28 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" +[[package]] +name = "test-log" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3dffced63c2b5c7be278154d76b479f9f9920ed34e7574201407f0b14e2bbb93" +dependencies = [ + "env_logger 0.11.3", + "test-log-macros", + "tracing-subscriber 0.3.18", +] + +[[package]] +name = "test-log-macros" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5999e24eaa32083191ba4e425deb75cdf25efefabe5aaccb7446dd0d4122a3f5" +dependencies = [ + "proc-macro2 1.0.82", + "quote 1.0.35", + "syn 2.0.61", +] + [[package]] name = "test-parachain-adder" version = "1.0.0" @@ -23281,6 +23308,31 @@ dependencies = [ "libc", ] +[[package]] +name = "xcm-docs" +version = "0.1.0" +dependencies = [ + "docify", + "pallet-balances", + "pallet-message-queue", + "pallet-xcm", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-primitives", + "polkadot-runtime-parachains", + "polkadot-sdk-frame", + "scale-info", + "simple-mermaid 0.1.0", + "sp-io", + "sp-runtime", + "sp-std 14.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "test-log", + "xcm-simulator", +] + [[package]] name = "xcm-emulator" version = "0.5.0" @@ -23379,12 +23431,16 @@ name = "xcm-simulator" version = "7.0.0" dependencies = [ "frame-support", + "frame-system", "parity-scale-codec", "paste", "polkadot-core-primitives", "polkadot-parachain-primitives", + "polkadot-primitives", "polkadot-runtime-parachains", + "scale-info", "sp-io", + "sp-runtime", "sp-std 14.0.0", "staging-xcm", "staging-xcm-builder", diff --git a/Cargo.toml b/Cargo.toml index dcf410daa1f..1d02b701d23 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -218,6 +218,7 @@ members = [ "polkadot/utils/generate-bags", "polkadot/utils/remote-ext-tests/bags-list", "polkadot/xcm", + "polkadot/xcm/docs", "polkadot/xcm/pallet-xcm", "polkadot/xcm/pallet-xcm-benchmarks", "polkadot/xcm/procedural", diff --git a/polkadot/xcm/docs/Cargo.toml b/polkadot/xcm/docs/Cargo.toml new file mode 100644 index 00000000000..9820bd36dc0 --- /dev/null +++ b/polkadot/xcm/docs/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "xcm-docs" +description = "Documentation and guides for XCM" +version = "0.1.0" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true +authors.workspace = true +edition.workspace = true +publish = false + +[dependencies] +# For XCM stuff +xcm = { path = "../../xcm", package = "staging-xcm" } +xcm-executor = { path = "../../xcm/xcm-executor", package = "staging-xcm-executor" } +xcm-builder = { path = "../../xcm/xcm-builder", package = "staging-xcm-builder" } +xcm-simulator = { path = "../../xcm/xcm-simulator" } +pallet-xcm = { path = "../../xcm/pallet-xcm" } + +# For building FRAME runtimes +frame = { package = "polkadot-sdk-frame", path = "../../../substrate/frame", features = ["experimental", "runtime"] } +codec = { package = "parity-scale-codec", version = "3.6.9" } +scale-info = { version = "2.6.0", default-features = false } 
+polkadot-parachain-primitives = { path = "../../../polkadot/parachain" } +polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains" } +polkadot-primitives = { path = "../../../polkadot/primitives" } +sp-runtime = { path = "../../../substrate/primitives/runtime" } +sp-std = { path = "../../../substrate/primitives/std" } +sp-io = { path = "../../../substrate/primitives/io" } + +# Some pallets +pallet-message-queue = { path = "../../../substrate/frame/message-queue" } +pallet-balances = { path = "../../../substrate/frame/balances" } + +# For building docs +simple-mermaid = { git = "https://github.com/kianenigma/simple-mermaid.git", branch = "main" } +docify = "0.2.6" + +[dev-dependencies] +test-log = "0.2.14" diff --git a/polkadot/xcm/docs/mermaid/location_hierarchy.mmd b/polkadot/xcm/docs/mermaid/location_hierarchy.mmd new file mode 100644 index 00000000000..54fcfc8072a --- /dev/null +++ b/polkadot/xcm/docs/mermaid/location_hierarchy.mmd @@ -0,0 +1,9 @@ +flowchart + relay[Relaychain] --> paraA["Parachain(1000)"] + relay --> paraB["Parachain(2000)"] + + paraA --> pallet[Pallet] + pallet --> indexA[Index 1] + pallet --> indexB[Index 2] + + paraA --> account[Account] diff --git a/polkadot/xcm/docs/mermaid/structure.mmd b/polkadot/xcm/docs/mermaid/structure.mmd new file mode 100644 index 00000000000..17f60467241 --- /dev/null +++ b/polkadot/xcm/docs/mermaid/structure.mmd @@ -0,0 +1,4 @@ +flowchart + docs[xcm_docs] --> fundamentals + docs --> guides + docs --> cookbook diff --git a/polkadot/xcm/docs/mermaid/transport_protocols.mmd b/polkadot/xcm/docs/mermaid/transport_protocols.mmd new file mode 100644 index 00000000000..c0340db0651 --- /dev/null +++ b/polkadot/xcm/docs/mermaid/transport_protocols.mmd @@ -0,0 +1,6 @@ +flowchart + relay[Relaychain] --"DMP"--> paraA["Parachain(2000)"] + relay --"DMP"--> paraB["Parachain(2001)"] + + paraA --"UMP"--> relay + paraB --"UMP"--> relay diff --git a/polkadot/xcm/docs/mermaid/universal_location.mmd b/polkadot/xcm/docs/mermaid/universal_location.mmd new file mode 100644 index 00000000000..97bfa747319 --- /dev/null +++ b/polkadot/xcm/docs/mermaid/universal_location.mmd @@ -0,0 +1,3 @@ +flowchart + universe[Universal Location] --> polkadot[Polkadot] + universe --> ethereum[Ethereum] diff --git a/polkadot/xcm/docs/mermaid/usdt_location.mmd b/polkadot/xcm/docs/mermaid/usdt_location.mmd new file mode 100644 index 00000000000..5e9222f6098 --- /dev/null +++ b/polkadot/xcm/docs/mermaid/usdt_location.mmd @@ -0,0 +1,6 @@ +flowchart + relay[Polkadot] --> assetHub["Asset Hub"] + relay --> anotherPara["Another parachain"] + + assetHub --> assetsPallet["Assets Pallet"] + assetsPallet --> usdt[1984] diff --git a/polkadot/xcm/docs/src/cookbook/mod.rs b/polkadot/xcm/docs/src/cookbook/mod.rs new file mode 100644 index 00000000000..1c69bf0ead6 --- /dev/null +++ b/polkadot/xcm/docs/src/cookbook/mod.rs @@ -0,0 +1,27 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see .
+
+//! # XCM Cookbook
+//!
+//! A collection of XCM recipes.
+//!
+//! Each recipe is tested and explains all the code necessary to run it -- they're not just snippets
+//! to copy and paste.
+
+/// Configuring a parachain that only uses the Relay Chain native token.
+/// In the case of Polkadot, this recipe will show you how to launch a parachain with no native
+/// token -- dealing only in DOT.
+pub mod relay_token_transactor;
diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/mod.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/mod.rs
new file mode 100644
index 00000000000..279dd71a35f
--- /dev/null
+++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/mod.rs
@@ -0,0 +1,51 @@
+// Copyright Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see .
+
+//! # Relay Asset Transactor
+//!
+//! This example shows how to configure a parachain to only deal with the Relay Chain token.
+//!
+//! The first step is using the [`xcm_builder::FungibleAdapter`] to create an `AssetTransactor` that
+//! can handle the relay chain token.
+#![doc = docify::embed!("src/cookbook/relay_token_transactor/parachain/xcm_config.rs", asset_transactor)]
+//!
+//! The second step is to configure `IsReserve` to recognize the relay chain as a reserve for its
+//! own asset.
+//! With this, you'll be able to easily mint a derivative asset, backed one-to-one by the Relay
+//! Chain, by using the XCM pallet's `transfer_assets` extrinsic.
+//!
+//! The `IsReserve` type takes a type that implements `ContainsPair`.
+//! In this case, we want a type that contains the pair `(relay_chain_native_token, relay_chain)`.
+#![doc = docify::embed!("src/cookbook/relay_token_transactor/parachain/xcm_config.rs", is_reserve)]
+//!
+//! With this setup, we are able to do a reserve asset transfer to and from the parachain and relay
+//! chain.
+#![doc = docify::embed!("src/cookbook/relay_token_transactor/tests.rs", reserve_asset_transfers_work)]
+//!
+//! For the rest of the code, be sure to check the contents of this module.
+
+/// The parachain runtime for this example.
+pub mod parachain;
+
+/// The relay chain runtime for this example.
+pub mod relay_chain;
+
+/// The network for this example.
+pub mod network;
+
+/// Tests for this example.
+#[cfg(test)]
+pub mod tests;
diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/network.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/network.rs
new file mode 100644
index 00000000000..46ac0e5df63
--- /dev/null
+++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/network.rs
@@ -0,0 +1,90 @@
+// Copyright Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Mock network + +use frame::deps::{ + frame_system, + sp_io::TestExternalities, + sp_runtime::{AccountId32, BuildStorage}, +}; +use xcm_simulator::{decl_test_network, decl_test_parachain, decl_test_relay_chain, TestExt}; + +use super::{parachain, relay_chain}; + +pub const ALICE: AccountId32 = AccountId32::new([0u8; 32]); +pub const BOB: AccountId32 = AccountId32::new([1u8; 32]); +pub const UNITS: u64 = 10_000_000_000; +pub const CENTS: u64 = 100_000_000; +pub const INITIAL_BALANCE: u64 = UNITS; + +decl_test_parachain! { + pub struct ParaA { + Runtime = parachain::Runtime, + XcmpMessageHandler = parachain::MessageQueue, + DmpMessageHandler = parachain::MessageQueue, + new_ext = para_ext(), + } +} + +decl_test_relay_chain! { + pub struct Relay { + Runtime = relay_chain::Runtime, + RuntimeCall = relay_chain::RuntimeCall, + RuntimeEvent = relay_chain::RuntimeEvent, + XcmConfig = relay_chain::XcmConfig, + MessageQueue = relay_chain::MessageQueue, + System = relay_chain::System, + new_ext = relay_ext(), + } +} + +decl_test_network! { + pub struct MockNet { + relay_chain = Relay, + parachains = vec![ + (2222, ParaA), + ], + } +} + +pub fn para_ext() -> TestExternalities { + use parachain::{MessageQueue, Runtime, System}; + + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let mut ext = frame::deps::sp_io::TestExternalities::new(t); + ext.execute_with(|| { + System::set_block_number(1); + MessageQueue::set_para_id(2222.into()); + }); + ext +} + +pub fn relay_ext() -> TestExternalities { + use relay_chain::{Runtime, System}; + + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + pallet_balances::GenesisConfig:: { balances: vec![(ALICE, INITIAL_BALANCE)] } + .assimilate_storage(&mut t) + .unwrap(); + + let mut ext = TestExternalities::new(t); + ext.execute_with(|| { + System::set_block_number(1); + }); + ext +} diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/mod.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/mod.rs new file mode 100644 index 00000000000..e3fdda2e733 --- /dev/null +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/mod.rs @@ -0,0 +1,56 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! 
# Runtime + +use frame::{deps::frame_system, prelude::*, runtime::prelude::*, traits::IdentityLookup}; +use xcm_executor::XcmExecutor; +use xcm_simulator::mock_message_queue; + +mod xcm_config; +use xcm_config::XcmConfig; + +pub type Block = frame_system::mocking::MockBlock; +pub type AccountId = frame::deps::sp_runtime::AccountId32; +pub type Balance = u64; + +construct_runtime! { + pub struct Runtime { + System: frame_system, + MessageQueue: mock_message_queue, + Balances: pallet_balances, + XcmPallet: pallet_xcm, + } +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type Block = Block; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type AccountData = pallet_balances::AccountData; +} + +impl mock_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type XcmExecutor = XcmExecutor; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] +impl pallet_balances::Config for Runtime { + type Balance = Balance; + type AccountStore = System; +} diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs new file mode 100644 index 00000000000..99f17693093 --- /dev/null +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs @@ -0,0 +1,189 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! # XCM Configuration + +use frame::{ + deps::frame_system, + runtime::prelude::*, + traits::{Everything, Nothing}, +}; +use xcm::v4::prelude::*; +use xcm_builder::{ + AccountId32Aliases, DescribeAllTerminal, DescribeFamily, EnsureXcmOrigin, + FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsConcrete, + SignedToAccountId32, +}; +use xcm_executor::XcmExecutor; + +use super::{AccountId, Balances, MessageQueue, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin}; + +parameter_types! { + pub RelayLocation: Location = Location::parent(); + pub ThisNetwork: NetworkId = NetworkId::Polkadot; +} + +pub type LocationToAccountId = ( + HashedDescription>, + AccountId32Aliases, +); + +/// Configuration related to asset transactors +#[docify::export] +mod asset_transactor { + use super::*; + + parameter_types! { + pub ParentRelayLocation: Location = Location::parent(); + } + + /// AssetTransactor for handling the relay chain token + pub type FungibleTransactor = FungibleAdapter< + // Use this implementation of the `fungible::*` traits. + // `Balances` is the name given to the balances pallet in this particular recipe. + // Any implementation of the traits would suffice. + Balances, + // This transactor deals with the native token of the Relay Chain. 
+ // This token is referenced by the Location of the Relay Chain relative to this chain + // -- Location::parent(). + IsConcrete, + // How to convert an XCM Location into a local account id. + // This is also something that's configured in the XCM executor. + LocationToAccountId, + // The type for account ids, only needed because `fungible` is generic over it. + AccountId, + // Not tracking teleports. + // This recipe only uses reserve asset transfers to handle the Relay Chain token. + (), + >; + + /// Actual configuration item that'll be set in the XCM config. + /// A tuple could be used here to have multiple transactors, each (potentially) handling + /// different assets. + /// In this recipe, we only have one. + pub type AssetTransactor = FungibleTransactor; +} + +/// Configuration related to token reserves +#[docify::export] +mod is_reserve { + use super::*; + + parameter_types! { + /// Reserves are specified using a pair `(AssetFilter, Location)`. + /// Each pair means that the specified Location is a reserve for all the assets in AssetsFilter. + /// Here, we are specifying that the Relay Chain is the reserve location for its native token. + pub RelayTokenForRelay: (AssetFilter, Location) = + (Wild(AllOf { id: AssetId(Parent.into()), fun: WildFungible }), Parent.into()); + } + + /// The wrapper type xcm_builder::Case is needed in order to use this in the configuration. + pub type IsReserve = xcm_builder::Case; +} + +mod weigher { + use super::*; + use xcm_builder::FixedWeightBounds; + + parameter_types! { + pub const WeightPerInstruction: Weight = Weight::from_parts(1, 1); + pub const MaxInstructions: u32 = 100; + } + + pub type Weigher = FixedWeightBounds; +} + +parameter_types! { + pub UniversalLocation: InteriorLocation = [GlobalConsensus(NetworkId::Polkadot), Parachain(2222)].into(); +} + +pub struct XcmConfig; +impl xcm_executor::Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = (); + type AssetTransactor = asset_transactor::AssetTransactor; + type OriginConverter = (); + // The declaration of which Locations are reserves for which Assets. 
+ type IsReserve = is_reserve::IsReserve; + type IsTeleporter = (); + type UniversalLocation = UniversalLocation; + // This is not safe, you should use `xcm_builder::AllowTopLevelPaidExecutionFrom` in a + // production chain + type Barrier = xcm_builder::AllowUnpaidExecutionFrom; + type Weigher = weigher::Weigher; + type Trader = (); + type ResponseHandler = (); + type AssetTrap = (); + type AssetLocker = (); + type AssetExchanger = (); + type AssetClaims = (); + type SubscriptionService = (); + type PalletInstancesInfo = (); + type FeeManager = (); + type MaxAssetsIntoHolding = frame::traits::ConstU32<1>; + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; + type Aliasers = Nothing; + type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); + type XcmRecorder = (); +} + +pub type LocalOriginToLocation = SignedToAccountId32; + +impl pallet_xcm::Config for Runtime { + // We turn off sending for these tests + type SendXcmOrigin = EnsureXcmOrigin; + type XcmRouter = super::super::network::ParachainXcmRouter; // Provided by xcm-simulator + // Anyone can execute XCM programs + type ExecuteXcmOrigin = EnsureXcmOrigin; + // We execute any type of program + type XcmExecuteFilter = Everything; + // How we execute programs + type XcmExecutor = XcmExecutor; + // We don't allow teleports + type XcmTeleportFilter = Nothing; + // We allow all reserve transfers + type XcmReserveTransferFilter = Everything; + // Same weigher executor uses to weigh XCM programs + type Weigher = weigher::Weigher; + // Same universal location + type UniversalLocation = UniversalLocation; + // No version discovery needed + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 0; + type AdvertisedXcmVersion = frame::traits::ConstU32<3>; + type AdminOrigin = frame_system::EnsureRoot; + // No locking + type TrustedLockers = (); + type MaxLockers = frame::traits::ConstU32<0>; + type MaxRemoteLockConsumers = frame::traits::ConstU32<0>; + type RemoteLockConsumerIdentifier = (); + // How to turn locations into accounts + type SovereignAccountOf = LocationToAccountId; + // A currency to pay for things and its matcher, we are using the relay token + type Currency = Balances; + type CurrencyMatcher = IsConcrete; + // Pallet benchmarks, no need for this recipe + type WeightInfo = pallet_xcm::TestWeightInfo; + // Runtime types + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; +} diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs new file mode 100644 index 00000000000..25c35dd4aaa --- /dev/null +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs @@ -0,0 +1,103 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Relay chain runtime mock. + +use frame::{ + deps::{frame_support::weights::WeightMeter, sp_runtime::AccountId32}, + prelude::*, + runtime::prelude::*, + traits::{IdentityLookup, ProcessMessage, ProcessMessageError}, +}; +use polkadot_runtime_parachains::inclusion::{AggregateMessageOrigin, UmpQueueId}; +use xcm::v4::prelude::*; + +mod xcm_config; +pub use xcm_config::LocationToAccountId; +use xcm_config::XcmConfig; + +pub type AccountId = AccountId32; +pub type Balance = u64; + +parameter_types! { + pub const BlockHashCount: u64 = 250; +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Block = Block; + type AccountData = pallet_balances::AccountData; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] +impl pallet_balances::Config for Runtime { + type AccountStore = System; +} + +type Block = frame_system::mocking::MockBlock; + +parameter_types! { + /// Amount of weight that can be spent per block to service messages. + pub MessageQueueServiceWeight: Weight = Weight::from_parts(1_000_000_000, 1_000_000); + pub const MessageQueueHeapSize: u32 = 65_536; + pub const MessageQueueMaxStale: u32 = 16; +} + +/// Message processor to handle any messages that were enqueued into the `MessageQueue` pallet. +pub struct MessageProcessor; +impl ProcessMessage for MessageProcessor { + type Origin = AggregateMessageOrigin; + + fn process_message( + message: &[u8], + origin: Self::Origin, + meter: &mut WeightMeter, + id: &mut [u8; 32], + ) -> Result { + let para = match origin { + AggregateMessageOrigin::Ump(UmpQueueId::Para(para)) => para, + }; + xcm_builder::ProcessXcmMessage::< + Junction, + xcm_executor::XcmExecutor, + RuntimeCall, + >::process_message(message, Junction::Parachain(para.into()), meter, id) + } +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Size = u32; + type HeapSize = MessageQueueHeapSize; + type MaxStale = MessageQueueMaxStale; + type ServiceWeight = MessageQueueServiceWeight; + type MessageProcessor = MessageProcessor; + type QueueChangeHandler = (); + type QueuePausedQuery = (); + type WeightInfo = (); + type IdleMaxServiceWeight = MessageQueueServiceWeight; +} + +construct_runtime! { + pub struct Runtime { + System: frame_system, + Balances: pallet_balances, + MessageQueue: pallet_message_queue, + XcmPallet: pallet_xcm, + } +} diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs new file mode 100644 index 00000000000..987bb3f9ab6 --- /dev/null +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs @@ -0,0 +1,163 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Relay chain XCM configuration + +use frame::{ + deps::frame_system, + runtime::prelude::*, + traits::{Everything, Nothing}, +}; +use xcm::v4::prelude::*; +use xcm_builder::{ + AccountId32Aliases, DescribeAllTerminal, DescribeFamily, EnsureXcmOrigin, + FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsConcrete, + SignedToAccountId32, +}; +use xcm_executor::XcmExecutor; + +use super::{AccountId, Balances, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin}; + +parameter_types! { + pub HereLocation: Location = Location::here(); + pub ThisNetwork: NetworkId = NetworkId::Polkadot; +} + +/// Converter from XCM Locations to accounts. +/// This generates sovereign accounts for Locations and converts +/// local AccountId32 junctions to local accounts. +pub type LocationToAccountId = ( + HashedDescription>, + AccountId32Aliases, +); + +mod asset_transactor { + use super::*; + + /// AssetTransactor for handling the Relay Chain token. + pub type FungibleTransactor = FungibleAdapter< + // Use this `fungible` implementation. + Balances, + // This transactor handles the native token. + IsConcrete, + // How to convert an XCM Location into a local account id. + // Whenever assets are handled, the location is turned into an account. + // This account is the one where balances are withdrawn/deposited. + LocationToAccountId, + // The account id type, needed because `fungible` is generic over it. + AccountId, + // Not tracking teleports. + (), + >; + + /// All asset transactors, in this case only one + pub type AssetTransactor = FungibleTransactor; +} + +mod weigher { + use super::*; + use xcm_builder::FixedWeightBounds; + + parameter_types! { + pub const WeightPerInstruction: Weight = Weight::from_parts(1, 1); + pub const MaxInstructions: u32 = 100; + } + + pub type Weigher = FixedWeightBounds; +} + +parameter_types! 
{ + pub UniversalLocation: InteriorLocation = [GlobalConsensus(NetworkId::Polkadot)].into(); +} + +pub struct XcmConfig; +impl xcm_executor::Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = (); + type AssetTransactor = asset_transactor::AssetTransactor; + type OriginConverter = (); + // We don't need to recognize anyone as a reserve + type IsReserve = (); + type IsTeleporter = (); + type UniversalLocation = UniversalLocation; + // This is not safe, you should use `xcm_builder::AllowTopLevelPaidExecutionFrom` in a + // production chain + type Barrier = xcm_builder::AllowUnpaidExecutionFrom; + type Weigher = weigher::Weigher; + type Trader = (); + type ResponseHandler = (); + type AssetTrap = (); + type AssetLocker = (); + type AssetExchanger = (); + type AssetClaims = (); + type SubscriptionService = (); + type PalletInstancesInfo = (); + type FeeManager = (); + type MaxAssetsIntoHolding = frame::traits::ConstU32<1>; + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; + type Aliasers = Nothing; + type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); + type XcmRecorder = (); +} + +pub type LocalOriginToLocation = SignedToAccountId32; + +impl pallet_xcm::Config for Runtime { + // No one can call `send` + type SendXcmOrigin = EnsureXcmOrigin; + type XcmRouter = super::super::network::RelayChainXcmRouter; // Provided by xcm-simulator + // Anyone can execute XCM programs + type ExecuteXcmOrigin = EnsureXcmOrigin; + // We execute any type of program + type XcmExecuteFilter = Everything; + // How we execute programs + type XcmExecutor = XcmExecutor; + // We don't allow teleports + type XcmTeleportFilter = Nothing; + // We allow all reserve transfers. + // This is so it can act as a reserve for its native token. + type XcmReserveTransferFilter = Everything; + // Same weigher executor uses to weigh XCM programs + type Weigher = weigher::Weigher; + // Same universal location + type UniversalLocation = UniversalLocation; + // No version discovery needed + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 0; + type AdvertisedXcmVersion = frame::traits::ConstU32<3>; + type AdminOrigin = frame_system::EnsureRoot; + // No locking + type TrustedLockers = (); + type MaxLockers = frame::traits::ConstU32<0>; + type MaxRemoteLockConsumers = frame::traits::ConstU32<0>; + type RemoteLockConsumerIdentifier = (); + // How to turn locations into accounts + type SovereignAccountOf = LocationToAccountId; + // A currency to pay for things and its matcher, we are using the relay token + type Currency = Balances; + type CurrencyMatcher = IsConcrete; + // Pallet benchmarks, no need for this example + type WeightInfo = pallet_xcm::TestWeightInfo; + // Runtime types + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; +} diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/tests.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/tests.rs new file mode 100644 index 00000000000..792cf6149e7 --- /dev/null +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/tests.rs @@ -0,0 +1,128 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see .
+
+use frame::testing_prelude::*;
+use test_log::test;
+use xcm::prelude::*;
+use xcm_executor::traits::ConvertLocation;
+use xcm_simulator::TestExt;
+
+use super::{
+	network::{MockNet, ParaA, Relay, ALICE, BOB, CENTS, INITIAL_BALANCE},
+	parachain, relay_chain,
+};
+
+#[docify::export]
+#[test]
+fn reserve_asset_transfers_work() {
+	// Scenario:
+	// ALICE on the relay chain holds some of the Relay Chain's native tokens.
+	// She transfers them to BOB's account on the parachain using a reserve transfer.
+	// BOB receives Relay Chain native token derivatives on the parachain,
+	// which are backed one-to-one with the real tokens on the Relay Chain.
+	//
+	// NOTE: We could have used ALICE on both chains, since the two accounts would
+	// still be distinct, but using ALICE and BOB makes the example clearer.

+	// We restart the mock network.
+	MockNet::reset();
+
+	// ALICE starts with INITIAL_BALANCE on the relay chain
+	Relay::execute_with(|| {
+		assert_eq!(relay_chain::Balances::free_balance(&ALICE), INITIAL_BALANCE);
+	});
+
+	// BOB starts with 0 on the parachain
+	ParaA::execute_with(|| {
+		assert_eq!(parachain::Balances::free_balance(&BOB), 0);
+	});
+
+	// ALICE on the Relay Chain sends some Relay Chain native tokens to BOB on the parachain.
+	// The transfer is done with the `transfer_assets` extrinsic in the XCM pallet.
+	// The extrinsic figures out it should do a reserve asset transfer
+	// with the local chain as reserve.
+	Relay::execute_with(|| {
+		// The parachain id is specified in the network.rs file in this recipe.
+		let destination: Location = Parachain(2222).into();
+		let beneficiary: Location =
+			AccountId32 { id: BOB.clone().into(), network: Some(NetworkId::Polkadot) }.into();
+		// We need to use `u128` here for the conversion to work properly.
+		// If we don't specify anything, it will be a `u64`, which the conversion
+		// will turn into a non-fungible token instead of a fungible one.
+		let assets: Assets = (Here, 50u128 * CENTS as u128).into();
+		assert_ok!(relay_chain::XcmPallet::transfer_assets(
+			relay_chain::RuntimeOrigin::signed(ALICE),
+			Box::new(VersionedLocation::V4(destination.clone())),
+			Box::new(VersionedLocation::V4(beneficiary)),
+			Box::new(VersionedAssets::V4(assets)),
+			0,
+			WeightLimit::Unlimited,
+		));
+
+		// ALICE now has fewer Relay Chain tokens.
+		assert_eq!(relay_chain::Balances::free_balance(&ALICE), INITIAL_BALANCE - 50 * CENTS);
+
+		// The funds of the sovereign account of the parachain increase by 50 cents,
+		// the ones transferred over to BOB.
+		// The funds in this sovereign account represent how many Relay Chain tokens
+		// have been sent to this parachain.
+		// If the parachain wants to send those assets somewhere else, they have to go
+		// via the reserve, and this balance is updated accordingly.
+		// This is why the derivatives are backed one-to-one.
+		let parachains_sovereign_account =
+			relay_chain::LocationToAccountId::convert_location(&destination).unwrap();
+		assert_eq!(relay_chain::Balances::free_balance(parachains_sovereign_account), 50 * CENTS);
+	});
+
+	ParaA::execute_with(|| {
+		// On the parachain, BOB has received the derivative tokens
+		assert_eq!(parachain::Balances::free_balance(&BOB), 50 * CENTS);
+
+		// BOB gives back half to ALICE on the relay chain
+		let destination: Location = Parent.into();
+		let beneficiary: Location =
+			AccountId32 { id: ALICE.clone().into(), network: Some(NetworkId::Polkadot) }.into();
+		// We specify `Parent` because we are referencing the Relay Chain token.
+		// This chain doesn't have a token of its own, so we always refer to this token,
+		// and we do so by the Location of the Relay Chain.
+		let assets: Assets = (Parent, 25u128 * CENTS as u128).into();
+		assert_ok!(parachain::XcmPallet::transfer_assets(
+			parachain::RuntimeOrigin::signed(BOB),
+			Box::new(VersionedLocation::V4(destination)),
+			Box::new(VersionedLocation::V4(beneficiary)),
+			Box::new(VersionedAssets::V4(assets)),
+			0,
+			WeightLimit::Unlimited,
+		));
+
+		// BOB's balance decreased
+		assert_eq!(parachain::Balances::free_balance(&BOB), 25 * CENTS);
+	});
+
+	Relay::execute_with(|| {
+		// ALICE's balance increases
+		assert_eq!(
+			relay_chain::Balances::free_balance(&ALICE),
+			INITIAL_BALANCE - 50 * CENTS + 25 * CENTS
+		);
+
+		// The funds in the parachain's sovereign account decrease.
+		let parachain: Location = Parachain(2222).into();
+		let parachains_sovereign_account =
+			relay_chain::LocationToAccountId::convert_location(&parachain).unwrap();
+		assert_eq!(relay_chain::Balances::free_balance(parachains_sovereign_account), 25 * CENTS);
+	});
+}
diff --git a/polkadot/xcm/docs/src/fundamentals.rs b/polkadot/xcm/docs/src/fundamentals.rs
new file mode 100644
index 00000000000..28899df801a
--- /dev/null
+++ b/polkadot/xcm/docs/src/fundamentals.rs
@@ -0,0 +1,177 @@
+// Copyright Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see .
+
+//! # XCM Fundamentals
+//!
+//! XCM standardizes usual actions users take in consensus systems, for example
+//! dealing with assets locally, on other chains, and locking them.
+//! XCM programs can be executed locally or sent to a different consensus system.
+//! Examples of consensus systems are blockchains and smart contracts.
+//!
+//! The goal of XCM is to allow multi-chain ecosystems to thrive via specialization.
+//! Very specific functionalities can be abstracted away and standardized in this common language.
+//! Then, every member of the ecosystem can implement the subset of the language that makes sense
+//! for them.
+//!
+//! The language evolves over time to accommodate the needs of the community
+//! via the [RFC process](https://github.com/paritytech/xcm-format/blob/master/proposals/0032-process.md).
+//!
+//! 
XCM is the language, it deals with interpreting and executing programs. +//! It does not deal with actually **sending** these programs from one consensus system to another. +//! This responsibility falls to a transport protocol. +//! XCM can even be interpreted on the local system, with no need of a transport protocol. +//! However, automatic and composable workflows can be achieved via the use of one. +//! +//! At the core of XCM lies the XCVM, the Cross-Consensus Virtual Machine. +//! It's the virtual machine that executes XCM programs. +//! It is a specification that comes with the language. +//! +//! For these docs, we'll use a Rust implementation of XCM and the XCVM, consisting of the following +//! parts: +//! - [`XCM`](xcm): Holds the definition of an XCM program, the instructions and main concepts. +//! - [`Executor`](xcm_executor): Implements the XCVM, capable of executing XCMs. Highly +//! configurable. +//! - [`Builder`](xcm_builder): A collection of types used to configure the executor. +//! - [`XCM Pallet`](pallet_xcm): A FRAME pallet for interacting with the executor. +//! - [`Simulator`](xcm_simulator): A playground to tinker with different XCM programs and executor +//! configurations. +//! +//! XCM programs are composed of Instructions, which reference Locations and Assets. +//! +//! ## Locations +//! +//! Locations are XCM's vocabulary of places we want to talk about in our XCM programs. +//! They are used to reference things like 32-byte accounts, governance bodies, smart contracts, +//! blockchains and more. +//! +//! Locations are hierarchical. +//! This means some places in consensus are wholly encapsulated in other places. +//! Say we have two systems A and B. +//! If any change in A's state implies a change in B's state, then we say A is interior to B. +#![doc = simple_mermaid::mermaid!("../mermaid/location_hierarchy.mmd")] +//! +//! Parachains are interior to their Relay Chain, since a change in their state implies a change in +//! the Relay Chain's state. +//! +//! Because of this hierarchy, the way we represent locations is with both a number of **parents**, +//! times we move __up__ the hierarchy, and a sequence of **junctions**, the steps we take __down__ +//! the hierarchy after going up the specified number of parents. +//! +//! In Rust, this is specified with the following datatype: +//! ```ignore +//! pub struct Location { +//! parents: u8, +//! interior: Junctions, +//! } +//! ``` +//! +//! Many junctions are available; parachains, pallets, 32 and 20 byte accounts, governance bodies, +//! and arbitrary indices are the most common. +//! A full list of available junctions can be found in the [format](https://github.com/paritytech/xcm-format#interior-locations--junctions) +//! and [Junction enum](xcm::v4::prelude::Junction). +//! +//! We'll use a file system notation to represent locations, and start with relative locations. +//! In the diagram, the location of parachain 1000 as seen from all other locations is as follows: +//! - From the relaychain: `Parachain(1000)` +//! - From parachain 1000 itself: `Here` +//! - From parachain 2000: `../Parachain(1000)` +//! +//! Relative locations are interpreted by the system that is executing an XCM program, which is the +//! receiver of a message in the case where it's sent. +//! +//! Locations can also be absolute. +//! Keeping in line with our filesystem analogy, we can imagine the root of our filesystem to exist. +//! This would be a location with no parents, that is also the parent of all systems that derive +//! 
their own consensus, say Polkadot or Ethereum or Bitcoin. +//! Such a location does not exist concretely, but we can still use this definition for it. +//! This is the **universal location**. +//! We need the universal location to be able to describe locations in an absolute way. +#![doc = simple_mermaid::mermaid!("../mermaid/universal_location.mmd")] +//! +//! Here, the absolute location of parachain 1000 would be +//! `GlobalConsensus(Polkadot)/Parachain(1000)`. +//! +//! ## Assets +//! +//! We want to be able to reference assets in our XCM programs, if only to be able to pay for fees. +//! Assets are represented using locations. +//! +//! The native asset of a chain is represented by the location of that chain. +//! For example, DOT is represented by the location of the Polkadot relaychain. +//! If the interpreting chain has its own asset, it would be represented by `Here`. +//! +//! How do we represent other assets? +//! The asset hub system parachain in Polkadot, for example, holds a lot of assets. +//! To represent each of them, it uses the indices we mentioned, and it makes them interior to the +//! assets pallet instance it uses. +//! USDT, an example asset that lives on asset hub, is identified by the location +//! `Parachain(1000)/PalletInstance(53)/GeneralIndex(1984)`, when seen from the Polkadot relaychain. +#![doc = simple_mermaid::mermaid!("../mermaid/usdt_location.mmd")] +//! +//! Asset Hub also has another type of assets called `ForeignAssets`. +//! These assets are identified by the XCM Location to their origin. +//! Two such assets are a Parachain asset, like Moonbeam's GLMR, and KSM, from the cousin Kusama +//! network. These are represented as `../Parachain(2004)/PalletInstance(10)` and +//! `../../GlobalConsensus(Kusama)` respectively. +//! +//! The whole type can be seen in the [format](https://github.com/paritytech/xcm-format#6-universal-asset-identifiers) +//! and [rust docs](xcm::v4::prelude::Asset). +//! +//! ## Instructions +//! +//! Given the vocabulary to talk about both locations -- chains and accounts -- and assets, we now +//! need a way to express what we want the consensus system to do when executing our programs. +//! We need a way of writing our programs. +//! +//! XCM programs are composed of a sequence of instructions. +//! +//! All available instructions can be seen in the [format](https://github.com/paritytech/xcm-format#5-the-xcvm-instruction-set) +//! and the [Instruction enum](xcm::v4::prelude::Instruction). +//! +//! A very simple example is the following: +//! +//! ```ignore +//! let message = Xcm(vec![ +//! TransferAsset { assets, beneficiary }, +//! ]); +//! ``` +//! +//! This instruction is enough to transfer `assets` from the account of the **origin** of a message +//! to the `beneficiary` account. However, because of XCM's generality, fees need to be paid +//! explicitly. This next example sheds more light on this: +//! +//! ```ignore +//! let message = Xcm(vec![ +//! WithdrawAsset(assets), +//! BuyExecution { fees: assets, weight_limit }, +//! DepositAsset { assets: AssetFilter(Wild(All)), beneficiary }, +//! ]); +//! ``` +//! +//! Here we see the process of transferring assets was broken down into smaller instructions, and we +//! add the explicit fee payment step in the middle. +//! `WithdrawAsset` withdraws assets from the account of the **origin** of the message for usage +//! inside this message's execution. `BuyExecution` explicitly buys execution for this program using +//! 
the assets specified in `fees`, with a sanity check of `weight_limit`. `DepositAsset` uses a +//! wildcard, specifying all remaining `assets` after subtracting the fees and a `beneficiary` +//! account. +//! +//! ## Next steps +//! +//! Continue with the [guides](crate::guides) for step-by-step tutorials on XCM, +//! or jump to the [cookbook](crate::cookbook) to see examples. +//! +//! The [glossary](crate::glossary) can be useful if some of the terms are confusing. diff --git a/polkadot/xcm/docs/src/glossary.rs b/polkadot/xcm/docs/src/glossary.rs new file mode 100644 index 00000000000..6035888ab73 --- /dev/null +++ b/polkadot/xcm/docs/src/glossary.rs @@ -0,0 +1,123 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! # Glossary +//! +//! ## XCM (Cross-Consensus Messaging) +//! +//! A messaging format meant to communicate intentions between consensus systems. +//! XCM could also refer to a single message. +//! +//! ## Instructions +//! +//! XCMs are composed of a sequence of instructions. +//! Each instruction aims to convey a particular intention. +//! There are instructions for transferring and locking assets, handling fees, calling arbitrary +//! blobs, and more. +//! +//! ## Consensus system +//! +//! A system that can reach any kind of consensus. +//! For example, relay chains, parachains, smart contracts. +//! Most messaging between consensus systems has to be done asynchronously, for this, XCM is used. +//! Between two smart contracts on the same parachain, however, communication can be done +//! synchronously. +//! +//! ## [`Location`](xcm::v4::prelude::Location) +//! +//! A way of addressing consensus systems. +//! These could be relative or absolute. +//! +//! ## [`Junction`](xcm::v4::prelude::Junction) +//! +//! The different ways of descending down a [`Location`](xcm::v4::prelude::Location) hierarchy. +//! A junction can be a Parachain, an Account, or more. +//! +//! ## [`Asset`](xcm::v4::prelude::Asset) +//! +//! A way of identifying assets in the same or another consensus system, by using a +//! [`Location`](xcm::v4::prelude::Location). +//! +//! ## Sovereign account +//! +//! An account in a consensus system that is controlled by an account in another consensus system. +//! +//! Runtimes use a converter between a [`Location`](xcm::v4::prelude::Location) and an account. +//! These converters implement the [`ConvertLocation`](xcm_executor::traits::ConvertLocation) trait. +//! +//! ## Teleport +//! +//! A way of transferring assets between two consensus systems without the need of a third party. +//! It consists of the sender system burning the asset that wants to be sent over and the recipient +//! minting an equivalent amount of that asset. It requires a lot of trust between the two systems, +//! since failure to mint or burn will reduce or increase the total issuance of the token. +//! +//! ## Reserve asset transfer +//! +//! 
A way of transferring assets between two consensus systems that don't trust each other, by using
+//! a third system they both trust, called the reserve. The real asset only exists on the reserve;
+//! both sender and recipient only deal with derivatives. It consists of the sender burning a
+//! certain amount of derivatives, telling the reserve to move real assets from its sovereign
+//! account to the destination's sovereign account, and then telling the recipient to mint the
+//! right amount of derivatives.
+//! In practice, the reserve chain can also be the source or the destination.
+//!
+//! ## XCVM
+//!
+//! The virtual machine behind XCM.
+//! Every XCM is an XCVM programme.
+//! It holds state in registers.
+//!
+//! An implementation of the virtual machine is the [`xcm-executor`](xcm_executor::XcmExecutor).
+//!
+//! ## Holding register
+//!
+//! An XCVM register used to hold arbitrary `Asset`s during the execution of an XCVM programme.
+//!
+//! ## Barrier
+//!
+//! An XCM executor configuration item that works as a firewall for incoming XCMs.
+//! All XCMs have to pass the barrier to be executed, else they are dropped.
+//! It can be used for whitelisting only certain types of messages or messages from certain senders.
+//!
+//! Lots of barrier definitions exist in [`xcm-builder`](xcm_builder).
+//!
+//! ## VMP (Vertical Message Passing)
+//!
+//! Umbrella term for both UMP (Upward Message Passing) and DMP (Downward Message Passing).
+//!
+//! The following diagram shows the uses of both protocols:
+#![doc = simple_mermaid::mermaid!("../mermaid/transport_protocols.mmd")]
+//!
+//! ## UMP (Upward Message Passing)
+//!
+//! Transport-layer protocol that allows parachains to send messages upwards to their relay chain.
+//!
+//! ## DMP (Downward Message Passing)
+//!
+//! Transport-layer protocol that allows the relay chain to send messages downwards to one of its
+//! parachains.
+//!
+//! ## XCMP (Cross-Consensus Message Passing)
+//!
+//! Transport-layer protocol that allows parachains to send messages between themselves, without
+//! going through the relay chain.
+//!
+//! ## HRMP (Horizontal Message Passing)
+//!
+//! Transport-layer protocol that allows a parachain to send messages to a sibling parachain,
+//! going through the relay chain. It's a precursor to XCMP, also known as XCMP-lite.
+//! It uses a mixture of UMP and DMP.
diff --git a/polkadot/xcm/docs/src/guides/mod.rs b/polkadot/xcm/docs/src/guides/mod.rs
new file mode 100644
index 00000000000..5af89428d9a
--- /dev/null
+++ b/polkadot/xcm/docs/src/guides/mod.rs
@@ -0,0 +1,25 @@
+// Copyright Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see .
+
+//! # XCM Guides
+//!
+//! These guides aim to get you up and running with XCM.
+//!
+//! Coming soon.
+//!
+//! ## Next steps
+//!
+//! Jump to the [cookbook](crate::cookbook) for different examples.
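+//!
+//! ## Appendix: a first taste of XCM
+//!
+//! Until the guides land, here is a minimal, hypothetical sketch of the
+//! withdraw/pay-fees/deposit pattern described in the
+//! [fundamentals](crate::fundamentals). The asset, amount and beneficiary below
+//! are illustrative assumptions, not part of any particular recipe:
+//! ```ignore
+//! // An assumed fee asset: some amount of the parent chain's native token.
+//! let fee_asset: Asset = (Parent, 100u128).into();
+//! // An assumed beneficiary: a local 32-byte account.
+//! let beneficiary: Location = AccountId32 { network: None, id: [0u8; 32] }.into();
+//! let message = Xcm(vec![
+//!     // Move the assets from the origin's account into the holding register.
+//!     WithdrawAsset(fee_asset.clone().into()),
+//!     // Pay for execution with part of the withdrawn assets.
+//!     BuyExecution { fees: fee_asset, weight_limit: WeightLimit::Unlimited },
+//!     // Deposit everything left in holding into the beneficiary's account.
+//!     DepositAsset { assets: All.into(), beneficiary },
+//! ]);
+//! ```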
diff --git a/polkadot/xcm/docs/src/lib.rs b/polkadot/xcm/docs/src/lib.rs new file mode 100644 index 00000000000..287c97140c9 --- /dev/null +++ b/polkadot/xcm/docs/src/lib.rs @@ -0,0 +1,63 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! # XCM Docs +//! +//! Documentation and guides for XCM +//! +//! Welcome to the Cross-Consensus Messaging documentation! +//! +//! XCM is a **language** for communicating **intentions** between **consensus systems**. +//! Whether you're a developer, a blockchain enthusiast, or just interested in Polkadot, this guide +//! aims to provide you with an easy-to-understand and comprehensive introduction to XCM. +//! +//! ## Getting started +//! +//! Head over to the [fundamentals](fundamentals) section. +//! Then, go to the [guides](guides), to learn about how to do things with XCM. +//! +//! ## Cookbook +//! +//! There's also the [cookbook](cookbook) for useful recipes for XCM. +//! +//! ## Glossary +//! +//! There's a [glossary](glossary) with common terms used throughout the docs. +//! +//! ## Contribute +//! +//! To contribute to the format, check out the [RFC process](https://github.com/paritytech/xcm-format/blob/master/proposals/0032-process.md). +//! To contribute to these docs, [make a PR](https://github.com/paritytech/polkadot-sdk). +//! +//! ## Why Rust Docs? +//! +//! Rust Docs allow docs to be as close to the source as possible. +//! They're also available offline automatically for anyone who has the `polkadot-sdk` repo locally. +//! +//! ## Docs structure +#![doc = simple_mermaid::mermaid!("../mermaid/structure.mmd")] + +/// Fundamentals of the XCM language. The virtual machine, instructions, locations and assets. +pub mod fundamentals; + +/// Step-by-step guides to set up an XCM environment and start hacking. +pub mod guides; + +/// Useful recipes for programs and configurations. +pub mod cookbook; + +/// Glossary +pub mod glossary; diff --git a/polkadot/xcm/xcm-simulator/Cargo.toml b/polkadot/xcm/xcm-simulator/Cargo.toml index 9324359d365..fc09b5e3186 100644 --- a/polkadot/xcm/xcm-simulator/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/Cargo.toml @@ -11,15 +11,19 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.6.12" } +scale-info = { version = "2.6.0", default-features = false } paste = "1.0.7" frame-support = { path = "../../../substrate/frame/support" } +frame-system = { path = "../../../substrate/frame/system" } sp-io = { path = "../../../substrate/primitives/io" } sp-std = { path = "../../../substrate/primitives/std" } +sp-runtime = { path = "../../../substrate/primitives/runtime" } xcm = { package = "staging-xcm", path = ".." 
} xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor" } xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder" } +polkadot-primitives = { path = "../../primitives" } polkadot-core-primitives = { path = "../../core-primitives" } polkadot-parachain-primitives = { path = "../../parachain" } polkadot-runtime-parachains = { path = "../../runtime/parachains" } diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/mod.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/mod.rs index 8021f955165..93c8302757c 100644 --- a/polkadot/xcm/xcm-simulator/example/src/parachain/mod.rs +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/mod.rs @@ -16,7 +16,6 @@ //! Parachain runtime mock. -mod mock_msg_queue; mod xcm_config; pub use xcm_config::*; @@ -36,6 +35,7 @@ use sp_std::prelude::*; use xcm::latest::prelude::*; use xcm_builder::{EnsureXcmOrigin, SignedToAccountId32}; use xcm_executor::{traits::ConvertLocation, XcmExecutor}; +use xcm_simulator::mock_message_queue; pub type AccountId = AccountId32; pub type Balance = u128; @@ -121,7 +121,7 @@ parameter_types! { pub const ReservedDmpWeight: Weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_div(4), 0); } -impl mock_msg_queue::Config for Runtime { +impl mock_message_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; type XcmExecutor = XcmExecutor; } @@ -175,7 +175,7 @@ construct_runtime!( pub struct Runtime { System: frame_system, Balances: pallet_balances, - MsgQueue: mock_msg_queue, + MsgQueue: mock_message_queue, PolkadotXcm: pallet_xcm, ForeignUniques: pallet_uniques, } diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/constants.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/constants.rs index f6d0174def8..0769507ec37 100644 --- a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/constants.rs +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/constants.rs @@ -14,9 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::parachain::MsgQueue; +use crate::parachain::Runtime; use frame_support::parameter_types; use xcm::latest::prelude::*; +use xcm_simulator::mock_message_queue::ParachainId; parameter_types! { pub KsmPerSecondPerByte: (AssetId, u128, u128) = (AssetId(Parent.into()), 1, 1); @@ -26,5 +27,5 @@ parameter_types! { parameter_types! 
{ pub const KsmLocation: Location = Location::parent(); pub const RelayNetwork: NetworkId = NetworkId::Kusama; - pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(MsgQueue::parachain_id().into())].into(); + pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(ParachainId::::get().into())].into(); } diff --git a/polkadot/xcm/xcm-simulator/example/src/tests.rs b/polkadot/xcm/xcm-simulator/example/src/tests.rs index 6486a849af3..34c1feb6e94 100644 --- a/polkadot/xcm/xcm-simulator/example/src/tests.rs +++ b/polkadot/xcm/xcm-simulator/example/src/tests.rs @@ -19,7 +19,7 @@ use crate::*; use codec::Encode; use frame_support::{assert_ok, weights::Weight}; use xcm::latest::QueryResponseInfo; -use xcm_simulator::TestExt; +use xcm_simulator::{mock_message_queue::ReceivedDmp, TestExt}; // Helper function for forming buy execution message fn buy_execution(fees: impl Into) -> Instruction { @@ -171,7 +171,7 @@ fn remote_locking_and_unlocking() { ParaA::execute_with(|| { assert_eq!( - parachain::MsgQueue::received_dmp(), + ReceivedDmp::::get(), vec![Xcm(vec![NoteUnlockable { owner: (Parent, Parachain(2)).into(), asset: (Parent, locked_amount).into() @@ -501,7 +501,7 @@ fn query_holding() { // Check that QueryResponse message was received ParaA::execute_with(|| { assert_eq!( - parachain::MsgQueue::received_dmp(), + ReceivedDmp::::get(), vec![Xcm(vec![QueryResponse { query_id: query_id_set, response: Response::Assets(Assets::new()), diff --git a/polkadot/xcm/xcm-simulator/src/lib.rs b/polkadot/xcm/xcm-simulator/src/lib.rs index 7efbc658bbf..a6747a4789e 100644 --- a/polkadot/xcm/xcm-simulator/src/lib.rs +++ b/polkadot/xcm/xcm-simulator/src/lib.rs @@ -16,6 +16,10 @@ //! Test kit to simulate cross-chain message passing and XCM execution. +/// Implementation of a simple message queue. +/// Used for sending messages. +pub mod mock_message_queue; + pub use codec::Encode; pub use paste; diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/mock_msg_queue.rs b/polkadot/xcm/xcm-simulator/src/mock_message_queue.rs similarity index 72% rename from polkadot/xcm/xcm-simulator/example/src/parachain/mock_msg_queue.rs rename to polkadot/xcm/xcm-simulator/src/mock_message_queue.rs index 17cde921f3e..96b47999fe9 100644 --- a/polkadot/xcm/xcm-simulator/example/src/parachain/mock_msg_queue.rs +++ b/polkadot/xcm/xcm-simulator/src/mock_message_queue.rs @@ -1,4 +1,4 @@ -// Copyright (C) Parity Technologies (UK) Ltd. +// Copyright Parity Technologies (UK) Ltd. // This file is part of Polkadot. // Polkadot is free software: you can redistribute it and/or modify @@ -14,14 +14,21 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -pub use pallet::*; -use polkadot_core_primitives::BlockNumber as RelayBlockNumber; +//! Simple mock message queue. 
+ +use codec::{Decode, Encode}; + use polkadot_parachain_primitives::primitives::{ DmpMessageHandler, Id as ParaId, XcmpMessageFormat, XcmpMessageHandler, }; +use polkadot_primitives::BlockNumber as RelayBlockNumber; use sp_runtime::traits::{Get, Hash}; + +use sp_std::prelude::*; use xcm::{latest::prelude::*, VersionedXcm}; +pub use pallet::*; + #[frame_support::pallet] pub mod pallet { use super::*; @@ -41,15 +48,15 @@ pub mod pallet { pub struct Pallet(_); #[pallet::storage] - pub(super) type ParachainId = StorageValue<_, ParaId, ValueQuery>; + pub type ParachainId = StorageValue<_, ParaId, ValueQuery>; #[pallet::storage] /// A queue of received DMP messages - pub(super) type ReceivedDmp = StorageValue<_, Vec>, ValueQuery>; + pub type ReceivedDmp = StorageValue<_, Vec>, ValueQuery>; impl Get for Pallet { fn get() -> ParaId { - Self::parachain_id() + ParachainId::::get() } } @@ -60,45 +67,34 @@ pub mod pallet { pub enum Event { // XCMP /// Some XCM was executed OK. - Success(Option), + Success { message_id: Option }, /// Some XCM failed. - Fail(Option, XcmError), + Fail { message_id: Option, error: XcmError }, /// Bad XCM version used. - BadVersion(Option), + BadVersion { message_id: Option }, /// Bad XCM format used. - BadFormat(Option), + BadFormat { message_id: Option }, // DMP /// Downward message is invalid XCM. - InvalidFormat(MessageId), + InvalidFormat { message_id: MessageId }, /// Downward message is unsupported version of XCM. - UnsupportedVersion(MessageId), + UnsupportedVersion { message_id: MessageId }, /// Downward message executed with the given outcome. - ExecutedDownward(MessageId, Outcome), + ExecutedDownward { message_id: MessageId, outcome: Outcome }, } impl Pallet { - /// Get the Parachain Id. - pub fn parachain_id() -> ParaId { - ParachainId::::get() - } - - /// Set the Parachain Id. pub fn set_para_id(para_id: ParaId) { ParachainId::::put(para_id); } - /// Get the queue of receieved DMP messages. - pub fn received_dmp() -> Vec> { - ReceivedDmp::::get() - } - fn handle_xcmp_message( sender: ParaId, _sent_at: RelayBlockNumber, xcm: VersionedXcm, - max_weight: Weight, - ) -> Result { + max_weight: xcm::latest::Weight, + ) -> Result { let hash = Encode::using_encoded(&xcm, T::Hashing::hash); let mut message_hash = Encode::using_encoded(&xcm, sp_io::hashing::blake2_256); let (result, event) = match Xcm::::try_from(xcm) { @@ -111,15 +107,20 @@ pub mod pallet { max_weight, Weight::zero(), ) { - Outcome::Error { error } => (Err(error), Event::Fail(Some(hash), error)), - Outcome::Complete { used } => (Ok(used), Event::Success(Some(hash))), + Outcome::Error { error } => + (Err(error), Event::Fail { message_id: Some(hash), error }), + Outcome::Complete { used } => + (Ok(used), Event::Success { message_id: Some(hash) }), // As far as the caller is concerned, this was dispatched without error, so // we just report the weight used. 
Outcome::Incomplete { used, error } => - (Ok(used), Event::Fail(Some(hash), error)), + (Ok(used), Event::Fail { message_id: Some(hash), error }), } }, - Err(()) => (Err(XcmError::UnhandledXcmVersion), Event::BadVersion(Some(hash))), + Err(()) => ( + Err(XcmError::UnhandledXcmVersion), + Event::BadVersion { message_id: Some(hash) }, + ), }; Self::deposit_event(event); result @@ -129,8 +130,8 @@ pub mod pallet { impl XcmpMessageHandler for Pallet { fn handle_xcmp_messages<'a, I: Iterator>( iter: I, - max_weight: Weight, - ) -> Weight { + max_weight: xcm::latest::Weight, + ) -> xcm::latest::Weight { for (sender, sent_at, data) in iter { let mut data_ref = data; let _ = XcmpMessageFormat::decode(&mut data_ref) @@ -156,15 +157,16 @@ pub mod pallet { iter: impl Iterator)>, limit: Weight, ) -> Weight { - for (_i, (_sent_at, data)) in iter.enumerate() { + for (_sent_at, data) in iter { let mut id = sp_io::hashing::blake2_256(&data[..]); let maybe_versioned = VersionedXcm::::decode(&mut &data[..]); match maybe_versioned { Err(_) => { - Self::deposit_event(Event::InvalidFormat(id)); + Self::deposit_event(Event::InvalidFormat { message_id: id }); }, Ok(versioned) => match Xcm::try_from(versioned) { - Err(()) => Self::deposit_event(Event::UnsupportedVersion(id)), + Err(()) => + Self::deposit_event(Event::UnsupportedVersion { message_id: id }), Ok(x) => { let outcome = T::XcmExecutor::prepare_and_execute( Parent, @@ -173,8 +175,11 @@ pub mod pallet { limit, Weight::zero(), ); - >::append(x); - Self::deposit_event(Event::ExecutedDownward(id, outcome)); + ReceivedDmp::::append(x); + Self::deposit_event(Event::ExecutedDownward { + message_id: id, + outcome, + }); }, }, } -- GitLab From 717eb2c4ac58c6fec1dfbcb47aa84a21646f8e46 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Thu, 16 May 2024 10:50:34 +0200 Subject: [PATCH 018/106] [ci] Fix publish-subsystem-benchmarks (#4479) Fix after https://github.com/paritytech/polkadot-sdk/pull/4449 --- .gitlab/pipeline/publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml index 68712610ad2..8b27c724748 100644 --- a/.gitlab/pipeline/publish.yml +++ b/.gitlab/pipeline/publish.yml @@ -129,7 +129,7 @@ trigger_workflow: curl -q -X POST \ -H "Accept: application/vnd.github.v3+json" \ -H "Authorization: token $GITHUB_TOKEN" \ - https://api.github.com/repos/paritytech/${CI_PROJECT_NAME}/actions/workflows/subsystem-benchmarks.yml/dispatches \ + https://api.github.com/repos/paritytech/${CI_PROJECT_NAME}/actions/workflows/publish-subsystem-benchmarks.yml/dispatches \ -d "{\"ref\":\"refs/heads/master\",\"inputs\":{\"benchmark-data-dir-path\":\"$benchmark_dir\",\"output-file-path\":\"$benchmark_name\"}}"; sleep 300; done -- GitLab From 76230a15c7e28f57530801b24236b12e73c0f53f Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Thu, 16 May 2024 10:55:31 +0200 Subject: [PATCH 019/106] Deprecate `dmp-queue` pallet (#4475) `cumulus-pallet-dmp-queue` is not needed anymore since https://github.com/paritytech/polkadot-sdk/pull/1246. The only logic that remains in the pallet is a lazy migration in the [`on_idle`](https://github.com/paritytech/polkadot-sdk/blob/8d62c13b2541920c37fb9d9ca733fcce91e96573/cumulus/pallets/dmp-queue/src/lib.rs#L158) hook. 
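For context, a runtime that still carries the pallet while the lazy migration
drains only needs its existing wiring plus an `#[allow(deprecated)]`. A minimal
sketch, assuming the post-message-queue-migration `Config` items and that
`MessageQueue` and `RelayOrigin` are the usual runtime definitions (both names
are illustrative here, not part of this PR):

```rust
// Hedged sketch: keep the deprecated pallet wired up until its lazy `on_idle`
// migration has finished, silencing the deprecation warning locally.
#[allow(deprecated)]
impl cumulus_pallet_dmp_queue::Config for Runtime {
	type RuntimeEvent = RuntimeEvent;
	// Forward any remaining messages into the message queue pallet;
	// `MessageQueue` and `RelayOrigin` are assumed runtime definitions.
	type DmpSink = frame_support::traits::EnqueueWithOrigin<MessageQueue, RelayOrigin>;
	type WeightInfo = cumulus_pallet_dmp_queue::weights::SubstrateWeight<Runtime>;
}
```

Once the migration has fully drained on-chain, the pallet can be dropped from
`construct_runtime!` entirely.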
---------

Signed-off-by: Oliver Tale-Yazdi
---
 cumulus/pallets/dmp-queue/src/lib.rs |  4 ++++
 prdoc/pr_4475.prdoc                  | 10 ++++++++++
 2 files changed, 14 insertions(+)
 create mode 100644 prdoc/pr_4475.prdoc

diff --git a/cumulus/pallets/dmp-queue/src/lib.rs b/cumulus/pallets/dmp-queue/src/lib.rs
index 79cc4bc895e..9b3ec684feb 100644
--- a/cumulus/pallets/dmp-queue/src/lib.rs
+++ b/cumulus/pallets/dmp-queue/src/lib.rs
@@ -21,6 +21,7 @@ //! from the runtime once `Completed` was emitted.
 #![cfg_attr(not(feature = "std"), no_std)]
+#![allow(deprecated)] // The pallet itself is deprecated.
 use migration::*;
 pub use pallet::*;
@@ -38,6 +39,9 @@ pub type MaxDmpMessageLenOf<T> =
 	<<T as Config>::DmpSink as frame_support::traits::HandleMessage>::MaxMessageLen;
 #[frame_support::pallet]
+#[deprecated(
+	note = "`cumulus-pallet-dmp-queue` will be removed after November 2024. It can be removed once its lazy migration has completed. See ."
+)]
 pub mod pallet {
 	use super::*;
 	use frame_support::{pallet_prelude::*, traits::HandleMessage, weights::WeightMeter};
diff --git a/prdoc/pr_4475.prdoc b/prdoc/pr_4475.prdoc
new file mode 100644
index 00000000000..30093dcd32b
--- /dev/null
+++ b/prdoc/pr_4475.prdoc
@@ -0,0 +1,10 @@
+title: "Deprecate dmp-queue pallet"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Schedule the DMP queue pallet for deletion. It is not needed anymore since https://github.com/paritytech/polkadot-sdk/pull/1246.
+
+crates:
+  - name: cumulus-pallet-dmp-queue
+    bump: minor
-- 
GitLab

From 6487ac1ede14b5785be1429655ed8c387d82be9a Mon Sep 17 00:00:00 2001
From: "polka.dom"
Date: Thu, 16 May 2024 05:01:29 -0400
Subject: [PATCH 020/106] Remove pallet::getter usage from the bounties and
 child-bounties pallets (#4392)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

As per #3326, removes pallet::getter usage from the bounties and
child-bounties pallets. The syntax `StorageItem::<T>::get()` should be used
instead.

Changes to one pallet involved changes in the other, so I figured it'd be
best to combine these two.

cc @muraca

---------

Co-authored-by: Bastian Köcher
---
 prdoc/pr_4392.prdoc                          | 16 +++++
 substrate/frame/bounties/src/lib.rs          |  6 +-
 substrate/frame/bounties/src/tests.rs        | 51 ++++++++--------
 .../frame/child-bounties/src/benchmarking.rs |  2 +-
 substrate/frame/child-bounties/src/lib.rs    | 29 ++++-----
 substrate/frame/child-bounties/src/tests.rs  | 59 ++++++++++---------
 6 files changed, 88 insertions(+), 75 deletions(-)
 create mode 100644 prdoc/pr_4392.prdoc

diff --git a/prdoc/pr_4392.prdoc b/prdoc/pr_4392.prdoc
new file mode 100644
index 00000000000..898ce9be069
--- /dev/null
+++ b/prdoc/pr_4392.prdoc
@@ -0,0 +1,16 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Remove `pallet::getter` usage from both bounties and child bounties pallets
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR removes `pallet::getter`s from `pallet-bounties` and `pallet-child-bounties`.
+      The syntax `StorageItem::<T>::get()` should be used instead.
+
+crates:
+  - name: pallet-bounties
+    bump: major
+  - name: pallet-child-bounties
+    bump: major
diff --git a/substrate/frame/bounties/src/lib.rs b/substrate/frame/bounties/src/lib.rs
index c099fc48b7a..c930868bf10 100644
--- a/substrate/frame/bounties/src/lib.rs
+++ b/substrate/frame/bounties/src/lib.rs
@@ -303,12 +303,10 @@ pub mod pallet {
 	/// Number of bounty proposals that have been made.
#[pallet::storage] - #[pallet::getter(fn bounty_count)] pub type BountyCount, I: 'static = ()> = StorageValue<_, BountyIndex, ValueQuery>; /// Bounties that have been made. #[pallet::storage] - #[pallet::getter(fn bounties)] pub type Bounties, I: 'static = ()> = StorageMap< _, Twox64Concat, @@ -318,13 +316,11 @@ pub mod pallet { /// The description of each bounty. #[pallet::storage] - #[pallet::getter(fn bounty_descriptions)] pub type BountyDescriptions, I: 'static = ()> = StorageMap<_, Twox64Concat, BountyIndex, BoundedVec>; /// Bounty indices that have been approved but not yet funded. #[pallet::storage] - #[pallet::getter(fn bounty_approvals)] pub type BountyApprovals, I: 'static = ()> = StorageValue<_, BoundedVec, ValueQuery>; @@ -849,7 +845,7 @@ impl, I: 'static> Pallet { description.try_into().map_err(|_| Error::::ReasonTooBig)?; ensure!(value >= T::BountyValueMinimum::get(), Error::::InvalidValue); - let index = Self::bounty_count(); + let index = BountyCount::::get(); // reserve deposit for new bounty let bond = T::BountyDepositBase::get() + diff --git a/substrate/frame/bounties/src/tests.rs b/substrate/frame/bounties/src/tests.rs index de747db5374..a89f4ff9fbf 100644 --- a/substrate/frame/bounties/src/tests.rs +++ b/substrate/frame/bounties/src/tests.rs @@ -534,7 +534,7 @@ fn propose_bounty_works() { assert_eq!(Balances::free_balance(0), 100 - deposit); assert_eq!( - Bounties::bounties(0).unwrap(), + pallet_bounties::Bounties::::get(0).unwrap(), Bounty { proposer: 0, fee: 0, @@ -545,9 +545,12 @@ fn propose_bounty_works() { } ); - assert_eq!(Bounties::bounty_descriptions(0).unwrap(), b"1234567890".to_vec()); + assert_eq!( + pallet_bounties::BountyDescriptions::::get(0).unwrap(), + b"1234567890".to_vec() + ); - assert_eq!(Bounties::bounty_count(), 1); + assert_eq!(pallet_bounties::BountyCount::::get(), 1); }); } @@ -598,10 +601,10 @@ fn close_bounty_works() { assert_eq!(Balances::reserved_balance(0), 0); assert_eq!(Balances::free_balance(0), 100 - deposit); - assert_eq!(Bounties::bounties(0), None); + assert_eq!(pallet_bounties::Bounties::::get(0), None); assert!(!pallet_treasury::Proposals::::contains_key(0)); - assert_eq!(Bounties::bounty_descriptions(0), None); + assert_eq!(pallet_bounties::BountyDescriptions::::get(0), None); }); } @@ -622,7 +625,7 @@ fn approve_bounty_works() { let deposit: u64 = 80 + 5; assert_eq!( - Bounties::bounties(0).unwrap(), + pallet_bounties::Bounties::::get(0).unwrap(), Bounty { proposer: 0, fee: 0, @@ -632,7 +635,7 @@ fn approve_bounty_works() { status: BountyStatus::Approved, } ); - assert_eq!(Bounties::bounty_approvals(), vec![0]); + assert_eq!(pallet_bounties::BountyApprovals::::get(), vec![0]); assert_noop!( Bounties::close_bounty(RuntimeOrigin::root(), 0), @@ -650,7 +653,7 @@ fn approve_bounty_works() { assert_eq!(Balances::free_balance(0), 100); assert_eq!( - Bounties::bounties(0).unwrap(), + pallet_bounties::Bounties::::get(0).unwrap(), Bounty { proposer: 0, fee: 0, @@ -693,7 +696,7 @@ fn assign_curator_works() { assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, fee)); assert_eq!( - Bounties::bounties(0).unwrap(), + pallet_bounties::Bounties::::get(0).unwrap(), Bounty { proposer: 0, fee, @@ -720,7 +723,7 @@ fn assign_curator_works() { let expected_deposit = Bounties::calculate_curator_deposit(&fee); assert_eq!( - Bounties::bounties(0).unwrap(), + pallet_bounties::Bounties::::get(0).unwrap(), Bounty { proposer: 0, fee, @@ -755,7 +758,7 @@ fn unassign_curator_works() { 
assert_ok!(Bounties::unassign_curator(RuntimeOrigin::signed(4), 0)); assert_eq!( - Bounties::bounties(0).unwrap(), + pallet_bounties::Bounties::::get(0).unwrap(), Bounty { proposer: 0, fee, @@ -773,7 +776,7 @@ fn unassign_curator_works() { assert_ok!(Bounties::unassign_curator(RuntimeOrigin::root(), 0)); assert_eq!( - Bounties::bounties(0).unwrap(), + pallet_bounties::Bounties::::get(0).unwrap(), Bounty { proposer: 0, fee, @@ -817,7 +820,7 @@ fn award_and_claim_bounty_works() { assert_ok!(Bounties::award_bounty(RuntimeOrigin::signed(4), 0, 3)); assert_eq!( - Bounties::bounties(0).unwrap(), + pallet_bounties::Bounties::::get(0).unwrap(), Bounty { proposer: 0, fee, @@ -851,8 +854,8 @@ fn award_and_claim_bounty_works() { assert_eq!(Balances::free_balance(3), 56); assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 0); - assert_eq!(Bounties::bounties(0), None); - assert_eq!(Bounties::bounty_descriptions(0), None); + assert_eq!(pallet_bounties::Bounties::::get(0), None); + assert_eq!(pallet_bounties::BountyDescriptions::::get(0), None); }); } @@ -892,8 +895,8 @@ fn claim_handles_high_fee() { assert_eq!(Balances::free_balance(3), 0); assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 0); - assert_eq!(Bounties::bounties(0), None); - assert_eq!(Bounties::bounty_descriptions(0), None); + assert_eq!(pallet_bounties::Bounties::::get(0), None); + assert_eq!(pallet_bounties::BountyDescriptions::::get(0), None); }); } @@ -918,7 +921,7 @@ fn cancel_and_refund() { )); assert_eq!( - Bounties::bounties(0).unwrap(), + pallet_bounties::Bounties::::get(0).unwrap(), Bounty { proposer: 0, fee: 0, @@ -978,8 +981,8 @@ fn award_and_cancel() { assert_eq!(Balances::free_balance(0), 95); assert_eq!(Balances::reserved_balance(0), 0); - assert_eq!(Bounties::bounties(0), None); - assert_eq!(Bounties::bounty_descriptions(0), None); + assert_eq!(pallet_bounties::Bounties::::get(0), None); + assert_eq!(pallet_bounties::BountyDescriptions::::get(0), None); }); } @@ -1015,7 +1018,7 @@ fn expire_and_unassign() { assert_ok!(Bounties::unassign_curator(RuntimeOrigin::signed(0), 0)); assert_eq!( - Bounties::bounties(0).unwrap(), + pallet_bounties::Bounties::::get(0).unwrap(), Bounty { proposer: 0, fee: 10, @@ -1065,7 +1068,7 @@ fn extend_expiry() { assert_ok!(Bounties::extend_bounty_expiry(RuntimeOrigin::signed(4), 0, Vec::new())); assert_eq!( - Bounties::bounties(0).unwrap(), + pallet_bounties::Bounties::::get(0).unwrap(), Bounty { proposer: 0, fee: 10, @@ -1079,7 +1082,7 @@ fn extend_expiry() { assert_ok!(Bounties::extend_bounty_expiry(RuntimeOrigin::signed(4), 0, Vec::new())); assert_eq!( - Bounties::bounties(0).unwrap(), + pallet_bounties::Bounties::::get(0).unwrap(), Bounty { proposer: 0, fee: 10, @@ -1190,7 +1193,7 @@ fn unassign_curator_self() { assert_ok!(Bounties::unassign_curator(RuntimeOrigin::signed(1), 0)); assert_eq!( - Bounties::bounties(0).unwrap(), + pallet_bounties::Bounties::::get(0).unwrap(), Bounty { proposer: 0, fee: 10, diff --git a/substrate/frame/child-bounties/src/benchmarking.rs b/substrate/frame/child-bounties/src/benchmarking.rs index 1973564d0dc..947cfcfaa96 100644 --- a/substrate/frame/child-bounties/src/benchmarking.rs +++ b/substrate/frame/child-bounties/src/benchmarking.rs @@ -109,7 +109,7 @@ fn activate_bounty( child_bounty_setup.reason.clone(), )?; - child_bounty_setup.bounty_id = Bounties::::bounty_count() - 1; + child_bounty_setup.bounty_id = pallet_bounties::BountyCount::::get() - 1; let approve_origin = T::SpendOrigin::try_successful_origin().map_err(|_| 
BenchmarkError::Weightless)?; diff --git a/substrate/frame/child-bounties/src/lib.rs b/substrate/frame/child-bounties/src/lib.rs index 1eedeaa5a1a..04a1f9799cb 100644 --- a/substrate/frame/child-bounties/src/lib.rs +++ b/substrate/frame/child-bounties/src/lib.rs @@ -181,19 +181,16 @@ pub mod pallet { /// Number of total child bounties. #[pallet::storage] - #[pallet::getter(fn child_bounty_count)] pub type ChildBountyCount = StorageValue<_, BountyIndex, ValueQuery>; /// Number of child bounties per parent bounty. /// Map of parent bounty index to number of child bounties. #[pallet::storage] - #[pallet::getter(fn parent_child_bounties)] pub type ParentChildBounties = StorageMap<_, Twox64Concat, BountyIndex, u32, ValueQuery>; /// Child bounties that have been added. #[pallet::storage] - #[pallet::getter(fn child_bounties)] pub type ChildBounties = StorageDoubleMap< _, Twox64Concat, @@ -205,13 +202,11 @@ pub mod pallet { /// The description of each child-bounty. #[pallet::storage] - #[pallet::getter(fn child_bounty_descriptions)] pub type ChildBountyDescriptions = StorageMap<_, Twox64Concat, BountyIndex, BoundedVec>; /// The cumulative child-bounty curator fee for each parent bounty. #[pallet::storage] - #[pallet::getter(fn children_curator_fees)] pub type ChildrenCuratorFees = StorageMap<_, Twox64Concat, BountyIndex, BalanceOf, ValueQuery>; @@ -251,7 +246,7 @@ pub mod pallet { description.try_into().map_err(|_| BountiesError::::ReasonTooBig)?; ensure!(value >= T::ChildBountyValueMinimum::get(), BountiesError::::InvalidValue); ensure!( - Self::parent_child_bounties(parent_bounty_id) <= + ParentChildBounties::::get(parent_bounty_id) <= T::MaxActiveChildBountyCount::get() as u32, Error::::TooManyChildBounties, ); @@ -276,15 +271,15 @@ pub mod pallet { )?; // Get child-bounty ID. - let child_bounty_id = Self::child_bounty_count(); + let child_bounty_id = ChildBountyCount::::get(); let child_bounty_account = Self::child_bounty_account_id(child_bounty_id); // Transfer funds from parent bounty to child-bounty. T::Currency::transfer(&parent_bounty_account, &child_bounty_account, value, KeepAlive)?; // Increment the active child-bounty count. - >::mutate(parent_bounty_id, |count| count.saturating_inc()); - >::put(child_bounty_id.saturating_add(1)); + ParentChildBounties::::mutate(parent_bounty_id, |count| count.saturating_inc()); + ChildBountyCount::::put(child_bounty_id.saturating_add(1)); // Create child-bounty instance. Self::create_child_bounty( @@ -710,12 +705,12 @@ pub mod pallet { }); // Update the active child-bounty tracking count. - >::mutate(parent_bounty_id, |count| { + ParentChildBounties::::mutate(parent_bounty_id, |count| { count.saturating_dec() }); // Remove the child-bounty description. - >::remove(child_bounty_id); + ChildBountyDescriptions::::remove(child_bounty_id); // Remove the child-bounty instance from the state. 
*maybe_child_bounty = None; @@ -817,7 +812,7 @@ impl Pallet { fn ensure_bounty_active( bounty_id: BountyIndex, ) -> Result<(T::AccountId, BlockNumberFor), DispatchError> { - let parent_bounty = pallet_bounties::Pallet::::bounties(bounty_id) + let parent_bounty = pallet_bounties::Bounties::::get(bounty_id) .ok_or(BountiesError::::InvalidIndex)?; if let BountyStatus::Active { curator, update_due } = parent_bounty.get_status() { Ok((curator, update_due)) @@ -862,7 +857,7 @@ impl Pallet { ChildrenCuratorFees::::mutate(parent_bounty_id, |value| { *value = value.saturating_sub(child_bounty.fee) }); - >::mutate(parent_bounty_id, |count| { + ParentChildBounties::::mutate(parent_bounty_id, |count| { *count = count.saturating_sub(1) }); @@ -880,7 +875,7 @@ impl Pallet { debug_assert!(transfer_result.is_ok()); // Remove the child-bounty description. - >::remove(child_bounty_id); + ChildBountyDescriptions::::remove(child_bounty_id); *maybe_child_bounty = None; @@ -901,14 +896,14 @@ impl pallet_bounties::ChildBountyManager> for Pallet fn child_bounties_count( bounty_id: pallet_bounties::BountyIndex, ) -> pallet_bounties::BountyIndex { - Self::parent_child_bounties(bounty_id) + ParentChildBounties::::get(bounty_id) } fn children_curator_fees(bounty_id: pallet_bounties::BountyIndex) -> BalanceOf { // This is asked for when the parent bounty is being claimed. No use of // keeping it in state after that. Hence removing. - let children_fee_total = Self::children_curator_fees(bounty_id); - >::remove(bounty_id); + let children_fee_total = ChildrenCuratorFees::::get(bounty_id); + ChildrenCuratorFees::::remove(bounty_id); children_fee_total } } diff --git a/substrate/frame/child-bounties/src/tests.rs b/substrate/frame/child-bounties/src/tests.rs index 30601f821e4..d9405d3d289 100644 --- a/substrate/frame/child-bounties/src/tests.rs +++ b/substrate/frame/child-bounties/src/tests.rs @@ -264,7 +264,7 @@ fn add_child_bounty() { // DB check. // Check the child-bounty status. assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -275,10 +275,13 @@ fn add_child_bounty() { ); // Check the child-bounty count. - assert_eq!(ChildBounties::parent_child_bounties(0), 1); + assert_eq!(pallet_child_bounties::ParentChildBounties::::get(0), 1); // Check the child-bounty description status. 
- assert_eq!(ChildBounties::child_bounty_descriptions(0).unwrap(), b"12345-p1".to_vec(),); + assert_eq!( + pallet_child_bounties::ChildBountyDescriptions::::get(0).unwrap(), + b"12345-p1".to_vec(), + ); }); } @@ -340,7 +343,7 @@ fn child_bounty_assign_curator() { assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, fee)); assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -364,7 +367,7 @@ fn child_bounty_assign_curator() { let expected_child_deposit = CuratorDepositMultiplier::get() * fee; assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -441,7 +444,7 @@ fn award_claim_child_bounty() { let expected_deposit = CuratorDepositMultiplier::get() * fee; assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -479,7 +482,7 @@ fn award_claim_child_bounty() { assert_eq!(Balances::reserved_balance(ChildBounties::child_bounty_account_id(0)), 0); // Check the child-bounty count. - assert_eq!(ChildBounties::parent_child_bounties(0), 0); + assert_eq!(pallet_child_bounties::ParentChildBounties::::get(0), 0); }); } @@ -528,7 +531,7 @@ fn close_child_bounty_added() { assert_ok!(ChildBounties::close_child_bounty(RuntimeOrigin::signed(4), 0, 0)); // Check the child-bounty count. - assert_eq!(ChildBounties::parent_child_bounties(0), 0); + assert_eq!(pallet_child_bounties::ParentChildBounties::::get(0), 0); // Parent-bounty account status. assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 50); @@ -582,7 +585,7 @@ fn close_child_bounty_active() { assert_ok!(ChildBounties::close_child_bounty(RuntimeOrigin::signed(4), 0, 0)); // Check the child-bounty count. - assert_eq!(ChildBounties::parent_child_bounties(0), 0); + assert_eq!(pallet_child_bounties::ParentChildBounties::::get(0), 0); // Ensure child-bounty curator balance is unreserved. assert_eq!(Balances::free_balance(8), 101); @@ -647,7 +650,7 @@ fn close_child_bounty_pending() { ); // Check the child-bounty count. - assert_eq!(ChildBounties::parent_child_bounties(0), 1); + assert_eq!(pallet_child_bounties::ParentChildBounties::::get(0), 1); // Ensure no changes in child-bounty curator balance. assert_eq!(Balances::reserved_balance(8), expected_child_deposit); @@ -739,7 +742,7 @@ fn child_bounty_curator_proposed_unassign_curator() { assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, 2)); assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -757,7 +760,7 @@ fn child_bounty_curator_proposed_unassign_curator() { // Verify updated child-bounty status. assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -820,7 +823,7 @@ fn child_bounty_active_unassign_curator() { let expected_child_deposit = CuratorDepositMultiplier::get() * fee; assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -838,7 +841,7 @@ fn child_bounty_active_unassign_curator() { // Verify updated child-bounty status. 
assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -859,7 +862,7 @@ fn child_bounty_active_unassign_curator() { let expected_child_deposit = CuratorDepositMin::get(); assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -877,7 +880,7 @@ fn child_bounty_active_unassign_curator() { // Verify updated child-bounty status. assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -896,7 +899,7 @@ fn child_bounty_active_unassign_curator() { assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(6), 0, 0)); assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -914,7 +917,7 @@ fn child_bounty_active_unassign_curator() { // Verify updated child-bounty status. assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -935,7 +938,7 @@ fn child_bounty_active_unassign_curator() { let expected_child_deposit = CuratorDepositMin::get(); assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -963,7 +966,7 @@ fn child_bounty_active_unassign_curator() { // Verify updated child-bounty status. assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -1025,7 +1028,7 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { let expected_child_deposit = CuratorDepositMultiplier::get() * fee; assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -1056,7 +1059,7 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { // Verify updated child-bounty status. assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -1087,7 +1090,7 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { let expected_deposit = CuratorDepositMin::get(); assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -1116,7 +1119,7 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { // Verify updated child-bounty status. assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -1186,7 +1189,7 @@ fn close_parent_with_child_bounty() { assert_ok!(ChildBounties::close_child_bounty(RuntimeOrigin::root(), 0, 0)); // Check the child-bounty count. - assert_eq!(ChildBounties::parent_child_bounties(0), 0); + assert_eq!(pallet_child_bounties::ParentChildBounties::::get(0), 0); // Try close parent-bounty again. // Should pass this time. @@ -1235,7 +1238,7 @@ fn children_curator_fee_calculation_test() { // Propose curator for child-bounty. 
		assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, fee));
 		// Check curator fee added to the sum.
-		assert_eq!(ChildBounties::children_curator_fees(0), fee);
+		assert_eq!(pallet_child_bounties::ChildrenCuratorFees::<Test>::get(0), fee);
 		// Accept curator for child-bounty.
 		assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0));
 		// Award child-bounty.
@@ -1244,7 +1247,7 @@ fn children_curator_fee_calculation_test() {
 		let expected_child_deposit = CuratorDepositMultiplier::get() * fee;
 		assert_eq!(
-			ChildBounties::child_bounties(0, 0).unwrap(),
+			pallet_child_bounties::ChildBounties::<Test>::get(0, 0).unwrap(),
 			ChildBounty {
 				parent_bounty: 0,
 				value: 10,
@@ -1264,7 +1267,7 @@ fn children_curator_fee_calculation_test() {
 		assert_ok!(ChildBounties::claim_child_bounty(RuntimeOrigin::signed(7), 0, 0));
 		// Check the child-bounty count.
-		assert_eq!(ChildBounties::parent_child_bounties(0), 0);
+		assert_eq!(pallet_child_bounties::ParentChildBounties::<Test>::get(0), 0);
 		// Award the parent bounty.
 		assert_ok!(Bounties::award_bounty(RuntimeOrigin::signed(4), 0, 9));
-- 
GitLab

From 4adfa37d14c0d81f09071687afb270ecdd5c2076 Mon Sep 17 00:00:00 2001
From: Oliver Tale-Yazdi
Date: Thu, 16 May 2024 12:43:56 +0200
Subject: [PATCH 021/106] [Runtime] Bound XCMP queue (#3952)

Re-applying #2302 after increasing the `MaxPageSize`.

Remove `without_storage_info` from the XCMP queue pallet. Part of
https://github.com/paritytech/polkadot-sdk/issues/323

Changes:
- Limit the number of messages and signals a HRMP channel can have at most.
- Limit the number of HRMP channels.

A no-op migration is put in place to ensure that all `BoundedVec`s still
decode and do not truncate after the upgrade. The storage version is thereby
bumped to 5 to have our tooling remind us to deploy that migration.

## Integration

If you see this error in your try-runtime-cli:
```pre
Max message size for channel is too large. This means that the V5 migration can
be front-run and an attacker could place a large message just right before the
migration to make other messages un-decodable. Please either increase
`MaxPageSize` or decrease the `max_message_size` for this channel. Channel max:
102400, MaxPageSize: 65535
```

Then increase the `MaxPageSize` of the `cumulus_pallet_xcmp_queue` to something like this:
```rust
type MaxPageSize = ConstU32<{ 103 * 1024 }>;
```

There is currently no easy way for on-chain governance to adjust the HRMP max
message size of all channels, but it could be done:
https://github.com/paritytech/polkadot-sdk/issues/3145.
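Besides `MaxPageSize`, the full runtime-side integration consists of the new
bound on active outbound channels, the `V5Config` implementation for the no-op
migration, and the migration itself. A condensed sketch mirroring the runtime
changes in this PR (`Runtime`, `ParachainSystem` and the elided items stand for
the usual runtime definitions):

```rust
impl cumulus_pallet_xcmp_queue::Config for Runtime {
	// ...existing associated types stay unchanged...
	type MaxActiveOutboundChannels = ConstU32<128>;
	// Most on-chain HRMP channels are configured for 102400-byte messages, so
	// the page size must stay above that until channel sizes shrink on-chain.
	type MaxPageSize = ConstU32<{ 103 * 1024 }>;
}

// The no-op V5 migration iterates all outgoing channels, so it needs the list:
impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime {
	// This must be the same as the `ChannelInfo` from the `Config`:
	type ChannelList = ParachainSystem;
}

// And the migration is appended to the runtime's migration tuple:
pub type Migrations = (
	// ...previously scheduled migrations...
	cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5<Runtime>,
);
```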
--------- Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Francisco Aguirre --- cumulus/pallets/parachain-system/src/lib.rs | 9 +- cumulus/pallets/parachain-system/src/mock.rs | 2 +- cumulus/pallets/xcmp-queue/src/lib.rs | 120 +++++++++++++----- cumulus/pallets/xcmp-queue/src/migration.rs | 4 +- .../pallets/xcmp-queue/src/migration/v5.rs | 108 ++++++++++++++++ cumulus/pallets/xcmp-queue/src/mock.rs | 6 +- cumulus/pallets/xcmp-queue/src/tests.rs | 2 +- .../assets/asset-hub-rococo/src/lib.rs | 14 +- .../assets/asset-hub-westend/src/lib.rs | 14 +- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 16 ++- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 14 +- .../collectives-westend/src/lib.rs | 14 +- .../contracts/contracts-rococo/src/lib.rs | 3 +- .../contracts-rococo/src/xcm_config.rs | 11 +- .../coretime/coretime-rococo/src/lib.rs | 14 +- .../coretime/coretime-westend/src/lib.rs | 13 +- .../glutton/glutton-westend/src/lib.rs | 5 +- .../runtimes/people/people-rococo/src/lib.rs | 13 +- .../runtimes/people/people-westend/src/lib.rs | 13 +- .../runtimes/starters/shell/src/lib.rs | 2 +- .../runtimes/testing/penpal/src/lib.rs | 8 +- .../testing/rococo-parachain/src/lib.rs | 8 +- cumulus/primitives/core/src/lib.rs | 8 ++ polkadot/parachain/src/primitives.rs | 14 +- prdoc/pr_3952.prdoc | 35 +++++ .../parachain/runtime/src/configs/mod.rs | 4 +- 26 files changed, 407 insertions(+), 67 deletions(-) create mode 100644 cumulus/pallets/xcmp-queue/src/migration/v5.rs create mode 100644 prdoc/pr_3952.prdoc diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index c8e7d1bb30f..7657dc4555e 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -30,7 +30,7 @@ use codec::{Decode, Encode}; use cumulus_primitives_core::{ relay_chain, AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, - GetChannelInfo, InboundDownwardMessage, InboundHrmpMessage, MessageSendError, + GetChannelInfo, InboundDownwardMessage, InboundHrmpMessage, ListChannelInfos, MessageSendError, OutboundHrmpMessage, ParaId, PersistedValidationData, UpwardMessage, UpwardMessageSender, XcmpMessageHandler, XcmpMessageSource, }; @@ -1022,6 +1022,13 @@ impl FeeTracker for Pallet { } } +impl ListChannelInfos for Pallet { + fn outgoing_channels() -> Vec { + let Some(state) = RelevantMessagingState::::get() else { return Vec::new() }; + state.egress_channels.into_iter().map(|(id, _)| id).collect() + } +} + impl GetChannelInfo for Pallet { fn get_channel_status(id: ParaId) -> ChannelStatus { // Note, that we are using `relevant_messaging_state` which may be from the previous diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs index fe89dfe68c6..e8d2eb70e26 100644 --- a/cumulus/pallets/parachain-system/src/mock.rs +++ b/cumulus/pallets/parachain-system/src/mock.rs @@ -122,7 +122,7 @@ impl pallet_message_queue::Config for Test { type Size = u32; type QueueChangeHandler = (); type QueuePausedQuery = (); - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MaxWeight; type IdleMaxServiceWeight = (); diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index cc785b66150..5633f05f13b 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -51,7 +51,7 @@ pub mod weights; pub use weights::WeightInfo; use 
bounded_collections::BoundedBTreeSet; -use codec::{Decode, DecodeLimit, Encode}; +use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; use cumulus_primitives_core::{ relay_chain::BlockNumber as RelayBlockNumber, ChannelStatus, GetChannelInfo, MessageSendError, ParaId, XcmpMessageFormat, XcmpMessageHandler, XcmpMessageSource, @@ -59,7 +59,7 @@ use cumulus_primitives_core::{ use frame_support::{ defensive, defensive_assert, - traits::{EnqueueMessage, EnsureOrigin, Get, QueueFootprint, QueuePausedQuery}, + traits::{Defensive, EnqueueMessage, EnsureOrigin, Get, QueueFootprint, QueuePausedQuery}, weights::{Weight, WeightMeter}, BoundedVec, }; @@ -68,7 +68,7 @@ use polkadot_runtime_common::xcm_sender::PriceForMessageDelivery; use polkadot_runtime_parachains::FeeTracker; use scale_info::TypeInfo; use sp_core::MAX_POSSIBLE_ALLOCATION; -use sp_runtime::{FixedU128, RuntimeDebug, Saturating}; +use sp_runtime::{FixedU128, RuntimeDebug, Saturating, WeakBoundedVec}; use sp_std::prelude::*; use xcm::{latest::prelude::*, VersionedLocation, VersionedXcm, WrapVersion, MAX_XCM_DECODE_DEPTH}; use xcm_builder::InspectMessageQueues; @@ -106,7 +106,6 @@ pub mod pallet { #[pallet::pallet] #[pallet::storage_version(migration::STORAGE_VERSION)] - #[pallet::without_storage_info] pub struct Pallet(_); #[pallet::config] @@ -133,6 +132,25 @@ pub mod pallet { #[pallet::constant] type MaxInboundSuspended: Get; + /// Maximal number of outbound XCMP channels that can have messages queued at the same time. + /// + /// If this is reached, then no further messages can be sent to channels that do not yet + /// have a message queued. This should be set to the expected maximum of outbound channels + /// which is determined by [`Self::ChannelInfo`]. It is important to set this large enough, + /// since otherwise the congestion control protocol will not work as intended and messages + /// may be dropped. This value increases the PoV and should therefore not be picked too + /// high. Governance needs to pay attention to not open more channels than this value. + #[pallet::constant] + type MaxActiveOutboundChannels: Get; + + /// The maximal page size for HRMP message pages. + /// + /// A lower limit can be set dynamically, but this is the hard-limit for the PoV worst case + /// benchmarking. The limit for the size of a message is slightly below this, since some + /// overhead is incurred for encoding the format. + #[pallet::constant] + type MaxPageSize: Get; + /// The origin that is allowed to resume or suspend the XCMP queue. type ControllerOrigin: EnsureOrigin; @@ -277,6 +295,10 @@ pub mod pallet { AlreadySuspended, /// The execution is already resumed. AlreadyResumed, + /// There are too many active outbound channels. + TooManyActiveOutboundChannels, + /// The message is too big. + TooBig, } /// The suspended inbound XCMP channels. All others are not suspended. @@ -298,19 +320,28 @@ pub mod pallet { /// case of the need to send a high-priority signal message this block. /// The bool is true if there is a signal message waiting to be sent. #[pallet::storage] - pub(super) type OutboundXcmpStatus = - StorageValue<_, Vec, ValueQuery>; + pub(super) type OutboundXcmpStatus = StorageValue< + _, + BoundedVec, + ValueQuery, + >; - // The new way of doing it: /// The messages outbound in a given XCMP channel. 
#[pallet::storage] - pub(super) type OutboundXcmpMessages = - StorageDoubleMap<_, Blake2_128Concat, ParaId, Twox64Concat, u16, Vec, ValueQuery>; + pub(super) type OutboundXcmpMessages = StorageDoubleMap< + _, + Blake2_128Concat, + ParaId, + Twox64Concat, + u16, + WeakBoundedVec, + ValueQuery, + >; /// Any signal messages waiting to be sent. #[pallet::storage] pub(super) type SignalMessages = - StorageMap<_, Blake2_128Concat, ParaId, Vec, ValueQuery>; + StorageMap<_, Blake2_128Concat, ParaId, WeakBoundedVec, ValueQuery>; /// The configuration which controls the dynamics of the outbound queue. #[pallet::storage] @@ -332,15 +363,14 @@ pub mod pallet { StorageMap<_, Twox64Concat, ParaId, FixedU128, ValueQuery, InitialFactor>; } -#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub enum OutboundState { Ok, Suspended, } /// Struct containing detailed information about the outbound channel. -#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)] -#[cfg_attr(feature = "std", derive(Debug))] +#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, RuntimeDebug, MaxEncodedLen)] pub struct OutboundChannelDetails { /// The `ParaId` of the parachain that this channel is connected with. recipient: ParaId, @@ -376,7 +406,7 @@ impl OutboundChannelDetails { } } -#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct QueueConfigData { /// The number of pages which must be in the queue for the other side to be told to suspend /// their sending. @@ -479,7 +509,10 @@ impl Pallet { { details } else { - all_channels.push(OutboundChannelDetails::new(recipient)); + all_channels.try_push(OutboundChannelDetails::new(recipient)).map_err(|e| { + log::error!("Failed to activate HRMP channel: {:?}", e); + MessageSendError::TooManyChannels + })?; all_channels .last_mut() .expect("can't be empty; a new element was just pushed; qed") @@ -504,7 +537,9 @@ impl Pallet { if page.len() + encoded_fragment.len() > max_message_size { return None } - page.extend_from_slice(&encoded_fragment[..]); + for frag in encoded_fragment.iter() { + page.try_push(*frag).ok()?; + } Some(page.len()) }, ) @@ -522,7 +557,10 @@ impl Pallet { new_page.extend_from_slice(&encoded_fragment[..]); let last_page_size = new_page.len(); let number_of_pages = (channel_details.last_index - channel_details.first_index) as u32; - >::insert(recipient, page_index, new_page); + let bounded_page = BoundedVec::::try_from(new_page) + .map_err(|_| MessageSendError::TooBig)?; + let bounded_page = WeakBoundedVec::force_from(bounded_page.into_inner(), None); + >::insert(recipient, page_index, bounded_page); >::put(all_channels); (number_of_pages, last_page_size) }; @@ -544,17 +582,24 @@ impl Pallet { /// Sends a signal to the `dest` chain over XCMP. This is guaranteed to be dispatched on this /// block. 
- fn send_signal(dest: ParaId, signal: ChannelSignal) { + fn send_signal(dest: ParaId, signal: ChannelSignal) -> Result<(), Error> { let mut s = >::get(); if let Some(details) = s.iter_mut().find(|item| item.recipient == dest) { details.signals_exist = true; } else { - s.push(OutboundChannelDetails::new(dest).with_signals()); + s.try_push(OutboundChannelDetails::new(dest).with_signals()) + .map_err(|_| Error::::TooManyActiveOutboundChannels)?; } - >::mutate(dest, |page| { - *page = (XcmpMessageFormat::Signals, signal).encode(); - }); + + let page = BoundedVec::::try_from( + (XcmpMessageFormat::Signals, signal).encode(), + ) + .map_err(|_| Error::::TooBig)?; + let page = WeakBoundedVec::force_from(page.into_inner(), None); + + >::insert(dest, page); >::put(s); + Ok(()) } fn suspend_channel(target: ParaId) { @@ -564,7 +609,9 @@ impl Pallet { defensive_assert!(ok, "WARNING: Attempt to suspend channel that was not Ok."); details.state = OutboundState::Suspended; } else { - s.push(OutboundChannelDetails::new(target).with_suspended_state()); + if s.try_push(OutboundChannelDetails::new(target).with_suspended_state()).is_err() { + defensive!("Cannot pause channel; too many outbound channels"); + } } }); } @@ -665,18 +712,25 @@ impl OnQueueChanged for Pallet { let suspended = suspended_channels.contains(¶); if suspended && fp.ready_pages <= resume_threshold { - Self::send_signal(para, ChannelSignal::Resume); - - suspended_channels.remove(¶); - >::put(suspended_channels); + if let Err(err) = Self::send_signal(para, ChannelSignal::Resume) { + log::error!("defensive: Could not send resumption signal to inbound channel of sibling {:?}: {:?}; channel remains suspended.", para, err); + } else { + suspended_channels.remove(¶); + >::put(suspended_channels); + } } else if !suspended && fp.ready_pages >= suspend_threshold { log::warn!("XCMP queue for sibling {:?} is full; suspending channel.", para); - Self::send_signal(para, ChannelSignal::Suspend); - if let Err(err) = suspended_channels.try_insert(para) { + if let Err(err) = Self::send_signal(para, ChannelSignal::Suspend) { + // It will retry if `drop_threshold` is not reached, but it could be too late. + log::error!( + "defensive: Could not send suspension signal; future messages may be dropped: {:?}", err + ); + } else if let Err(err) = suspended_channels.try_insert(para) { log::error!("Too many channels suspended; cannot suspend sibling {:?}: {:?}; further messages may be dropped.", para, err); + } else { + >::put(suspended_channels); } - >::put(suspended_channels); } } } @@ -843,7 +897,7 @@ impl XcmpMessageSource for Pallet { // since it's so unlikely then for now we just drop it. defensive!("WARNING: oversize message in queue - dropping"); } else { - result.push((para_id, page)); + result.push((para_id, page.into_inner())); } let max_total_size = match T::ChannelInfo::get_channel_info(para_id) { @@ -891,7 +945,9 @@ impl XcmpMessageSource for Pallet { let pruned = old_statuses_len - statuses.len(); // removing an item from status implies a message being sent, so the result messages must // be no less than the pruned channels. - statuses.rotate_left(result.len().saturating_sub(pruned)); + let _ = statuses.try_rotate_left(result.len().saturating_sub(pruned)).defensive_proof( + "Could not store HRMP channels config. 
Some HRMP channels may be broken.", + ); >::put(statuses); diff --git a/cumulus/pallets/xcmp-queue/src/migration.rs b/cumulus/pallets/xcmp-queue/src/migration.rs index 1702cd70bc2..b64982a8930 100644 --- a/cumulus/pallets/xcmp-queue/src/migration.rs +++ b/cumulus/pallets/xcmp-queue/src/migration.rs @@ -16,6 +16,8 @@ //! A module that is responsible for migration of storage. +pub mod v5; + use crate::{Config, OverweightIndex, Pallet, QueueConfig, QueueConfigData, DEFAULT_POV_SIZE}; use cumulus_primitives_core::XcmpMessageFormat; use frame_support::{ @@ -25,7 +27,7 @@ use frame_support::{ }; /// The in-code storage version. -pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); +pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); pub const LOG: &str = "runtime::xcmp-queue-migration"; diff --git a/cumulus/pallets/xcmp-queue/src/migration/v5.rs b/cumulus/pallets/xcmp-queue/src/migration/v5.rs new file mode 100644 index 00000000000..247adab7108 --- /dev/null +++ b/cumulus/pallets/xcmp-queue/src/migration/v5.rs @@ -0,0 +1,108 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Migrates the storage to version 5. + +use crate::*; +use cumulus_primitives_core::ListChannelInfos; +use frame_support::{pallet_prelude::*, traits::UncheckedOnRuntimeUpgrade}; + +/// Configs needed to run the V5 migration. +pub trait V5Config: Config { + /// List all outbound channels with their target `ParaId` and maximum message size. + type ChannelList: ListChannelInfos; +} + +/// Ensures that the storage migrates cleanly to V5. +/// +/// The migration itself is a no-op, but it checks that none of the `BoundedVec`s would truncate on +/// the next decode after the upgrade was applied. +pub type MigrateV4ToV5 = frame_support::migrations::VersionedMigration< + 4, + 5, + unversioned::UncheckedMigrateV4ToV5, + Pallet, + ::DbWeight, +>; + +// V4 storage aliases +mod v4 { + use super::*; + + #[frame_support::storage_alias] + pub(super) type OutboundXcmpStatus = + StorageValue, Vec, ValueQuery>; + + #[frame_support::storage_alias] + pub(super) type OutboundXcmpMessages = StorageDoubleMap< + Pallet, + Blake2_128Concat, + ParaId, + Twox64Concat, + u16, + Vec, + ValueQuery, + >; + + #[frame_support::storage_alias] + pub(super) type SignalMessages = + StorageMap, Blake2_128Concat, ParaId, Vec, ValueQuery>; +} + +// Private module to hide the migration. +mod unversioned { + /// Please use [`MigrateV4ToV5`] instead. + pub struct UncheckedMigrateV4ToV5(core::marker::PhantomData); +} + +impl UncheckedOnRuntimeUpgrade for unversioned::UncheckedMigrateV4ToV5 { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + Default::default() + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_: Vec) -> Result<(), sp_runtime::DispatchError> { + // We dont need any front-run protection for this since channels are opened by governance. 
+ ensure!( + v4::OutboundXcmpStatus::::get().len() as u32 <= T::MaxActiveOutboundChannels::get(), + "Too many outbound channels. Close some channels or increase `MaxActiveOutboundChannels`." + ); + + ensure!(T::MaxPageSize::get() >= 16, "Sanity check failed: MaxPageSize too small"); + + // Check if any channels have a too large message max sizes. + let max_msg_len = T::MaxPageSize::get() - XcmpMessageFormat::max_encoded_len() as u32; + for channel in T::ChannelList::outgoing_channels() { + let info = T::ChannelInfo::get_channel_info(channel) + .expect("All listed channels must provide info"); + + if info.max_message_size > max_msg_len { + log::error!( + "Max message size for channel is too large. This means that the V5 \ + migration can be front-run and an attacker could place a large message just right \ + before the migration to make other messages un-decodable. Please either increase \ + `MaxPageSize` or decrease the `max_message_size` for this channel. Channel max: {}, \ + MaxPageSize: {}", + info.max_message_size, + max_msg_len + ); + return Err("Migration can be front-run".into()); + } + } + + Ok(()) + } +} diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index e258576aa3f..97121aa78e9 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -277,7 +277,11 @@ impl Config for Test { type ChannelInfo = MockedChannelInfo; type VersionWrapper = (); type XcmpQueue = EnqueueToLocalStorage>; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. 
+ type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = SystemParachainAsSuperuser; type WeightInfo = (); diff --git a/cumulus/pallets/xcmp-queue/src/tests.rs b/cumulus/pallets/xcmp-queue/src/tests.rs index f48e9eec3ac..7c02059e5a9 100644 --- a/cumulus/pallets/xcmp-queue/src/tests.rs +++ b/cumulus/pallets/xcmp-queue/src/tests.rs @@ -520,7 +520,7 @@ fn hrmp_signals_are_prioritized() { }); // But a signal gets prioritized instead of the messages: - XcmpQueue::send_signal(sibling_para_id.into(), ChannelSignal::Suspend); + assert_ok!(XcmpQueue::send_signal(sibling_para_id.into(), ChannelSignal::Suspend)); let taken = XcmpQueue::take_outbound_messages(130); assert_eq!( diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 0df9b65c714..b0df11e1046 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -670,7 +670,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling; type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; @@ -700,12 +700,21 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type ChannelInfo = ParachainSystem; type VersionWrapper = PolkadotXcm; type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. + type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = xcm_config::XcmOriginToTransactDispatchOrigin; type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } +impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime { + // This must be the same as the `ChannelInfo` from the `Config`: + type ChannelList = ParachainSystem; +} + parameter_types! 
{ pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } @@ -980,6 +989,7 @@ pub type Migrations = ( InitStorageVersions, // unreleased cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, + cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5, pallet_collator_selection::migration::v2::MigrationToV2, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index b5c3ed5053c..062babef18d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -669,7 +669,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling; type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; @@ -697,13 +697,22 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type VersionWrapper = PolkadotXcm; // Enqueue XCMP messages from siblings for later processing. type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. + type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } +impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime { + // This must be the same as the `ChannelInfo` from the `Config`: + type ChannelList = ParachainSystem; +} + parameter_types! 
{ pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } @@ -984,6 +993,7 @@ pub type Migrations = ( DeleteUndecodableStorage, // unreleased cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, + cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 2a7f46feee6..9043175a701 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -148,8 +148,9 @@ pub type Migrations = ( pallet_collator_selection::migration::v2::MigrationToV2, pallet_multisig::migrations::v1::MigrateToV1, InitStorageVersions, - cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, // unreleased + cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, + cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5, snowbridge_pallet_system::migration::v0::InitializeOnUpgrade< Runtime, ConstU32, @@ -389,7 +390,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling; type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; @@ -417,13 +418,22 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type VersionWrapper = PolkadotXcm; // Enqueue XCMP messages from siblings for later processing. type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. + type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } +impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime { + // This must be the same as the `ChannelInfo` from the `Config`: + type ChannelList = ParachainSystem; +} + parameter_types! 
{ pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 4c467010c7c..50911b4d780 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -129,6 +129,7 @@ pub type Migrations = ( InitStorageVersions, // unreleased cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, + cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -352,7 +353,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling; type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; @@ -379,13 +380,22 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type ChannelInfo = ParachainSystem; type VersionWrapper = PolkadotXcm; type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. + type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } +impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime { + // This must be the same as the `ChannelInfo` from the `Config`: + type ChannelList = ParachainSystem; +} + parameter_types! { pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 35b505d9da6..59005d0fa97 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -420,7 +420,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling; type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; @@ -448,13 +448,22 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type VersionWrapper = PolkadotXcm; // Enqueue XCMP messages from siblings for later processing. 
type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. + type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = EitherOfDiverse, Fellows>; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } +impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime { + // This must be the same as the `ChannelInfo` from the `Config`: + type ChannelList = ParachainSystem; +} + parameter_types! { pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } @@ -727,6 +736,7 @@ type Migrations = ( pallet_collator_selection::migration::v2::MigrationToV2, // unreleased cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, + cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index df39cd811d1..85a85e7086c 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -106,6 +106,7 @@ pub type Migrations = ( pallet_contracts::Migration, // unreleased cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, + cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -318,7 +319,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling; type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs index 8c337101986..ef5ded1731d 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs @@ -284,7 +284,11 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { cumulus_primitives_core::ParaId, parachains_common::message_queue::ParaIdToSibling, >; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. 
+ type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = EitherOfDiverse< EnsureRoot, EnsureXcm>, @@ -294,6 +298,11 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } +impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime { + // This must be the same as the `ChannelInfo` from the `Config`: + type ChannelList = ParachainSystem; +} + parameter_types! { pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index ab925b04eb7..4f4935de133 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -110,6 +110,7 @@ pub type UncheckedExtrinsic = pub type Migrations = ( pallet_collator_selection::migration::v2::MigrationToV2, cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, + cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5, pallet_broker::migration::MigrateV0ToV1, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, @@ -301,7 +302,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling; type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; @@ -341,13 +342,22 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type ChannelInfo = ParachainSystem; type VersionWrapper = PolkadotXcm; type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. 
+ type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = RootOrFellows; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } +impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime { + // This must be the same as the `ChannelInfo` from the `Config`: + type ChannelList = ParachainSystem; +} + pub const PERIOD: u32 = 6 * HOURS; pub const OFFSET: u32 = 0; diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 61c7b6e4958..fca1b0e7c6e 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -302,7 +302,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling; type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; @@ -342,13 +342,22 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type ChannelInfo = ParachainSystem; type VersionWrapper = PolkadotXcm; type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. + type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = RootOrFellows; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } +impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime { + // This must be the same as the `ChannelInfo` from the `Config`: + type ChannelList = ParachainSystem; +} + pub const PERIOD: u32 = 6 * HOURS; pub const OFFSET: u32 = 0; diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 424fa9cb7e7..b4ee0f5ae71 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -208,8 +208,9 @@ impl pallet_message_queue::Config for Runtime { >; type Size = u32; type QueueChangeHandler = (); - type QueuePausedQuery = (); // No XCMP queue pallet deployed. - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + // No XCMP queue pallet deployed. 
+ type QueuePausedQuery = (); + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 544b2e78a46..68e34a0e567 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -281,7 +281,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling; type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; @@ -308,13 +308,22 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type ChannelInfo = ParachainSystem; type VersionWrapper = PolkadotXcm; type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. + type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = RootOrFellows; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; } +impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime { + // This must be the same as the `ChannelInfo` from the `Config`: + type ChannelList = ParachainSystem; +} + pub const PERIOD: u32 = 6 * HOURS; pub const OFFSET: u32 = 0; diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 50c818a2022..4d838fb9961 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -281,7 +281,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling; type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; @@ -308,13 +308,22 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type ChannelInfo = ParachainSystem; type VersionWrapper = PolkadotXcm; type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. 
+ type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = RootOrFellows; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } +impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime { + // This must be the same as the `ChannelInfo` from the `Config`: + type ChannelList = ParachainSystem; +} + pub const PERIOD: u32 = 6 * HOURS; pub const OFFSET: u32 = 0; diff --git a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs index a3d1629bbe5..7422b580cc3 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/shell/src/lib.rs @@ -229,7 +229,7 @@ impl pallet_message_queue::Config for Runtime { // These need to be configured to the XCMP pallet - if it is deployed. type QueueChangeHandler = (); type QueuePausedQuery = (); - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 582154fec6d..86a8b0f1d9e 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -541,7 +541,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling; type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; @@ -569,7 +569,11 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type VersionWrapper = PolkadotXcm; // Enqueue XCMP messages from siblings for later processing. type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. 
+ type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = (); diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index f22e900ba9e..b515e8ec5c9 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -317,7 +317,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling; type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = (); @@ -542,7 +542,11 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type VersionWrapper = (); // Enqueue XCMP messages from siblings for later processing. type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. + type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = cumulus_pallet_xcmp_queue::weights::SubstrateWeight; diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index 7f735368565..29216d51346 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -64,6 +64,8 @@ pub enum MessageSendError { TooBig, /// Some other error. Other, + /// There are too many channels open at once. + TooManyChannels, } impl From for &'static str { @@ -74,6 +76,7 @@ impl From for &'static str { NoChannel => "NoChannel", TooBig => "TooBig", Other => "Other", + TooManyChannels => "TooManyChannels", } } } @@ -135,6 +138,11 @@ pub trait GetChannelInfo { fn get_channel_info(id: ParaId) -> Option; } +/// List all open outgoing channels. +pub trait ListChannelInfos { + fn outgoing_channels() -> Vec; +} + /// Something that should be called when sending an upward message. pub trait UpwardMessageSender { /// Send the given UMP message; return the expected number of blocks before the message will diff --git a/polkadot/parachain/src/primitives.rs b/polkadot/parachain/src/primitives.rs index 5a1efdf8982..27643843637 100644 --- a/polkadot/parachain/src/primitives.rs +++ b/polkadot/parachain/src/primitives.rs @@ -333,7 +333,19 @@ impl DmpMessageHandler for () { } /// The aggregate XCMP message format. -#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, TypeInfo, RuntimeDebug)] +#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + TypeInfo, + RuntimeDebug, + MaxEncodedLen, +)] pub enum XcmpMessageFormat { /// Encoded `VersionedXcm` messages, all concatenated. 
ConcatenatedVersionedXcm, diff --git a/prdoc/pr_3952.prdoc b/prdoc/pr_3952.prdoc new file mode 100644 index 00000000000..2401adbb76c --- /dev/null +++ b/prdoc/pr_3952.prdoc @@ -0,0 +1,35 @@ +title: Storage bound the XCMP queue pallet + +doc: + - audience: Runtime Dev + description: | + Enforce upper limits for the number of active XCMP channels, the number of outgoing XCMP + messages per channel and the number of signals per channel. + + ## Integration + + If you see this error in your try-runtime-cli: + ```pre + Max message size for channel is too large. This means that the V5 migration can be front-run and an + attacker could place a large message just right before the migration to make other messages un-decodable. + Please either increase `MaxPageSize` or decrease the `max_message_size` for this channel. Channel max: + 102400, MaxPageSize: 65535 + ``` + + Then increase the `MaxPageSize` of the `cumulus_pallet_xcmp_queue` to something like this: + ```rust + type MaxPageSize = ConstU32<{ 103 * 1024 }>; + ``` + +migrations: + db: [] + + runtime: + - reference: cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5 + description: A no-op migration is deployed to ensure that all `BoundedVec`s still decode as expected. + +crates: + - name: cumulus-pallet-xcmp-queue + bump: major + +host_functions: [] diff --git a/templates/parachain/runtime/src/configs/mod.rs b/templates/parachain/runtime/src/configs/mod.rs index 0aec332feaf..63e6a67a906 100644 --- a/templates/parachain/runtime/src/configs/mod.rs +++ b/templates/parachain/runtime/src/configs/mod.rs @@ -221,7 +221,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling; type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = (); @@ -236,6 +236,8 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { // Enqueue XCMP messages from siblings for later processing. type XcmpQueue = TransformOrigin; type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + type MaxPageSize = ConstU32<{ 1 << 16 }>; type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = (); -- GitLab From 3399bc09c60ea7de8766fef760a04c5a6b9ada15 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Thu, 16 May 2024 15:01:29 +0300 Subject: [PATCH 022/106] network/discovery: Add to DHT only peers that support genesis-based protocol (#3833) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR adds to the DHT only the peers that support the genesis/fork/kad protocol. Before this PR, any peer that supported the legacy `/kad/[id]` protocol was added to the DHT. This is the first step in removing the support for the legacy kad protocols. While I have adjusted unit tests to validate the appropriate behavior, this still needs proper testing in our stack. Part of https://github.com/paritytech/polkadot-sdk/issues/504.
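The core of the change is a single membership check in `DiscoveryBehaviour::add_self_reported_address`, visible in the diff below. As a hedged sketch of the rule (a simplified free function, not the exact upstream code): a peer's address is only added to the Kademlia routing table if its advertised protocols contain the chain-based Kademlia protocol name built from the genesis hash and optional fork id; advertising only the legacy `/kad/[id]` name is no longer enough.

```rust
/// Illustrative sketch only: `chain_based_kad_protocol` stands for the name
/// derived from the genesis hash plus optional fork id. A peer that reports
/// only the legacy protocol name fails this check and is ignored.
fn accepts_peer_for_dht(supported_protocols: &[Vec<u8>], chain_based_kad_protocol: &[u8]) -> bool {
    supported_protocols
        .iter()
        .any(|p| p.as_slice() == chain_based_kad_protocol)
}
```

This is what the new `unsupported_peer_id` unit test in the diff asserts: a peer reporting only `legacy_kademlia_protocol_name(&protocol_id)` is not inserted into any k-bucket.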
cc @paritytech/networking --------- Signed-off-by: Alexandru Vasile Co-authored-by: Bastian Köcher --- substrate/client/network/src/discovery.rs | 117 ++++++++++++++---- .../client/network/src/litep2p/discovery.rs | 13 +- 2 files changed, 105 insertions(+), 25 deletions(-) diff --git a/substrate/client/network/src/discovery.rs b/substrate/client/network/src/discovery.rs index 4e2121c5540..7d4481b0d06 100644 --- a/substrate/client/network/src/discovery.rs +++ b/substrate/client/network/src/discovery.rs @@ -105,7 +105,8 @@ pub struct DiscoveryConfig { discovery_only_if_under_num: u64, enable_mdns: bool, kademlia_disjoint_query_paths: bool, - kademlia_protocols: Vec>, + kademlia_protocol: Vec, + kademlia_legacy_protocol: Vec, kademlia_replication_factor: NonZeroUsize, } @@ -121,7 +122,8 @@ impl DiscoveryConfig { discovery_only_if_under_num: std::u64::MAX, enable_mdns: false, kademlia_disjoint_query_paths: false, - kademlia_protocols: Vec::new(), + kademlia_protocol: Vec::new(), + kademlia_legacy_protocol: Vec::new(), kademlia_replication_factor: NonZeroUsize::new(DEFAULT_KADEMLIA_REPLICATION_FACTOR) .expect("value is a constant; constant is non-zero; qed."), } @@ -177,9 +179,8 @@ impl DiscoveryConfig { fork_id: Option<&str>, protocol_id: &ProtocolId, ) -> &mut Self { - self.kademlia_protocols = Vec::new(); - self.kademlia_protocols.push(kademlia_protocol_name(genesis_hash, fork_id)); - self.kademlia_protocols.push(legacy_kademlia_protocol_name(protocol_id)); + self.kademlia_protocol = kademlia_protocol_name(genesis_hash, fork_id); + self.kademlia_legacy_protocol = legacy_kademlia_protocol_name(protocol_id); self } @@ -207,14 +208,19 @@ impl DiscoveryConfig { discovery_only_if_under_num, enable_mdns, kademlia_disjoint_query_paths, - kademlia_protocols, + kademlia_protocol, + kademlia_legacy_protocol, kademlia_replication_factor, } = self; - let kademlia = if !kademlia_protocols.is_empty() { + let kademlia = if !kademlia_protocol.is_empty() { let mut config = KademliaConfig::default(); config.set_replication_factor(kademlia_replication_factor); + // Populate kad with both the legacy and the new protocol names. + // Remove the legacy protocol: + // https://github.com/paritytech/polkadot-sdk/issues/504 + let kademlia_protocols = [kademlia_protocol.clone(), kademlia_legacy_protocol]; config.set_protocol_names(kademlia_protocols.into_iter().map(Into::into).collect()); // By default Kademlia attempts to insert all peers into its routing table once a // dialing attempt succeeds. In order to control which peer is added, disable the @@ -266,6 +272,7 @@ impl DiscoveryConfig { .expect("value is a constant; constant is non-zero; qed."), ), records_to_publish: Default::default(), + kademlia_protocol, } } } @@ -309,6 +316,11 @@ pub struct DiscoveryBehaviour { /// did not return the record(in `FinishedWithNoAdditionalRecord`). We will then put the record /// to these peers. records_to_publish: HashMap, + /// The chain based kademlia protocol name (including genesis hash and fork id). + /// + /// Remove when all nodes are upgraded to genesis hash and fork ID-based Kademlia: + /// <https://github.com/paritytech/polkadot-sdk/issues/504>. + kademlia_protocol: Vec, } impl DiscoveryBehaviour { @@ -366,23 +378,29 @@ impl DiscoveryBehaviour { return } - if let Some(matching_protocol) = supported_protocols + // The supported protocols must include the chain-based Kademlia protocol.
+ // + // Extract the chain-based Kademlia protocol from `kademlia.protocol_name()` + // when all nodes are upgraded to genesis hash and fork ID-based Kademlia: + // https://github.com/paritytech/polkadot-sdk/issues/504. + if !supported_protocols .iter() - .find(|p| kademlia.protocol_names().iter().any(|k| k.as_ref() == p.as_ref())) + .any(|p| p.as_ref() == self.kademlia_protocol.as_slice()) { - trace!( - target: "sub-libp2p", - "Adding self-reported address {} from {} to Kademlia DHT {}.", - addr, peer_id, String::from_utf8_lossy(matching_protocol.as_ref()), - ); - kademlia.add_address(peer_id, addr.clone()); - } else { trace!( target: "sub-libp2p", "Ignoring self-reported address {} from {} as remote node is not part of the \ Kademlia DHT supported by the local node.", addr, peer_id, ); + return } + + trace!( + target: "sub-libp2p", + "Adding self-reported address {} from {} to Kademlia DHT.", + addr, peer_id + ); + kademlia.add_address(peer_id, addr.clone()); } } @@ -1075,17 +1093,20 @@ mod tests { .unwrap(); // Test both genesis hash-based and legacy // protocol names. - let protocol_name = if swarm_n % 2 == 0 { - kademlia_protocol_name(genesis_hash, fork_id) + let protocol_names = if swarm_n % 2 == 0 { + vec![kademlia_protocol_name(genesis_hash, fork_id)] } else { - legacy_kademlia_protocol_name(&protocol_id) + vec![ + legacy_kademlia_protocol_name(&protocol_id), + kademlia_protocol_name(genesis_hash, fork_id), + ] }; swarms[swarm_n] .0 .behaviour_mut() .add_self_reported_address( &other, - &[protocol_name], + protocol_names.as_slice(), addr, ); @@ -1181,9 +1202,56 @@ mod tests { &[kademlia_protocol_name(supported_genesis_hash, None)], remote_addr.clone(), ); + { + let kademlia = discovery.kademlia.as_mut().unwrap(); + assert!( + !kademlia + .kbucket(remote_peer_id) + .expect("Remote peer id not to be equal to local peer id.") + .is_empty(), + "Expect peer with supported protocol to be added." + ); + } + + let unsupported_peer_id = predictable_peer_id(b"00000000000000000000000000000002"); + let unsupported_peer_addr: Multiaddr = "/memory/2".parse().unwrap(); + + // Check the unsupported peer is not present before and after the call. + { + let kademlia = discovery.kademlia.as_mut().unwrap(); + assert!( + kademlia + .kbucket(unsupported_peer_id) + .expect("Remote peer id not to be equal to local peer id.") + .is_empty(), + "Expect unsupported peer not to be added." + ); + } + // Note: legacy protocol is not supported without genesis hash and fork ID, + // if the legacy is the only protocol supported, then the peer will not be added. + discovery.add_self_reported_address( + &unsupported_peer_id, + &[legacy_kademlia_protocol_name(&supported_protocol_id)], + unsupported_peer_addr.clone(), + ); + { + let kademlia = discovery.kademlia.as_mut().unwrap(); + assert!( + kademlia + .kbucket(unsupported_peer_id) + .expect("Remote peer id not to be equal to local peer id.") + .is_empty(), + "Expect unsupported peer not to be added." + ); + } + + // Supported legacy and genesis based protocols are allowed to be added. discovery.add_self_reported_address( &another_peer_id, - &[legacy_kademlia_protocol_name(&supported_protocol_id)], + &[ + legacy_kademlia_protocol_name(&supported_protocol_id), + kademlia_protocol_name(supported_genesis_hash, None), + ], another_addr.clone(), ); @@ -1194,6 +1262,13 @@ mod tests { kademlia.kbuckets().fold(0, |acc, bucket| acc + bucket.num_entries()), "Expect peers with supported protocol to be added." 
); + assert!( + !kademlia + .kbucket(another_peer_id) + .expect("Remote peer id not to be equal to local peer id.") + .is_empty(), + "Expect peer with supported protocol to be added." + ); } } } diff --git a/substrate/client/network/src/litep2p/discovery.rs b/substrate/client/network/src/litep2p/discovery.rs index 47a620db132..2120ea7c615 100644 --- a/substrate/client/network/src/litep2p/discovery.rs +++ b/substrate/client/network/src/litep2p/discovery.rs @@ -268,10 +268,10 @@ impl Discovery { allow_non_global_addresses: config.allow_non_globals_in_dht, public_addresses: config.public_addresses.iter().cloned().collect(), next_kad_query: Some(Delay::new(KADEMLIA_QUERY_INTERVAL)), - local_protocols: HashSet::from_iter([ - kademlia_protocol_name(genesis_hash, fork_id), - legacy_kademlia_protocol_name(protocol_id), - ]), + local_protocols: HashSet::from_iter([kademlia_protocol_name( + genesis_hash, + fork_id, + )]), }, ping_config, identify_config, @@ -295,6 +295,11 @@ impl Discovery { addresses: Vec, ) { if self.local_protocols.is_disjoint(&supported_protocols) { + log::trace!( + target: "sub-libp2p", + "Ignoring self-reported address of peer {peer} as remote node is not part of the \ + Kademlia DHT supported by the local node.", + ); return } -- GitLab From 453bb18c136375cff13000ad9b9dfc649734ab03 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 16 May 2024 15:32:56 +0300 Subject: [PATCH 023/106] Bridge: drop subscriptions when they are no longer required (#4481) The bridge relay is **not** using `tokio`, while `jsonrpsee` does. To make it work together, we are spawning a separate tokio task for every jsonrpsee subscription, which holds a subscription reference. It looks like we are not stopping those tasks when we no longer need them, and when there are more than `1024` active subscriptions, `jsonrpsee` stops opening new subscriptions. This PR adds a `cancel` signal that is sent to the background task when we no longer need a subscription. --- bridges/relays/client-substrate/src/client.rs | 66 +++++++++++++++---- .../src/transaction_tracker.rs | 6 +- 2 files changed, 58 insertions(+), 14 deletions(-) diff --git a/bridges/relays/client-substrate/src/client.rs b/bridges/relays/client-substrate/src/client.rs index afbda8599b2..2e7cb7455f7 100644 --- a/bridges/relays/client-substrate/src/client.rs +++ b/bridges/relays/client-substrate/src/client.rs @@ -77,7 +77,12 @@ pub fn is_ancient_block + PartialOrd + Saturating>(block: N, best: } /// Opaque justifications subscription type. -pub struct Subscription(pub(crate) Mutex>>); +pub struct Subscription( + pub(crate) Mutex>>, + // The following field is not explicitly used by the code. But when it is dropped, + // the background task receives a shutdown signal. + #[allow(dead_code)] pub(crate) futures::channel::oneshot::Sender<()>, +); /// Opaque GRANDPA authorities set.
pub type OpaqueGrandpaAuthoritiesSet = Vec; @@ -621,6 +626,7 @@ impl Client { e })??; + let (cancel_sender, cancel_receiver) = futures::channel::oneshot::channel(); let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY); let (tracker, subscription) = self .jsonrpsee_execute(move |client| async move { @@ -639,7 +645,7 @@ impl Client { self_clone, stall_timeout, tx_hash, - Subscription(Mutex::new(receiver)), + Subscription(Mutex::new(receiver), cancel_sender), ); Ok((tracker, subscription)) }) @@ -649,6 +655,7 @@ impl Client { "extrinsic".into(), subscription, sender, + cancel_receiver, )); Ok(tracker) } @@ -790,14 +797,16 @@ impl Client { Ok(FC::subscribe_justifications(&client).await?) }) .await?; + let (cancel_sender, cancel_receiver) = futures::channel::oneshot::channel(); let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY); self.data.read().await.tokio.spawn(Subscription::background_worker( C::NAME.into(), "justification".into(), subscription, sender, + cancel_receiver, )); - Ok(Subscription(Mutex::new(receiver))) + Ok(Subscription(Mutex::new(receiver), cancel_sender)) } /// Generates a proof of key ownership for the given authority in the given set. @@ -843,9 +852,17 @@ impl Client { impl Subscription { /// Consumes subscription and returns future statuses stream. pub fn into_stream(self) -> impl futures::Stream { - futures::stream::unfold(self, |this| async { + futures::stream::unfold(Some(self), |mut this| async move { + let Some(this) = this.take() else { return None }; let item = this.0.lock().await.next().await.unwrap_or(None); - item.map(|i| (i, this)) + match item { + Some(item) => Some((item, Some(this))), + None => { + // let's make it explicit here + let _ = this.1.send(()); + None + }, + } }) } @@ -860,19 +877,35 @@ impl Subscription { async fn background_worker( chain_name: String, item_type: String, - mut subscription: jsonrpsee::core::client::Subscription, + subscription: jsonrpsee::core::client::Subscription, mut sender: futures::channel::mpsc::Sender>, + cancel_receiver: futures::channel::oneshot::Receiver<()>, ) { + log::trace!( + target: "bridge", + "Starting background worker for {} {} subscription stream.", + chain_name, + item_type, + ); + + futures::pin_mut!(subscription, cancel_receiver); loop { - match subscription.next().await { - Some(Ok(item)) => + match futures::future::select(subscription.next(), &mut cancel_receiver).await { + futures::future::Either::Left((Some(Ok(item)), _)) => if sender.send(Some(item)).await.is_err() { + log::trace!( + target: "bridge", + "{} {} subscription stream: no listener. Stopping background worker.", + chain_name, + item_type, + ); + break }, - Some(Err(e)) => { + futures::future::Either::Left((Some(Err(e)), _)) => { log::trace!( target: "bridge", - "{} {} subscription stream has returned '{:?}'. Stream needs to be restarted.", + "{} {} subscription stream has returned '{:?}'. Stream needs to be restarted. Stopping background worker.", chain_name, item_type, e, @@ -880,16 +913,25 @@ impl Subscription { let _ = sender.send(None).await; break }, - None => { + futures::future::Either::Left((None, _)) => { log::trace!( target: "bridge", - "{} {} subscription stream has returned None. Stream needs to be restarted.", + "{} {} subscription stream has returned None. Stream needs to be restarted. 
Stopping background worker.", chain_name, item_type, ); let _ = sender.send(None).await; break }, + futures::future::Either::Right((_, _)) => { + log::trace!( + target: "bridge", + "{} {} subscription stream: listener has been dropped. Stopping background worker.", + chain_name, + item_type, + ); + break; + }, } } } diff --git a/bridges/relays/client-substrate/src/transaction_tracker.rs b/bridges/relays/client-substrate/src/transaction_tracker.rs index 00375768c45..b181a945c2c 100644 --- a/bridges/relays/client-substrate/src/transaction_tracker.rs +++ b/bridges/relays/client-substrate/src/transaction_tracker.rs @@ -306,12 +306,13 @@ mod tests { TrackedTransactionStatus>, InvalidationStatus>, )> { + let (cancel_sender, _cancel_receiver) = futures::channel::oneshot::channel(); let (mut sender, receiver) = futures::channel::mpsc::channel(1); let tx_tracker = TransactionTracker::::new( TestEnvironment(Ok(HeaderId(0, Default::default()))), Duration::from_secs(0), Default::default(), - Subscription(async_std::sync::Mutex::new(receiver)), + Subscription(async_std::sync::Mutex::new(receiver), cancel_sender), ); let wait_for_stall_timeout = futures::future::pending(); @@ -428,12 +429,13 @@ mod tests { #[async_std::test] async fn lost_on_timeout_when_waiting_for_invalidation_status() { + let (cancel_sender, _cancel_receiver) = futures::channel::oneshot::channel(); let (_sender, receiver) = futures::channel::mpsc::channel(1); let tx_tracker = TransactionTracker::::new( TestEnvironment(Ok(HeaderId(0, Default::default()))), Duration::from_secs(0), Default::default(), - Subscription(async_std::sync::Mutex::new(receiver)), + Subscription(async_std::sync::Mutex::new(receiver), cancel_sender), ); let wait_for_stall_timeout = futures::future::ready(()).shared(); -- GitLab From 8d2939700da85ad8fa651ca390aaba57f65e76d1 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Thu, 16 May 2024 15:46:19 +0300 Subject: [PATCH 024/106] Demote per-peer validation slots warning to debug (#4480) Demote `Ignored block announcement because all validation slots for this peer are occupied.` message to debug level. This is mostly an indicator of somebody spamming the node or (more likely) some node actively keeping up with the network but not recognizing it's in a major sync mode, so sending zillions of block announcements (have seen this on Versi). This warning shouldn't be considered an error by the end user, so let's make it debug. Ref. https://github.com/paritytech/polkadot-sdk/issues/1929. --- substrate/client/network/sync/src/block_announce_validator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/client/network/sync/src/block_announce_validator.rs b/substrate/client/network/sync/src/block_announce_validator.rs index 3c994dd6944..cb1d5ee6b22 100644 --- a/substrate/client/network/sync/src/block_announce_validator.rs +++ b/substrate/client/network/sync/src/block_announce_validator.rs @@ -156,7 +156,7 @@ impl BlockAnnounceValidator { return }, AllocateSlotForBlockAnnounceValidation::MaximumPeerSlotsReached => { - warn!( + debug!( target: LOG_TARGET, "💔 Ignored block (#{} -- {}) announcement from {} because all validation slots for this peer are occupied.", number, -- GitLab From 04f88f5b03038acbdeb7475f543baf6b06d64f74 Mon Sep 17 00:00:00 2001 From: "polka.dom" Date: Thu, 16 May 2024 09:53:36 -0400 Subject: [PATCH 025/106] Remove pallet::getter usage from the democracy pallet (#4472) As per #3326, removes usage of the pallet::getter macro from the democracy pallet.
The syntax `StorageItem::<T>::get()` should be used instead; a short before/after sketch of the pattern follows the lib.rs hunks below. cc @muraca --- prdoc/pr_4472.prdoc | 14 ++++ substrate/frame/democracy/src/benchmarking.rs | 40 ++++----- substrate/frame/democracy/src/lib.rs | 84 +++++++++---------- substrate/frame/democracy/src/tests.rs | 2 +- .../frame/democracy/src/tests/cancellation.rs | 8 +- .../democracy/src/tests/external_proposing.rs | 8 +- .../democracy/src/tests/fast_tracking.rs | 8 +- .../frame/democracy/src/tests/metadata.rs | 8 +- .../democracy/src/tests/public_proposals.rs | 4 +- .../frame/democracy/src/tests/scheduling.rs | 12 +-- substrate/frame/democracy/src/tests/voting.rs | 4 +- 11 files changed, 100 insertions(+), 92 deletions(-) create mode 100644 prdoc/pr_4472.prdoc diff --git a/prdoc/pr_4472.prdoc b/prdoc/pr_4472.prdoc new file mode 100644 index 00000000000..cd7527d73d6 --- /dev/null +++ b/prdoc/pr_4472.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove `pallet::getter` usage from pallet-democracy + +doc: + - audience: Runtime Dev + description: | + This PR removes the `pallet::getter`s from `pallet-democracy`. + The syntax `StorageItem::<T>::get()` should be used instead. + +crates: + - name: pallet-democracy + bump: major diff --git a/substrate/frame/democracy/src/benchmarking.rs b/substrate/frame/democracy/src/benchmarking.rs index aa66137ad88..799d614c37f 100644 --- a/substrate/frame/democracy/src/benchmarking.rs +++ b/substrate/frame/democracy/src/benchmarking.rs @@ -108,7 +108,7 @@ benchmarks! { whitelist_account!(caller); }: _(RawOrigin::Signed(caller), proposal, value) verify { - assert_eq!(Democracy::::public_props().len(), p as usize, "Proposals not created."); + assert_eq!(PublicProps::::get().len(), p as usize, "Proposals not created."); } second { @@ -122,12 +122,12 @@ benchmarks! { Democracy::::second(RawOrigin::Signed(seconder).into(), 0)?; } - let deposits = Democracy::::deposit_of(0).ok_or("Proposal not created")?; + let deposits = DepositOf::::get(0).ok_or("Proposal not created")?; assert_eq!(deposits.0.len(), (T::MaxDeposits::get() - 1) as usize, "Seconds not recorded"); whitelist_account!(caller); }: _(RawOrigin::Signed(caller), 0) verify { - let deposits = Democracy::::deposit_of(0).ok_or("Proposal not created")?; + let deposits = DepositOf::::get(0).ok_or("Proposal not created")?; assert_eq!(deposits.0.len(), (T::MaxDeposits::get()) as usize, "`second` benchmark did not work"); } @@ -175,7 +175,7 @@ // Change vote from aye to nay let nay = Vote { aye: false, conviction: Conviction::Locked1x }; let new_vote = AccountVote::Standard { vote: nay, balance: 1000u32.into() }; - let ref_index = Democracy::::referendum_count() - 1; + let ref_index = ReferendumCount::::get() - 1; // This tests when a user changes a vote whitelist_account!(caller); }: _(RawOrigin::Signed(caller), ref_index, new_vote) verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), T::MaxVotes::get() as usize, "Vote was incorrectly added"); - let referendum_info = Democracy::::referendum_info(ref_index) + let referendum_info = ReferendumInfoOf::::get(ref_index) .ok_or("referendum doesn't exist")?; let tally = match referendum_info { ReferendumInfo::Ongoing(r) => r.tally, _ => return Err("referendum not ongoing".into()), }; assert_eq!(tally.nays, 1000u32.into(), "changed vote was not recorded"); } @@ -261,7 +261,7 @@ benchmarks!
{ }: _(origin, proposal) verify { // External proposal created - ensure!(>::exists(), "External proposal didn't work"); + ensure!(NextExternal::::exists(), "External proposal didn't work"); } external_propose_majority { @@ -271,7 +271,7 @@ benchmarks! { }: _(origin, proposal) verify { // External proposal created - ensure!(>::exists(), "External proposal didn't work"); + ensure!(NextExternal::::exists(), "External proposal didn't work"); } external_propose_default { @@ -281,7 +281,7 @@ benchmarks! { }: _(origin, proposal) verify { // External proposal created - ensure!(>::exists(), "External proposal didn't work"); + ensure!(NextExternal::::exists(), "External proposal didn't work"); } fast_track { @@ -303,7 +303,7 @@ benchmarks! { let delay = 0u32; }: _(origin_fast_track, proposal_hash, voting_period, delay.into()) verify { - assert_eq!(Democracy::::referendum_count(), 1, "referendum not created"); + assert_eq!(ReferendumCount::::get(), 1, "referendum not created"); assert_last_event::(crate::Event::MetadataTransferred { prev_owner: MetadataOwner::External, owner: MetadataOwner::Referendum(0), @@ -338,7 +338,7 @@ benchmarks! { }: _(origin, proposal_hash) verify { assert!(NextExternal::::get().is_none()); - let (_, new_vetoers) = >::get(&proposal_hash).ok_or("no blacklist")?; + let (_, new_vetoers) = Blacklist::::get(&proposal_hash).ok_or("no blacklist")?; assert_eq!(new_vetoers.len(), T::MaxBlacklisted::get() as usize, "vetoers not added"); } @@ -382,7 +382,7 @@ benchmarks! { add_referendum::(i); } - assert_eq!(Democracy::::referendum_count(), r, "referenda not created"); + assert_eq!(ReferendumCount::::get(), r, "referenda not created"); // Launch external LastTabledWasExternal::::put(false); @@ -393,15 +393,15 @@ benchmarks! { let call = Call::::external_propose_majority { proposal }; call.dispatch_bypass_filter(origin)?; // External proposal created - ensure!(>::exists(), "External proposal didn't work"); + ensure!(NextExternal::::exists(), "External proposal didn't work"); let block_number = T::LaunchPeriod::get(); }: { Democracy::::on_initialize(block_number) } verify { // One extra because of next external - assert_eq!(Democracy::::referendum_count(), r + 1, "referenda not created"); - ensure!(!>::exists(), "External wasn't taken"); + assert_eq!(ReferendumCount::::get(), r + 1, "referenda not created"); + ensure!(!NextExternal::::exists(), "External wasn't taken"); // All but the new next external should be finished for i in 0 .. r { @@ -422,7 +422,7 @@ benchmarks! { add_referendum::(i); } - assert_eq!(Democracy::::referendum_count(), r, "referenda not created"); + assert_eq!(ReferendumCount::::get(), r, "referenda not created"); // Launch public assert!(add_proposal::(r).is_ok(), "proposal not created"); @@ -433,7 +433,7 @@ benchmarks! { }: { Democracy::::on_initialize(block_number) } verify { // One extra because of next public - assert_eq!(Democracy::::referendum_count(), r + 1, "proposal not accepted"); + assert_eq!(ReferendumCount::::get(), r + 1, "proposal not accepted"); // All should be finished for i in 0 .. r { @@ -461,8 +461,8 @@ benchmarks! { ReferendumInfoOf::::insert(key, info); } - assert_eq!(Democracy::::referendum_count(), r, "referenda not created"); - assert_eq!(Democracy::::lowest_unbaked(), 0, "invalid referenda init"); + assert_eq!(ReferendumCount::::get(), r, "referenda not created"); + assert_eq!(LowestUnbaked::::get(), 0, "invalid referenda init"); }: { Democracy::::on_initialize(1u32.into()) } verify { @@ -491,8 +491,8 @@ benchmarks! 
{ ReferendumInfoOf::::insert(key, info); } - assert_eq!(Democracy::::referendum_count(), r, "referenda not created"); - assert_eq!(Democracy::::lowest_unbaked(), 0, "invalid referenda init"); + assert_eq!(ReferendumCount::::get(), r, "referenda not created"); + assert_eq!(LowestUnbaked::::get(), 0, "invalid referenda init"); let block_number = T::LaunchPeriod::get(); diff --git a/substrate/frame/democracy/src/lib.rs b/substrate/frame/democracy/src/lib.rs index f3d33a72f3a..19cdc754659 100644 --- a/substrate/frame/democracy/src/lib.rs +++ b/substrate/frame/democracy/src/lib.rs @@ -346,12 +346,10 @@ pub mod pallet { /// The number of (public) proposals that have been made so far. #[pallet::storage] - #[pallet::getter(fn public_prop_count)] pub type PublicPropCount = StorageValue<_, PropIndex, ValueQuery>; /// The public proposals. Unsorted. The second item is the proposal. #[pallet::storage] - #[pallet::getter(fn public_props)] pub type PublicProps = StorageValue< _, BoundedVec<(PropIndex, BoundedCallOf, T::AccountId), T::MaxProposals>, @@ -362,7 +360,6 @@ pub mod pallet { /// /// TWOX-NOTE: Safe, as increasing integer keys are safe. #[pallet::storage] - #[pallet::getter(fn deposit_of)] pub type DepositOf = StorageMap< _, Twox64Concat, @@ -372,20 +369,17 @@ pub mod pallet { /// The next free referendum index, aka the number of referenda started so far. #[pallet::storage] - #[pallet::getter(fn referendum_count)] pub type ReferendumCount = StorageValue<_, ReferendumIndex, ValueQuery>; /// The lowest referendum index representing an unbaked referendum. Equal to /// `ReferendumCount` if there isn't a unbaked referendum. #[pallet::storage] - #[pallet::getter(fn lowest_unbaked)] pub type LowestUnbaked = StorageValue<_, ReferendumIndex, ValueQuery>; /// Information concerning any given referendum. /// /// TWOX-NOTE: SAFE as indexes are not under an attacker’s control.
#[pallet::storage] - #[pallet::getter(fn referendum_info)] pub type ReferendumInfoOf = StorageMap< _, Twox64Concat, @@ -595,15 +589,15 @@ pub mod pallet { let who = T::SubmitOrigin::ensure_origin(origin)?; ensure!(value >= T::MinimumDeposit::get(), Error::::ValueLow); - let index = Self::public_prop_count(); + let index = PublicPropCount::::get(); let real_prop_count = PublicProps::::decode_len().unwrap_or(0) as u32; let max_proposals = T::MaxProposals::get(); ensure!(real_prop_count < max_proposals, Error::::TooMany); let proposal_hash = proposal.hash(); - if let Some((until, _)) = >::get(proposal_hash) { + if let Some((until, _)) = Blacklist::::get(proposal_hash) { ensure!( - >::block_number() >= until, + frame_system::Pallet::::block_number() >= until, Error::::ProposalBlacklisted, ); } @@ -638,11 +632,11 @@ pub mod pallet { let seconds = Self::len_of_deposit_of(proposal).ok_or(Error::::ProposalMissing)?; ensure!(seconds < T::MaxDeposits::get(), Error::::TooMany); - let mut deposit = Self::deposit_of(proposal).ok_or(Error::::ProposalMissing)?; + let mut deposit = DepositOf::::get(proposal).ok_or(Error::::ProposalMissing)?; T::Currency::reserve(&who, deposit.1)?; let ok = deposit.0.try_push(who.clone()).is_ok(); debug_assert!(ok, "`seconds` is below static limit; `try_insert` should succeed; qed"); - >::insert(proposal, deposit); + DepositOf::::insert(proposal, deposit); Self::deposit_event(Event::::Seconded { seconder: who, prop_index: proposal }); Ok(()) } @@ -683,9 +677,9 @@ pub mod pallet { let status = Self::referendum_status(ref_index)?; let h = status.proposal.hash(); - ensure!(!>::contains_key(h), Error::::AlreadyCanceled); + ensure!(!Cancellations::::contains_key(h), Error::::AlreadyCanceled); - >::insert(h, true); + Cancellations::::insert(h, true); Self::internal_cancel_referendum(ref_index); Ok(()) } @@ -703,14 +697,14 @@ pub mod pallet { proposal: BoundedCallOf, ) -> DispatchResult { T::ExternalOrigin::ensure_origin(origin)?; - ensure!(!>::exists(), Error::::DuplicateProposal); - if let Some((until, _)) = >::get(proposal.hash()) { + ensure!(!NextExternal::::exists(), Error::::DuplicateProposal); + if let Some((until, _)) = Blacklist::::get(proposal.hash()) { ensure!( - >::block_number() >= until, + frame_system::Pallet::::block_number() >= until, Error::::ProposalBlacklisted, ); } - >::put((proposal, VoteThreshold::SuperMajorityApprove)); + NextExternal::::put((proposal, VoteThreshold::SuperMajorityApprove)); Ok(()) } @@ -732,7 +726,7 @@ pub mod pallet { proposal: BoundedCallOf, ) -> DispatchResult { T::ExternalMajorityOrigin::ensure_origin(origin)?; - >::put((proposal, VoteThreshold::SimpleMajority)); + NextExternal::::put((proposal, VoteThreshold::SimpleMajority)); Ok(()) } @@ -754,7 +748,7 @@ pub mod pallet { proposal: BoundedCallOf, ) -> DispatchResult { T::ExternalDefaultOrigin::ensure_origin(origin)?; - >::put((proposal, VoteThreshold::SuperMajorityAgainst)); + NextExternal::::put((proposal, VoteThreshold::SuperMajorityAgainst)); Ok(()) } @@ -800,15 +794,15 @@ pub mod pallet { ensure!(voting_period > Zero::zero(), Error::::VotingPeriodLow); let (ext_proposal, threshold) = - >::get().ok_or(Error::::ProposalMissing)?; + NextExternal::::get().ok_or(Error::::ProposalMissing)?; ensure!( threshold != VoteThreshold::SuperMajorityApprove, Error::::NotSimpleMajority, ); ensure!(proposal_hash == ext_proposal.hash(), Error::::InvalidHash); - >::kill(); - let now = >::block_number(); + NextExternal::::kill(); + let now = frame_system::Pallet::::block_number(); let ref_index = 
Self::inject_referendum( now.saturating_add(voting_period), ext_proposal, @@ -840,7 +834,7 @@ pub mod pallet { } let mut existing_vetoers = - >::get(&proposal_hash).map(|pair| pair.1).unwrap_or_default(); + Blacklist::::get(&proposal_hash).map(|pair| pair.1).unwrap_or_default(); let insert_position = existing_vetoers.binary_search(&who).err().ok_or(Error::::AlreadyVetoed)?; existing_vetoers @@ -848,11 +842,11 @@ pub mod pallet { .map_err(|_| Error::::TooMany)?; let until = - >::block_number().saturating_add(T::CooloffPeriod::get()); - >::insert(&proposal_hash, (until, existing_vetoers)); + frame_system::Pallet::::block_number().saturating_add(T::CooloffPeriod::get()); + Blacklist::::insert(&proposal_hash, (until, existing_vetoers)); Self::deposit_event(Event::::Vetoed { who, proposal_hash, until }); - >::kill(); + NextExternal::::kill(); Self::clear_metadata(MetadataOwner::External); Ok(()) } @@ -943,7 +937,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::clear_public_proposals())] pub fn clear_public_proposals(origin: OriginFor) -> DispatchResult { ensure_root(origin)?; - >::kill(); + PublicProps::::kill(); Ok(()) } @@ -1146,7 +1140,7 @@ pub mod pallet { ) -> DispatchResult { match owner { MetadataOwner::External => { - let (_, threshold) = >::get().ok_or(Error::::NoProposal)?; + let (_, threshold) = NextExternal::::get().ok_or(Error::::NoProposal)?; Self::ensure_external_origin(threshold, origin)?; }, MetadataOwner::Proposal(index) => { @@ -1201,7 +1195,7 @@ impl Pallet { /// Get the amount locked in support of `proposal`; `None` if proposal isn't a valid proposal /// index. pub fn backing_for(proposal: PropIndex) -> Option> { - Self::deposit_of(proposal).map(|(l, d)| d.saturating_mul((l.len() as u32).into())) + DepositOf::::get(proposal).map(|(l, d)| d.saturating_mul((l.len() as u32).into())) } /// Get all referenda ready for tally at block `n`. @@ -1209,8 +1203,8 @@ impl Pallet { n: BlockNumberFor, ) -> Vec<(ReferendumIndex, ReferendumStatus, BoundedCallOf, BalanceOf>)> { - let next = Self::lowest_unbaked(); - let last = Self::referendum_count(); + let next = LowestUnbaked::::get(); + let last = ReferendumCount::::get(); Self::maturing_referenda_at_inner(n, next..last) } @@ -1221,7 +1215,7 @@ impl Pallet { { range .into_iter() - .map(|i| (i, Self::referendum_info(i))) + .map(|i| (i, ReferendumInfoOf::::get(i))) .filter_map(|(i, maybe_info)| match maybe_info { Some(ReferendumInfo::Ongoing(status)) => Some((i, status)), _ => None, @@ -1238,8 +1232,8 @@ impl Pallet { threshold: VoteThreshold, delay: BlockNumberFor, ) -> ReferendumIndex { - >::inject_referendum( - >::block_number().saturating_add(T::VotingPeriod::get()), + Pallet::::inject_referendum( + frame_system::Pallet::::block_number().saturating_add(T::VotingPeriod::get()), proposal, threshold, delay, @@ -1529,12 +1523,12 @@ impl Pallet { threshold: VoteThreshold, delay: BlockNumberFor, ) -> ReferendumIndex { - let ref_index = Self::referendum_count(); + let ref_index = ReferendumCount::::get(); ReferendumCount::::put(ref_index + 1); let status = ReferendumStatus { end, proposal, threshold, delay, tally: Default::default() }; let item = ReferendumInfo::Ongoing(status); - >::insert(ref_index, item); + ReferendumInfoOf::::insert(ref_index, item); Self::deposit_event(Event::::Started { ref_index, threshold }); ref_index } @@ -1551,7 +1545,7 @@ impl Pallet { /// Table the waiting external proposal for a vote, if there is one. 
fn launch_external(now: BlockNumberFor) -> DispatchResult { - if let Some((proposal, threshold)) = >::take() { + if let Some((proposal, threshold)) = NextExternal::::take() { LastTabledWasExternal::::put(true); Self::deposit_event(Event::::ExternalTabled); let ref_index = Self::inject_referendum( @@ -1569,15 +1563,15 @@ impl Pallet { /// Table the waiting public proposal with the highest backing for a vote. fn launch_public(now: BlockNumberFor) -> DispatchResult { - let mut public_props = Self::public_props(); + let mut public_props = PublicProps::::get(); if let Some((winner_index, _)) = public_props.iter().enumerate().max_by_key( // defensive only: All current public proposals have an amount locked |x| Self::backing_for((x.1).0).defensive_unwrap_or_else(Zero::zero), ) { let (prop_index, proposal, _) = public_props.swap_remove(winner_index); - >::put(public_props); + PublicProps::::put(public_props); - if let Some((depositors, deposit)) = >::take(prop_index) { + if let Some((depositors, deposit)) = DepositOf::::take(prop_index) { // refund depositors for d in depositors.iter() { T::Currency::unreserve(d, deposit); @@ -1642,8 +1636,8 @@ impl Pallet { let max_block_weight = T::BlockWeights::get().max_block; let mut weight = Weight::zero(); - let next = Self::lowest_unbaked(); - let last = Self::referendum_count(); + let next = LowestUnbaked::::get(); + let last = ReferendumCount::::get(); let r = last.saturating_sub(next); // pick out another public referendum if it's time. @@ -1674,9 +1668,9 @@ impl Pallet { // * We shouldn't iterate more than `LaunchPeriod/VotingPeriod + 1` times because the number // of unbaked referendum is bounded by this number. In case those number have changed in a // runtime upgrade the formula should be adjusted but the bound should still be sensible. - >::mutate(|ref_index| { + LowestUnbaked::::mutate(|ref_index| { while *ref_index < last && - Self::referendum_info(*ref_index) + ReferendumInfoOf::::get(*ref_index) .map_or(true, |info| matches!(info, ReferendumInfo::Finished { .. })) { *ref_index += 1 @@ -1692,7 +1686,7 @@ impl Pallet { fn len_of_deposit_of(proposal: PropIndex) -> Option { // DepositOf first tuple element is a vec, decoding its len is equivalent to decode a // `Compact`. - decode_compact_u32_at(&>::hashed_key_for(proposal)) + decode_compact_u32_at(&DepositOf::::hashed_key_for(proposal)) } /// Return a proposal of an index. 
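The change above is mechanical throughout this patch: the deprecated `#[pallet::getter]` attribute is dropped from each storage item and every generated `Self::foo()` accessor call is rewritten as a direct `StorageItem::<T>::get()` access. A minimal sketch of the before/after shape — the pallet and its `Counter` storage value are hypothetical and not taken from this patch:

```rust
#[frame_support::pallet]
pub mod pallet {
	use frame_support::pallet_prelude::*;

	#[pallet::pallet]
	pub struct Pallet<T>(_);

	#[pallet::config]
	pub trait Config: frame_system::Config {}

	// Previously this item would carry `#[pallet::getter(fn counter)]`,
	// generating a `Pallet::<T>::counter()` accessor. With the getter
	// attribute removed, callers read the storage type directly.
	#[pallet::storage]
	pub type Counter<T> = StorageValue<_, u32, ValueQuery>;

	impl<T: Config> Pallet<T> {
		pub fn bump() -> u32 {
			// Direct storage access, the style used throughout the diff:
			// `Counter::<T>::get()` instead of the old `Self::counter()`.
			let next = Counter::<T>::get().saturating_add(1);
			Counter::<T>::put(next);
			next
		}
	}
}
```

Dropping the getters removes generated code and makes every storage read explicit at the call site, which is why the diff touches so many call sites but changes no behavior.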
diff --git a/substrate/frame/democracy/src/tests.rs b/substrate/frame/democracy/src/tests.rs index e2946ba9815..9303c0da504 100644 --- a/substrate/frame/democracy/src/tests.rs +++ b/substrate/frame/democracy/src/tests.rs @@ -194,7 +194,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { #[test] fn params_should_work() { new_test_ext().execute_with(|| { - assert_eq!(Democracy::referendum_count(), 0); + assert_eq!(ReferendumCount::::get(), 0); assert_eq!(Balances::free_balance(42), 0); assert_eq!(Balances::total_issuance(), 210); }); diff --git a/substrate/frame/democracy/src/tests/cancellation.rs b/substrate/frame/democracy/src/tests/cancellation.rs index b4c42f9c790..eeb1df301db 100644 --- a/substrate/frame/democracy/src/tests/cancellation.rs +++ b/substrate/frame/democracy/src/tests/cancellation.rs @@ -30,14 +30,14 @@ fn cancel_referendum_should_work() { ); assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); assert_ok!(Democracy::cancel_referendum(RuntimeOrigin::root(), r.into())); - assert_eq!(Democracy::lowest_unbaked(), 0); + assert_eq!(LowestUnbaked::::get(), 0); next_block(); next_block(); - assert_eq!(Democracy::lowest_unbaked(), 1); - assert_eq!(Democracy::lowest_unbaked(), Democracy::referendum_count()); + assert_eq!(LowestUnbaked::::get(), 1); + assert_eq!(LowestUnbaked::::get(), ReferendumCount::::get()); assert_eq!(Balances::free_balance(42), 0); }); } @@ -56,7 +56,7 @@ fn emergency_cancel_should_work() { assert_noop!(Democracy::emergency_cancel(RuntimeOrigin::signed(3), r), BadOrigin); assert_ok!(Democracy::emergency_cancel(RuntimeOrigin::signed(4), r)); - assert!(Democracy::referendum_info(r).is_none()); + assert!(ReferendumInfoOf::::get(r).is_none()); // some time later... diff --git a/substrate/frame/democracy/src/tests/external_proposing.rs b/substrate/frame/democracy/src/tests/external_proposing.rs index 08b497ab4b9..78ef2904e5b 100644 --- a/substrate/frame/democracy/src/tests/external_proposing.rs +++ b/substrate/frame/democracy/src/tests/external_proposing.rs @@ -24,12 +24,12 @@ fn veto_external_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),)); - assert!(>::exists()); + assert!(NextExternal::::exists()); let h = set_balance_proposal(2).hash(); assert_ok!(Democracy::veto_external(RuntimeOrigin::signed(3), h)); // cancelled. - assert!(!>::exists()); + assert!(!NextExternal::::exists()); // fails - same proposal can't be resubmitted. assert_noop!( Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),), @@ -46,7 +46,7 @@ fn veto_external_works() { fast_forward_to(2); // works; as we're out of the cooloff period. assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),)); - assert!(>::exists()); + assert!(NextExternal::::exists()); // 3 can't veto the same thing twice. assert_noop!( @@ -57,7 +57,7 @@ fn veto_external_works() { // 4 vetoes. assert_ok!(Democracy::veto_external(RuntimeOrigin::signed(4), h)); // cancelled again. 
- assert!(!>::exists()); + assert!(!NextExternal::::exists()); fast_forward_to(3); // same proposal fails as we're still in cooloff diff --git a/substrate/frame/democracy/src/tests/fast_tracking.rs b/substrate/frame/democracy/src/tests/fast_tracking.rs index 85e7792a4c2..89dce1dffe1 100644 --- a/substrate/frame/democracy/src/tests/fast_tracking.rs +++ b/substrate/frame/democracy/src/tests/fast_tracking.rs @@ -33,13 +33,13 @@ fn fast_track_referendum_works() { set_balance_proposal(2) )); let hash = note_preimage(1); - assert!(>::get(MetadataOwner::External).is_none()); + assert!(MetadataOf::::get(MetadataOwner::External).is_none()); assert_ok!(Democracy::set_metadata( RuntimeOrigin::signed(3), MetadataOwner::External, Some(hash), ),); - assert!(>::get(MetadataOwner::External).is_some()); + assert!(MetadataOf::::get(MetadataOwner::External).is_some()); assert_noop!(Democracy::fast_track(RuntimeOrigin::signed(1), h, 3, 2), BadOrigin); assert_ok!(Democracy::fast_track(RuntimeOrigin::signed(5), h, 2, 0)); assert_eq!( @@ -53,8 +53,8 @@ fn fast_track_referendum_works() { }) ); // metadata reset from the external proposal to the referendum. - assert!(>::get(MetadataOwner::External).is_none()); - assert!(>::get(MetadataOwner::Referendum(0)).is_some()); + assert!(MetadataOf::::get(MetadataOwner::External).is_none()); + assert!(MetadataOf::::get(MetadataOwner::Referendum(0)).is_some()); }); } diff --git a/substrate/frame/democracy/src/tests/metadata.rs b/substrate/frame/democracy/src/tests/metadata.rs index 1b6d66a8bc4..341f14e5586 100644 --- a/substrate/frame/democracy/src/tests/metadata.rs +++ b/substrate/frame/democracy/src/tests/metadata.rs @@ -33,7 +33,7 @@ fn set_external_metadata_works() { ); // create an external proposal. assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2))); - assert!(>::exists()); + assert!(NextExternal::::exists()); // fails to set metadata with non external origin. assert_noop!( Democracy::set_metadata(RuntimeOrigin::signed(1), owner.clone(), Some(invalid_hash)), @@ -61,7 +61,7 @@ fn clear_metadata_works() { let owner = MetadataOwner::External; // create an external proposal. assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2))); - assert!(>::exists()); + assert!(NextExternal::::exists()); // set metadata. let hash = note_preimage(1); assert_ok!(Democracy::set_metadata(RuntimeOrigin::signed(2), owner.clone(), Some(hash))); @@ -87,7 +87,7 @@ fn set_proposal_metadata_works() { // create an external proposal. assert_ok!(propose_set_balance(1, 2, 5)); // metadata owner is a public proposal. - let owner = MetadataOwner::Proposal(Democracy::public_prop_count() - 1); + let owner = MetadataOwner::Proposal(PublicPropCount::::get() - 1); // fails to set non-existing preimage. assert_noop!( Democracy::set_metadata(RuntimeOrigin::signed(1), owner.clone(), Some(invalid_hash)), @@ -115,7 +115,7 @@ fn clear_proposal_metadata_works() { // create an external proposal. assert_ok!(propose_set_balance(1, 2, 5)); // metadata owner is a public proposal. - let owner = MetadataOwner::Proposal(Democracy::public_prop_count() - 1); + let owner = MetadataOwner::Proposal(PublicPropCount::::get() - 1); // set metadata. 
let hash = note_preimage(1); assert_ok!(Democracy::set_metadata(RuntimeOrigin::signed(1), owner.clone(), Some(hash))); diff --git a/substrate/frame/democracy/src/tests/public_proposals.rs b/substrate/frame/democracy/src/tests/public_proposals.rs index 69a2d3e2568..01f47947f8e 100644 --- a/substrate/frame/democracy/src/tests/public_proposals.rs +++ b/substrate/frame/democracy/src/tests/public_proposals.rs @@ -97,10 +97,10 @@ fn cancel_proposal_should_work() { MetadataOwner::Proposal(0), Some(hash) )); - assert!(>::get(MetadataOwner::Proposal(0)).is_some()); + assert!(MetadataOf::::get(MetadataOwner::Proposal(0)).is_some()); assert_ok!(Democracy::cancel_proposal(RuntimeOrigin::root(), 0)); // metadata cleared, preimage unrequested. - assert!(>::get(MetadataOwner::Proposal(0)).is_none()); + assert!(MetadataOf::::get(MetadataOwner::Proposal(0)).is_none()); System::assert_has_event(crate::Event::ProposalCanceled { prop_index: 0 }.into()); System::assert_last_event( crate::Event::MetadataCleared { owner: MetadataOwner::Proposal(0), hash }.into(), diff --git a/substrate/frame/democracy/src/tests/scheduling.rs b/substrate/frame/democracy/src/tests/scheduling.rs index fdbc8fdb349..43f51628aaf 100644 --- a/substrate/frame/democracy/src/tests/scheduling.rs +++ b/substrate/frame/democracy/src/tests/scheduling.rs @@ -30,10 +30,10 @@ fn simple_passing_should_work() { ); assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); assert_eq!(tally(r), Tally { ayes: 1, nays: 0, turnout: 10 }); - assert_eq!(Democracy::lowest_unbaked(), 0); + assert_eq!(LowestUnbaked::::get(), 0); next_block(); next_block(); - assert_eq!(Democracy::lowest_unbaked(), 1); + assert_eq!(LowestUnbaked::::get(), 1); assert_eq!(Balances::free_balance(42), 2); }); } @@ -140,16 +140,16 @@ fn lowest_unbaked_should_be_sensible() { assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r2, aye(1))); // r3 is canceled assert_ok!(Democracy::cancel_referendum(RuntimeOrigin::root(), r3.into())); - assert_eq!(Democracy::lowest_unbaked(), 0); + assert_eq!(LowestUnbaked::::get(), 0); next_block(); // r2 ends with approval - assert_eq!(Democracy::lowest_unbaked(), 0); + assert_eq!(LowestUnbaked::::get(), 0); next_block(); // r1 ends with approval - assert_eq!(Democracy::lowest_unbaked(), 3); - assert_eq!(Democracy::lowest_unbaked(), Democracy::referendum_count()); + assert_eq!(LowestUnbaked::::get(), 3); + assert_eq!(LowestUnbaked::::get(), ReferendumCount::::get()); // r2 is executed assert_eq!(Balances::free_balance(42), 2); diff --git a/substrate/frame/democracy/src/tests/voting.rs b/substrate/frame/democracy/src/tests/voting.rs index f096b633ee6..61b80cc97fe 100644 --- a/substrate/frame/democracy/src/tests/voting.rs +++ b/substrate/frame/democracy/src/tests/voting.rs @@ -65,13 +65,13 @@ fn single_proposal_should_work() { System::set_block_number(0); assert_ok!(propose_set_balance(1, 2, 1)); let r = 0; - assert!(Democracy::referendum_info(r).is_none()); + assert!(ReferendumInfoOf::::get(r).is_none()); // start of 2 => next referendum scheduled. 
fast_forward_to(2);
 assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1)));
-		assert_eq!(Democracy::referendum_count(), 1);
+		assert_eq!(ReferendumCount::::get(), 1);
 		assert_eq!(
 			Democracy::referendum_status(0),
 			Ok(ReferendumStatus {
--
GitLab


From 943eb46ed54c2fcd9fab693b86ef59ce18c0f792 Mon Sep 17 00:00:00 2001
From: Clara van Staden
Date: Thu, 16 May 2024 15:54:28 +0200
Subject: [PATCH 026/106] Snowbridge - Ethereum Client - Reject finalized
 updates without a sync committee in next store period (#4478)

While syncing Ethereum consensus updates to the Snowbridge Ethereum light
client, the syncing process stalled due to error `InvalidSyncCommitteeUpdate`
when importing the next sync committee for period `1087`.

This bug manifested specifically because our light client checkpoint is a few
weeks old (submitted to governance weeks ago) and had to catch up to a recent
block. Since then, we have done thorough testing of the catch-up sync process.

### Symptoms

- Import next sync committee for period `1086` (essentially period `1087`).
  Light client store period = `1086`.
- Import header in period `1087`. Light client store period = `1087`. The
  current and next sync committees are not updated and are now in an outdated
  state (current sync committee = `1086` and next sync committee = `1087`,
  where it should be current sync committee = `1087` and next sync
  committee = `None`).
- Import next sync committee for period `1087` (essentially period `1088`)
  fails because the expected next sync committee's roots don't match.

### Bug

The bug here is that the current and next sync committees didn't hand over
when an update in the next period was received.

### Fix

There are two possible fixes here:
1. Correctly hand over sync committees when a header in the next period is
   received.
2. Reject updates in the next period until the next sync committee period is
   known.

We opted for solution 2, which is more conservative and requires fewer
changes.

### Polkadot-sdk versions

This fix should be backported to polkadot-sdk versions 1.7 and up.

Snowfork PR: https://github.com/Snowfork/polkadot-sdk/pull/145

---------

Co-authored-by: Vincent Geddes <117534+vgeddes@users.noreply.github.com>
---
 .../snowbridge/pallets/ethereum-client/src/lib.rs |  7 +++++++
 .../pallets/ethereum-client/src/tests.rs          | 14 +++++++++++++-
 prdoc/pr_4478.prdoc                               | 13 +++++++++++++
 3 files changed, 33 insertions(+), 1 deletion(-)
 create mode 100644 prdoc/pr_4478.prdoc

diff --git a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs
index c1b9e19729b..0ba1b8df465 100644
--- a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs
+++ b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs
@@ -104,6 +104,7 @@ pub mod pallet {
 #[pallet::error]
 pub enum Error {
 SkippedSyncCommitteePeriod,
+ SyncCommitteeUpdateRequired,
 /// Attested header is older than latest finalized header.
 IrrelevantUpdate,
 NotBootstrapped,
@@ -320,6 +321,7 @@ pub mod pallet {
 // Verify update is relevant.
let update_attested_period = compute_period(update.attested_header.slot);
+ let update_finalized_period = compute_period(update.finalized_header.slot);
 let update_has_next_sync_committee = !>::exists() &&
 (update.next_sync_committee_update.is_some() &&
 update_attested_period == store_period);
@@ -395,6 +397,11 @@ pub mod pallet {
 ),
 Error::::InvalidSyncCommitteeMerkleProof
 );
+ } else {
+ ensure!(
+ update_finalized_period == store_period,
+ Error::::SyncCommitteeUpdateRequired
+ );
 }

 // Verify sync committee aggregate signature.
diff --git a/bridges/snowbridge/pallets/ethereum-client/src/tests.rs b/bridges/snowbridge/pallets/ethereum-client/src/tests.rs
index 765958c1282..da762dc2fd8 100644
--- a/bridges/snowbridge/pallets/ethereum-client/src/tests.rs
+++ b/bridges/snowbridge/pallets/ethereum-client/src/tests.rs
@@ -362,13 +362,14 @@ fn submit_update_with_sync_committee_in_current_period() {
 }

 #[test]
-fn submit_update_in_next_period() {
+fn reject_submit_update_in_next_period() {
 let checkpoint = Box::new(load_checkpoint_update_fixture());
 let sync_committee_update = Box::new(load_sync_committee_update_fixture());
 let update = Box::new(load_next_finalized_header_update_fixture());
 let sync_committee_period = compute_period(sync_committee_update.finalized_header.slot);
 let next_sync_committee_period = compute_period(update.finalized_header.slot);
 assert_eq!(sync_committee_period + 1, next_sync_committee_period);
+ let next_sync_committee_update = Box::new(load_next_sync_committee_update_fixture());

 new_tester().execute_with(|| {
 assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint));
 assert_ok!(EthereumBeaconClient::submit(
 RuntimeOrigin::signed(1),
 sync_committee_update.clone()
 ));
+ // check an update in the next period is rejected
+ assert_err!(
+ EthereumBeaconClient::submit(RuntimeOrigin::signed(1), update.clone()),
+ Error::::SyncCommitteeUpdateRequired
+ );
+ // submit update with next sync committee
+ assert_ok!(EthereumBeaconClient::submit(
+ RuntimeOrigin::signed(1),
+ next_sync_committee_update
+ ));
+ // check same header in the next period can now be submitted successfully
 assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), update.clone()));
 let block_root: H256 = update.finalized_header.clone().hash_tree_root().unwrap();
 assert!(>::contains_key(block_root));
diff --git a/prdoc/pr_4478.prdoc b/prdoc/pr_4478.prdoc
new file mode 100644
index 00000000000..22e2e43db4c
--- /dev/null
+++ b/prdoc/pr_4478.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Snowbridge - Ethereum Client - Reject finalized updates without a sync committee in next store period
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Fixes a bug in the Ethereum light client that stalled the light client when an update in the next sync committee period was received without first receiving the next sync committee update in that period.
+
+crates:
+  - name: snowbridge-pallet-ethereum-client
+    bump: patch
--
GitLab


From d5fe478e4fe2d62b0800888ae77b00ff0ba28b28 Mon Sep 17 00:00:00 2001
From: Jesse Chejieh
Date: Thu, 16 May 2024 17:22:29 +0100
Subject: [PATCH 027/106] Adds `MaxRank` Config in `pallet-core-fellowship`
 (#3393)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

resolves #3315

---------

Co-authored-by: doordashcon
Co-authored-by: command-bot <>
Co-authored-by: Bastian Köcher
---
 .../collectives-westend/src/ambassador/mod.rs |   1 +
 .../collectives-westend/src/fellowship/mod.rs |   1 +
 .../collectives-westend/src/lib.rs            |   7 +-
 prdoc/pr_3393.prdoc                           |  12 ++
 substrate/bin/node/runtime/src/lib.rs         |   1 +
 .../frame/core-fellowship/src/benchmarking.rs |  24 ++--
 substrate/frame/core-fellowship/src/lib.rs    |  69 +++++++---
 .../frame/core-fellowship/src/migration.rs    | 111 ++++++++++++++++
 .../core-fellowship/src/tests/integration.rs  |  14 +-
 .../frame/core-fellowship/src/tests/unit.rs   |  29 +++--
 .../frame/core-fellowship/src/weights.rs      | 122 +++++++++---------
 11 files changed, 280 insertions(+), 111 deletions(-)
 create mode 100644 prdoc/pr_3393.prdoc
 create mode 100644 substrate/frame/core-fellowship/src/migration.rs

diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs
index 0c9f428c139..ceef6de6b74 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs
@@ -220,6 +220,7 @@ impl pallet_core_fellowship::Config for Runtime {
 type ApproveOrigin = PromoteOrigin;
 type PromoteOrigin = PromoteOrigin;
 type EvidenceSize = ConstU32<65536>;
+ type MaxRank = ConstU32<9>;
 }

 pub type AmbassadorSalaryInstance = pallet_salary::Instance2;

diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs
index 94765287637..6a4a1820796 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs
@@ -210,6 +210,7 @@ impl pallet_core_fellowship::Config for Runtime {
 EnsureCanPromoteTo,
 >;
 type EvidenceSize = ConstU32<65536>;
+ type MaxRank = ConstU32<9>;
 }

 pub type FellowshipSalaryInstance = pallet_salary::Instance1;

diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
index 59005d0fa97..5cb24c4edb7 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
@@ -44,8 +44,9 @@ pub mod xcm_config;
 pub mod fellowship;
 pub use ambassador::pallet_ambassador_origins;

+use ambassador::AmbassadorCoreInstance;
 use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases;
-use fellowship::{pallet_fellowship_origins, Fellows};
+use fellowship::{pallet_fellowship_origins, Fellows, FellowshipCoreInstance};
 use impls::{AllianceProposalProvider, EqualOrGreatestRootCmp};
 use sp_api::impl_runtime_apis;
 use sp_core::{crypto::KeyTypeId, OpaqueMetadata};
@@ -739,6 +740,10 @@ type Migrations = (
 cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5,
 // permanent
 pallet_xcm::migration::MigrateToLatestXcmVersion,
+ // unreleased
+
pallet_core_fellowship::migration::MigrateV0ToV1, + // unreleased + pallet_core_fellowship::migration::MigrateV0ToV1, ); /// Executive: handles dispatch to the various modules. diff --git a/prdoc/pr_3393.prdoc b/prdoc/pr_3393.prdoc new file mode 100644 index 00000000000..27ebb385930 --- /dev/null +++ b/prdoc/pr_3393.prdoc @@ -0,0 +1,12 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add `MaxRank` Config to `pallet-core-fellowship` + +doc: + - audience: Runtime User + description: | + This PR adds a new Config `MaxRank` to the core fellowship pallet. Initially, the maximum rank was set to IX (Grand Master) on the core-fellowship pallet, corresponding to the establishment of the Technical Fellowship and setting the default member count to nine. However, with the introduction of new collectives, this maximum rank is expected to evolve. + +crates: + - name: pallet-core-fellowship diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 017ee9100f9..e05f8f61c30 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -1869,6 +1869,7 @@ impl pallet_core_fellowship::Config for Runtime { type ApproveOrigin = EnsureRootWithSuccess>; type PromoteOrigin = EnsureRootWithSuccess>; type EvidenceSize = ConstU32<16_384>; + type MaxRank = ConstU32<9>; } parameter_types! { diff --git a/substrate/frame/core-fellowship/src/benchmarking.rs b/substrate/frame/core-fellowship/src/benchmarking.rs index fd5453310be..b3ee3ab7d16 100644 --- a/substrate/frame/core-fellowship/src/benchmarking.rs +++ b/substrate/frame/core-fellowship/src/benchmarking.rs @@ -54,11 +54,12 @@ mod benchmarks { } fn set_benchmark_params, I: 'static>() -> Result<(), BenchmarkError> { + let max_rank = T::MaxRank::get().try_into().unwrap(); let params = ParamsType { - active_salary: [100u32.into(); 9], - passive_salary: [10u32.into(); 9], - demotion_period: [100u32.into(); 9], - min_promotion_period: [100u32.into(); 9], + active_salary: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), + passive_salary: BoundedVec::try_from(vec![10u32.into(); max_rank]).unwrap(), + demotion_period: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), + min_promotion_period: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), offboard_timeout: 1u32.into(), }; @@ -68,11 +69,12 @@ mod benchmarks { #[benchmark] fn set_params() -> Result<(), BenchmarkError> { + let max_rank = T::MaxRank::get().try_into().unwrap(); let params = ParamsType { - active_salary: [100u32.into(); 9], - passive_salary: [10u32.into(); 9], - demotion_period: [100u32.into(); 9], - min_promotion_period: [100u32.into(); 9], + active_salary: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), + passive_salary: BoundedVec::try_from(vec![10u32.into(); max_rank]).unwrap(), + demotion_period: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), + min_promotion_period: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), offboard_timeout: 1u32.into(), }; @@ -151,10 +153,14 @@ mod benchmarks { fn promote() -> Result<(), BenchmarkError> { // Ensure that the `min_promotion_period` wont get in our way. 
let mut params = Params::::get(); - params.min_promotion_period = [Zero::zero(); RANK_COUNT]; + let max_rank = T::MaxRank::get().try_into().unwrap(); + params.min_promotion_period = BoundedVec::try_from(vec![Zero::zero(); max_rank]).unwrap(); Params::::put(¶ms); let member = make_member::(1)?; + + // Set it to the max value to ensure that any possible auto-demotion period has passed. + frame_system::Pallet::::set_block_number(BlockNumberFor::::max_value()); ensure_evidence::(&member)?; #[extrinsic_call] diff --git a/substrate/frame/core-fellowship/src/lib.rs b/substrate/frame/core-fellowship/src/lib.rs index afb188261fd..94339b85d05 100644 --- a/substrate/frame/core-fellowship/src/lib.rs +++ b/substrate/frame/core-fellowship/src/lib.rs @@ -61,7 +61,7 @@ use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_arithmetic::traits::{Saturating, Zero}; use sp_runtime::RuntimeDebug; -use sp_std::{marker::PhantomData, prelude::*}; +use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; use frame_support::{ defensive, @@ -71,7 +71,7 @@ use frame_support::{ tokens::Balance as BalanceTrait, EnsureOrigin, EnsureOriginWithArg, Get, RankedMembers, RankedMembersSwapHandler, }, - BoundedVec, + BoundedVec, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; #[cfg(test)] @@ -79,10 +79,11 @@ mod tests; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; +pub mod migration; pub mod weights; pub use pallet::*; -pub use weights::WeightInfo; +pub use weights::*; /// The desired outcome for which evidence is presented. #[derive(Encode, Decode, Eq, PartialEq, Copy, Clone, TypeInfo, MaxEncodedLen, RuntimeDebug)] @@ -100,29 +101,46 @@ pub enum Wish { pub type Evidence = BoundedVec>::EvidenceSize>; /// The status of the pallet instance. -#[derive(Encode, Decode, Eq, PartialEq, Clone, TypeInfo, MaxEncodedLen, RuntimeDebug)] -pub struct ParamsType { +#[derive( + Encode, + Decode, + CloneNoBound, + EqNoBound, + PartialEqNoBound, + RuntimeDebugNoBound, + TypeInfo, + MaxEncodedLen, +)] +#[scale_info(skip_type_params(Ranks))] +pub struct ParamsType< + Balance: Clone + Eq + PartialEq + Debug, + BlockNumber: Clone + Eq + PartialEq + Debug, + Ranks: Get, +> { /// The amounts to be paid when a member of a given rank (-1) is active. - active_salary: [Balance; RANKS], + pub active_salary: BoundedVec, /// The amounts to be paid when a member of a given rank (-1) is passive. - passive_salary: [Balance; RANKS], + pub passive_salary: BoundedVec, /// The period between which unproven members become demoted. - demotion_period: [BlockNumber; RANKS], + pub demotion_period: BoundedVec, /// The period between which members must wait before they may proceed to this rank. - min_promotion_period: [BlockNumber; RANKS], + pub min_promotion_period: BoundedVec, /// Amount by which an account can remain at rank 0 (candidate before being offboard entirely). 
- offboard_timeout: BlockNumber, + pub offboard_timeout: BlockNumber, } -impl Default - for ParamsType +impl< + Balance: Default + Copy + Eq + Debug, + BlockNumber: Default + Copy + Eq + Debug, + Ranks: Get, + > Default for ParamsType { fn default() -> Self { Self { - active_salary: [Balance::default(); RANKS], - passive_salary: [Balance::default(); RANKS], - demotion_period: [BlockNumber::default(); RANKS], - min_promotion_period: [BlockNumber::default(); RANKS], + active_salary: Default::default(), + passive_salary: Default::default(), + demotion_period: Default::default(), + min_promotion_period: Default::default(), offboard_timeout: BlockNumber::default(), } } @@ -148,11 +166,11 @@ pub mod pallet { traits::{tokens::GetSalary, EnsureOrigin}, }; use frame_system::{ensure_root, pallet_prelude::*}; - - /// Number of available ranks. - pub(crate) const RANK_COUNT: usize = 9; + /// The in-code storage version. + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(PhantomData<(T, I)>); #[pallet::config] @@ -194,9 +212,16 @@ pub mod pallet { /// The maximum size in bytes submitted evidence is allowed to be. #[pallet::constant] type EvidenceSize: Get; + + /// Represents the highest possible rank in this pallet. + /// + /// Increasing this value is supported, but decreasing it may lead to a broken state. + #[pallet::constant] + type MaxRank: Get; } - pub type ParamsOf = ParamsType<>::Balance, BlockNumberFor, RANK_COUNT>; + pub type ParamsOf = + ParamsType<>::Balance, BlockNumberFor, >::MaxRank>; pub type MemberStatusOf = MemberStatus>; pub type RankOf = <>::Members as RankedMembers>::Rank; @@ -338,8 +363,10 @@ pub mod pallet { #[pallet::call_index(1)] pub fn set_params(origin: OriginFor, params: Box>) -> DispatchResult { T::ParamsOrigin::ensure_origin_or_root(origin)?; + Params::::put(params.as_ref()); Self::deposit_event(Event::::ParamsChanged { params: *params }); + Ok(()) } @@ -540,7 +567,7 @@ pub mod pallet { /// in the range `1..=RANK_COUNT` is `None`. pub(crate) fn rank_to_index(rank: RankOf) -> Option { match TryInto::::try_into(rank) { - Ok(r) if r <= RANK_COUNT && r > 0 => Some(r - 1), + Ok(r) if r as u32 <= >::MaxRank::get() && r > 0 => Some(r - 1), _ => return None, } } diff --git a/substrate/frame/core-fellowship/src/migration.rs b/substrate/frame/core-fellowship/src/migration.rs new file mode 100644 index 00000000000..b8b5540a4b4 --- /dev/null +++ b/substrate/frame/core-fellowship/src/migration.rs @@ -0,0 +1,111 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage migrations for the core-fellowship pallet. 
+use super::*; +use frame_support::{ + pallet_prelude::*, + storage_alias, + traits::{DefensiveTruncateFrom, UncheckedOnRuntimeUpgrade}, + BoundedVec, +}; + +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; + +mod v0 { + use frame_system::pallet_prelude::BlockNumberFor; + + use super::*; + + #[derive(Encode, Decode, Eq, PartialEq, Clone, TypeInfo, MaxEncodedLen, RuntimeDebug)] + pub struct ParamsType { + pub active_salary: [Balance; RANKS], + pub passive_salary: [Balance; RANKS], + pub demotion_period: [BlockNumber; RANKS], + pub min_promotion_period: [BlockNumber; RANKS], + pub offboard_timeout: BlockNumber, + } + + impl Default + for ParamsType + { + fn default() -> Self { + Self { + active_salary: [Balance::default(); RANKS], + passive_salary: [Balance::default(); RANKS], + demotion_period: [BlockNumber::default(); RANKS], + min_promotion_period: [BlockNumber::default(); RANKS], + offboard_timeout: BlockNumber::default(), + } + } + } + + /// Number of available ranks from old version. + pub(crate) const RANK_COUNT: usize = 9; + + pub type ParamsOf = ParamsType<>::Balance, BlockNumberFor, RANK_COUNT>; + + /// V0 type for [`crate::Params`]. + #[storage_alias] + pub type Params, I: 'static> = + StorageValue, ParamsOf, ValueQuery>; +} + +pub struct MigrateToV1(PhantomData<(T, I)>); +impl, I: 'static> UncheckedOnRuntimeUpgrade for MigrateToV1 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, TryRuntimeError> { + ensure!( + T::MaxRank::get() >= v0::RANK_COUNT as u32, + "pallet-core-fellowship: new bound should not truncate" + ); + Ok(Default::default()) + } + + fn on_runtime_upgrade() -> frame_support::weights::Weight { + // Read the old value from storage + let old_value = v0::Params::::take(); + // Write the new value to storage + let new = crate::ParamsType { + active_salary: BoundedVec::defensive_truncate_from(old_value.active_salary.to_vec()), + passive_salary: BoundedVec::defensive_truncate_from(old_value.passive_salary.to_vec()), + demotion_period: BoundedVec::defensive_truncate_from( + old_value.demotion_period.to_vec(), + ), + min_promotion_period: BoundedVec::defensive_truncate_from( + old_value.min_promotion_period.to_vec(), + ), + offboard_timeout: old_value.offboard_timeout, + }; + crate::Params::::put(new); + T::DbWeight::get().reads_writes(1, 1) + } +} + +/// [`UncheckedOnRuntimeUpgrade`] implementation [`MigrateToV1`] wrapped in a +/// [`VersionedMigration`](frame_support::migrations::VersionedMigration), which ensures that: +/// - The migration only runs once when the on-chain storage version is 0 +/// - The on-chain storage version is updated to `1` after the migration executes +/// - Reads/Writes from checking/settings the on-chain storage version are accounted for +pub type MigrateV0ToV1 = frame_support::migrations::VersionedMigration< + 0, // The migration will only execute when the on-chain storage version is 0 + 1, // The on-chain storage version will be set to 1 after the migration is complete + MigrateToV1, + crate::pallet::Pallet, + ::DbWeight, +>; diff --git a/substrate/frame/core-fellowship/src/tests/integration.rs b/substrate/frame/core-fellowship/src/tests/integration.rs index d3bbac15805..f3137316658 100644 --- a/substrate/frame/core-fellowship/src/tests/integration.rs +++ b/substrate/frame/core-fellowship/src/tests/integration.rs @@ -25,8 +25,9 @@ use frame_support::{ }; use frame_system::EnsureSignedBy; use pallet_ranked_collective::{EnsureRanked, Geometric, Rank, TallyOf, Votes}; -use sp_core::Get; +use sp_core::{ConstU32, 
Get}; use sp_runtime::{ + bounded_vec, traits::{Convert, ReduceBy, ReplaceWithDefault, TryMorphInto}, BuildStorage, DispatchError, }; @@ -78,6 +79,7 @@ impl Config for Test { type ApproveOrigin = TryMapSuccess, u64>, TryMorphInto>; type PromoteOrigin = TryMapSuccess, u64>, TryMorphInto>; type EvidenceSize = EvidenceSize; + type MaxRank = ConstU32<9>; } pub struct TestPolls; @@ -163,11 +165,13 @@ pub fn new_test_ext() -> sp_io::TestExternalities { let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| { + assert_ok!(Club::add_member(RuntimeOrigin::root(), 100)); + promote_n_times(100, 9); let params = ParamsType { - active_salary: [10, 20, 30, 40, 50, 60, 70, 80, 90], - passive_salary: [1, 2, 3, 4, 5, 6, 7, 8, 9], - demotion_period: [2, 4, 6, 8, 10, 12, 14, 16, 18], - min_promotion_period: [3, 6, 9, 12, 15, 18, 21, 24, 27], + active_salary: bounded_vec![10, 20, 30, 40, 50, 60, 70, 80, 90], + passive_salary: bounded_vec![1, 2, 3, 4, 5, 6, 7, 8, 9], + demotion_period: bounded_vec![2, 4, 6, 8, 10, 12, 14, 16, 18], + min_promotion_period: bounded_vec![3, 6, 9, 12, 15, 18, 21, 24, 27], offboard_timeout: 1, }; assert_ok!(CoreFellowship::set_params(signed(1), Box::new(params))); diff --git a/substrate/frame/core-fellowship/src/tests/unit.rs b/substrate/frame/core-fellowship/src/tests/unit.rs index 669517d61a4..9245e5159a9 100644 --- a/substrate/frame/core-fellowship/src/tests/unit.rs +++ b/substrate/frame/core-fellowship/src/tests/unit.rs @@ -27,7 +27,7 @@ use frame_support::{ traits::{tokens::GetSalary, ConstU32, IsInVec, TryMapSuccess}, }; use frame_system::EnsureSignedBy; -use sp_runtime::{traits::TryMorphInto, BuildStorage, DispatchError, DispatchResult}; +use sp_runtime::{bounded_vec, traits::TryMorphInto, BuildStorage, DispatchError, DispatchResult}; use crate as pallet_core_fellowship; use crate::*; @@ -116,19 +116,22 @@ impl Config for Test { type ApproveOrigin = TryMapSuccess, u64>, TryMorphInto>; type PromoteOrigin = TryMapSuccess, u64>, TryMorphInto>; type EvidenceSize = ConstU32<1024>; + type MaxRank = ConstU32<9>; } pub fn new_test_ext() -> sp_io::TestExternalities { let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| { + set_rank(100, 9); let params = ParamsType { - active_salary: [10, 20, 30, 40, 50, 60, 70, 80, 90], - passive_salary: [1, 2, 3, 4, 5, 6, 7, 8, 9], - demotion_period: [2, 4, 6, 8, 10, 12, 14, 16, 18], - min_promotion_period: [3, 6, 9, 12, 15, 18, 21, 24, 27], + active_salary: bounded_vec![10, 20, 30, 40, 50, 60, 70, 80, 90], + passive_salary: bounded_vec![1, 2, 3, 4, 5, 6, 7, 8, 9], + demotion_period: bounded_vec![2, 4, 6, 8, 10, 12, 14, 16, 18], + min_promotion_period: bounded_vec![3, 6, 9, 12, 15, 18, 21, 24, 27], offboard_timeout: 1, }; + assert_ok!(CoreFellowship::set_params(signed(1), Box::new(params))); System::set_block_number(1); }); @@ -170,10 +173,10 @@ fn basic_stuff() { fn set_params_works() { new_test_ext().execute_with(|| { let params = ParamsType { - active_salary: [10, 20, 30, 40, 50, 60, 70, 80, 90], - passive_salary: [1, 2, 3, 4, 5, 6, 7, 8, 9], - demotion_period: [1, 2, 3, 4, 5, 6, 7, 8, 9], - min_promotion_period: [1, 2, 3, 4, 5, 10, 15, 20, 30], + active_salary: bounded_vec![10, 20, 30, 40, 50, 60, 70, 80, 90], + passive_salary: bounded_vec![1, 2, 3, 4, 5, 6, 7, 8, 9], + demotion_period: bounded_vec![1, 2, 3, 4, 5, 6, 7, 8, 9], + min_promotion_period: bounded_vec![1, 2, 3, 
4, 5, 10, 15, 20, 30], offboard_timeout: 1, }; assert_noop!( @@ -284,10 +287,10 @@ fn offboard_works() { fn infinite_demotion_period_works() { new_test_ext().execute_with(|| { let params = ParamsType { - active_salary: [10; 9], - passive_salary: [10; 9], - min_promotion_period: [10; 9], - demotion_period: [0; 9], + active_salary: bounded_vec![10, 10, 10, 10, 10, 10, 10, 10, 10], + passive_salary: bounded_vec![10, 10, 10, 10, 10, 10, 10, 10, 10], + min_promotion_period: bounded_vec![10, 10, 10, 10, 10, 10, 10, 10, 10], + demotion_period: bounded_vec![0, 0, 0, 0, 0, 0, 0, 0, 0], offboard_timeout: 0, }; assert_ok!(CoreFellowship::set_params(signed(1), Box::new(params))); diff --git a/substrate/frame/core-fellowship/src/weights.rs b/substrate/frame/core-fellowship/src/weights.rs index 1e42335067a..8fad6f585c1 100644 --- a/substrate/frame/core-fellowship/src/weights.rs +++ b/substrate/frame/core-fellowship/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_core_fellowship` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_core_fellowship -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/core-fellowship/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_core_fellowship +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/core-fellowship/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -67,13 +65,13 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// Storage: `CoreFellowship::Params` (r:0 w:1) - /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`) fn set_params() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_836_000 picoseconds. - Weight::from_parts(7_057_000, 0) + // Minimum execution time: 7_633_000 picoseconds. 
+ Weight::from_parts(8_018_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `CoreFellowship::Member` (r:1 w:1) @@ -81,7 +79,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `RankedCollective::Members` (r:1 w:1) /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `CoreFellowship::Params` (r:1 w:0) - /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::MemberCount` (r:1 w:1) /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::IdToIndex` (r:1 w:1) @@ -92,10 +90,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn bump_offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `17274` + // Measured: `17278` // Estimated: `19894` - // Minimum execution time: 55_535_000 picoseconds. - Weight::from_parts(57_104_000, 19894) + // Minimum execution time: 57_597_000 picoseconds. + Weight::from_parts(58_825_000, 19894) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -104,7 +102,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `RankedCollective::Members` (r:1 w:1) /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `CoreFellowship::Params` (r:1 w:0) - /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::MemberCount` (r:1 w:1) /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::IdToIndex` (r:1 w:1) @@ -115,10 +113,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn bump_demote() -> Weight { // Proof Size summary in bytes: - // Measured: `17384` + // Measured: `17388` // Estimated: `19894` - // Minimum execution time: 59_111_000 picoseconds. - Weight::from_parts(61_394_000, 19894) + // Minimum execution time: 61_387_000 picoseconds. + Weight::from_parts(63_408_000, 19894) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -130,8 +128,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `388` // Estimated: `3514` - // Minimum execution time: 16_166_000 picoseconds. - Weight::from_parts(16_773_000, 3514) + // Minimum execution time: 15_941_000 picoseconds. + Weight::from_parts(16_547_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -149,8 +147,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `146` // Estimated: `3514` - // Minimum execution time: 25_508_000 picoseconds. - Weight::from_parts(25_952_000, 3514) + // Minimum execution time: 24_963_000 picoseconds. 
+ Weight::from_parts(25_873_000, 3514) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -159,7 +157,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `CoreFellowship::Member` (r:1 w:1) /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) /// Storage: `CoreFellowship::Params` (r:1 w:0) - /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::MemberCount` (r:1 w:1) /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) @@ -170,10 +168,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn promote() -> Weight { // Proof Size summary in bytes: - // Measured: `17252` + // Measured: `16931` // Estimated: `19894` - // Minimum execution time: 51_102_000 picoseconds. - Weight::from_parts(53_302_000, 19894) + // Minimum execution time: 55_062_000 picoseconds. + Weight::from_parts(58_422_000, 19894) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -187,8 +185,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3514` - // Minimum execution time: 16_035_000 picoseconds. - Weight::from_parts(16_529_000, 3514) + // Minimum execution time: 15_901_000 picoseconds. + Weight::from_parts(16_746_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -200,8 +198,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3514` - // Minimum execution time: 14_966_000 picoseconds. - Weight::from_parts(15_340_000, 3514) + // Minimum execution time: 14_768_000 picoseconds. + Weight::from_parts(15_421_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -215,8 +213,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `16843` // Estimated: `19894` - // Minimum execution time: 35_137_000 picoseconds. - Weight::from_parts(36_285_000, 19894) + // Minimum execution time: 36_925_000 picoseconds. + Weight::from_parts(38_330_000, 19894) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -228,8 +226,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `79` // Estimated: `19894` - // Minimum execution time: 24_307_000 picoseconds. - Weight::from_parts(25_426_000, 19894) + // Minimum execution time: 25_210_000 picoseconds. + Weight::from_parts(26_247_000, 19894) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -238,13 +236,13 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests. 
impl WeightInfo for () { /// Storage: `CoreFellowship::Params` (r:0 w:1) - /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`) fn set_params() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_836_000 picoseconds. - Weight::from_parts(7_057_000, 0) + // Minimum execution time: 7_633_000 picoseconds. + Weight::from_parts(8_018_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `CoreFellowship::Member` (r:1 w:1) @@ -252,7 +250,7 @@ impl WeightInfo for () { /// Storage: `RankedCollective::Members` (r:1 w:1) /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `CoreFellowship::Params` (r:1 w:0) - /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::MemberCount` (r:1 w:1) /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::IdToIndex` (r:1 w:1) @@ -263,10 +261,10 @@ impl WeightInfo for () { /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn bump_offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `17274` + // Measured: `17278` // Estimated: `19894` - // Minimum execution time: 55_535_000 picoseconds. - Weight::from_parts(57_104_000, 19894) + // Minimum execution time: 57_597_000 picoseconds. + Weight::from_parts(58_825_000, 19894) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -275,7 +273,7 @@ impl WeightInfo for () { /// Storage: `RankedCollective::Members` (r:1 w:1) /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `CoreFellowship::Params` (r:1 w:0) - /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::MemberCount` (r:1 w:1) /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::IdToIndex` (r:1 w:1) @@ -286,10 +284,10 @@ impl WeightInfo for () { /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn bump_demote() -> Weight { // Proof Size summary in bytes: - // Measured: `17384` + // Measured: `17388` // Estimated: `19894` - // Minimum execution time: 59_111_000 picoseconds. - Weight::from_parts(61_394_000, 19894) + // Minimum execution time: 61_387_000 picoseconds. + Weight::from_parts(63_408_000, 19894) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -301,8 +299,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `388` // Estimated: `3514` - // Minimum execution time: 16_166_000 picoseconds. 
- Weight::from_parts(16_773_000, 3514) + // Minimum execution time: 15_941_000 picoseconds. + Weight::from_parts(16_547_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -320,8 +318,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `146` // Estimated: `3514` - // Minimum execution time: 25_508_000 picoseconds. - Weight::from_parts(25_952_000, 3514) + // Minimum execution time: 24_963_000 picoseconds. + Weight::from_parts(25_873_000, 3514) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -330,7 +328,7 @@ impl WeightInfo for () { /// Storage: `CoreFellowship::Member` (r:1 w:1) /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) /// Storage: `CoreFellowship::Params` (r:1 w:0) - /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::MemberCount` (r:1 w:1) /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) @@ -341,10 +339,10 @@ impl WeightInfo for () { /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn promote() -> Weight { // Proof Size summary in bytes: - // Measured: `17252` + // Measured: `16931` // Estimated: `19894` - // Minimum execution time: 51_102_000 picoseconds. - Weight::from_parts(53_302_000, 19894) + // Minimum execution time: 55_062_000 picoseconds. + Weight::from_parts(58_422_000, 19894) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -358,8 +356,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3514` - // Minimum execution time: 16_035_000 picoseconds. - Weight::from_parts(16_529_000, 3514) + // Minimum execution time: 15_901_000 picoseconds. + Weight::from_parts(16_746_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -371,8 +369,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3514` - // Minimum execution time: 14_966_000 picoseconds. - Weight::from_parts(15_340_000, 3514) + // Minimum execution time: 14_768_000 picoseconds. + Weight::from_parts(15_421_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -386,8 +384,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `16843` // Estimated: `19894` - // Minimum execution time: 35_137_000 picoseconds. - Weight::from_parts(36_285_000, 19894) + // Minimum execution time: 36_925_000 picoseconds. + Weight::from_parts(38_330_000, 19894) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -399,8 +397,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `79` // Estimated: `19894` - // Minimum execution time: 24_307_000 picoseconds. - Weight::from_parts(25_426_000, 19894) + // Minimum execution time: 25_210_000 picoseconds. 
+ Weight::from_parts(26_247_000, 19894) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } -- GitLab From f86f2131fe0066cf9009cb909e843da664b3df98 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Fri, 17 May 2024 07:52:19 +0200 Subject: [PATCH 028/106] Contracts: remove kitchensink dynamic parameters (#4489) Using Dynamic Parameters for contracts seems like a bad idea for now. Given that we have benchmarks for each host function (in addition to our extrinsics), parameter storage reads will be counted multiple times. We will work on updates to the benchmarking framework to mitigate this issue in future iterations. --------- Co-authored-by: command-bot <> --- substrate/bin/node/runtime/src/lib.rs | 26 +- substrate/frame/contracts/src/weights.rs | 1472 +++++++++++----------- 2 files changed, 710 insertions(+), 788 deletions(-) diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index e05f8f61c30..b1f948afa56 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -1341,6 +1341,9 @@ impl pallet_tips::Config for Runtime { } parameter_types! { + pub const DepositPerItem: Balance = deposit(1, 0); + pub const DepositPerByte: Balance = deposit(0, 1); + pub const DefaultDepositLimit: Balance = deposit(1024, 1024 * 1024); pub Schedule: pallet_contracts::Schedule = Default::default(); pub CodeHashLockupDepositPercent: Perbill = Perbill::from_percent(30); } @@ -1358,9 +1361,9 @@ impl pallet_contracts::Config for Runtime { /// change because that would break already deployed contracts. The `Call` structure itself /// is not allowed to change the indices of existing pallets, too. type CallFilter = Nothing; - type DepositPerItem = dynamic_params::contracts::DepositPerItem; - type DepositPerByte = dynamic_params::contracts::DepositPerByte; - type DefaultDepositLimit = dynamic_params::contracts::DefaultDepositLimit; + type DepositPerItem = DepositPerItem; + type DepositPerByte = DepositPerByte; + type DefaultDepositLimit = DefaultDepositLimit; type CallStack = [pallet_contracts::Frame; 5]; type WeightPrice = pallet_transaction_payment::Pallet; type WeightInfo = pallet_contracts::weights::SubstrateWeight; @@ -2182,19 +2185,6 @@ pub mod dynamic_params { #[codec(index = 1)] pub static ByteDeposit: Balance = 1 * CENTS; } - - #[dynamic_pallet_params] - #[codec(index = 1)] - pub mod contracts { - #[codec(index = 0)] - pub static DepositPerItem: Balance = deposit(1, 0); - - #[codec(index = 1)] - pub static DepositPerByte: Balance = deposit(0, 1); - - #[codec(index = 2)] - pub static DefaultDepositLimit: Balance = deposit(1024, 1024 * 1024); - } } #[cfg(feature = "runtime-benchmarks")] @@ -2220,10 +2210,6 @@ impl EnsureOriginWithArg for DynamicParamet frame_system::ensure_root(origin.clone()).map_err(|_| origin)?; return Ok(()) }, - RuntimeParametersKey::Contracts(_) => { - frame_system::ensure_root(origin.clone()).map_err(|_| origin)?; - return Ok(()) - }, } } diff --git a/substrate/frame/contracts/src/weights.rs b/substrate/frame/contracts/src/weights.rs index b95b1d1a9a2..950476698cd 100644 --- a/substrate/frame/contracts/src/weights.rs +++ b/substrate/frame/contracts/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_contracts` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
DATE: 2024-05-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -143,8 +143,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_149_000 picoseconds. - Weight::from_parts(2_274_000, 1627) + // Minimum execution time: 2_002_000 picoseconds. + Weight::from_parts(2_193_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -154,10 +154,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `452 + k * (69 ยฑ0)` // Estimated: `442 + k * (70 ยฑ0)` - // Minimum execution time: 12_863_000 picoseconds. - Weight::from_parts(13_188_000, 442) - // Standard Error: 1_053 - .saturating_add(Weight::from_parts(1_105_325, 0).saturating_mul(k.into())) + // Minimum execution time: 12_339_000 picoseconds. + Weight::from_parts(12_682_000, 442) + // Standard Error: 1_302 + .saturating_add(Weight::from_parts(1_163_234, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -171,10 +171,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211 + c * (1 ยฑ0)` // Estimated: `6149 + c * (1 ยฑ0)` - // Minimum execution time: 8_432_000 picoseconds. - Weight::from_parts(9_203_290, 6149) + // Minimum execution time: 8_145_000 picoseconds. + Weight::from_parts(8_747_247, 6149) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_186, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_154, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -187,8 +187,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 17_177_000 picoseconds. - Weight::from_parts(17_663_000, 6450) + // Minimum execution time: 16_950_000 picoseconds. + Weight::from_parts(17_498_000, 6450) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -201,10 +201,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `171 + k * (1 ยฑ0)` // Estimated: `3635 + k * (1 ยฑ0)` - // Minimum execution time: 3_636_000 picoseconds. - Weight::from_parts(3_774_000, 3635) - // Standard Error: 542 - .saturating_add(Weight::from_parts(1_260_058, 0).saturating_mul(k.into())) + // Minimum execution time: 3_431_000 picoseconds. 
+ Weight::from_parts(2_161_027, 3635) + // Standard Error: 949 + .saturating_add(Weight::from_parts(1_219_406, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -214,8 +214,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:0 w:1) @@ -223,13 +221,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 125952]`. fn v12_migration_step(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `328 + c * (1 ยฑ0)` - // Estimated: `6266 + c * (1 ยฑ0)` - // Minimum execution time: 21_585_000 picoseconds. - Weight::from_parts(22_069_944, 6266) + // Measured: `325 + c * (1 ยฑ0)` + // Estimated: `6263 + c * (1 ยฑ0)` + // Minimum execution time: 16_384_000 picoseconds. + Weight::from_parts(16_741_331, 6263) // Standard Error: 1 - .saturating_add(Weight::from_parts(404, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(Weight::from_parts(375, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } @@ -239,8 +237,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 13_283_000 picoseconds. - Weight::from_parts(14_015_000, 6380) + // Minimum execution time: 12_529_000 picoseconds. + Weight::from_parts(13_319_000, 6380) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -254,8 +252,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 48_022_000 picoseconds. - Weight::from_parts(49_627_000, 6292) + // Minimum execution time: 47_462_000 picoseconds. + Weight::from_parts(48_784_000, 6292) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -267,8 +265,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 58_374_000 picoseconds. - Weight::from_parts(59_615_000, 6534) + // Minimum execution time: 55_712_000 picoseconds. + Weight::from_parts(58_629_000, 6534) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -278,8 +276,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `409` // Estimated: `6349` - // Minimum execution time: 12_559_000 picoseconds. - Weight::from_parts(12_947_000, 6349) + // Minimum execution time: 11_992_000 picoseconds. 
+ Weight::from_parts(12_686_000, 6349) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -289,8 +287,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_480_000 picoseconds. - Weight::from_parts(2_680_000, 1627) + // Minimum execution time: 2_498_000 picoseconds. + Weight::from_parts(2_594_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -302,8 +300,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 12_625_000 picoseconds. - Weight::from_parts(13_094_000, 3631) + // Minimum execution time: 12_179_000 picoseconds. + Weight::from_parts(12_805_000, 3631) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -313,8 +311,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_836_000 picoseconds. - Weight::from_parts(5_182_000, 3607) + // Minimum execution time: 4_695_000 picoseconds. + Weight::from_parts(5_105_000, 3607) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -325,8 +323,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 6_319_000 picoseconds. - Weight::from_parts(6_582_000, 3632) + // Minimum execution time: 6_223_000 picoseconds. + Weight::from_parts(6_509_000, 3632) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -337,15 +335,13 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 6_532_000 picoseconds. - Weight::from_parts(6_909_000, 3607) + // Minimum execution time: 6_073_000 picoseconds. + Weight::from_parts(6_524_000, 3607) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -361,20 +357,18 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 125952]`. fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `804 + c * (1 ยฑ0)` - // Estimated: `9217 + c * (1 ยฑ0)` - // Minimum execution time: 305_778_000 picoseconds. - Weight::from_parts(282_321_249, 9217) - // Standard Error: 72 - .saturating_add(Weight::from_parts(33_456, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Measured: `801 + c * (1 ยฑ0)` + // Estimated: `6739 + c * (1 ยฑ0)` + // Minimum execution time: 289_627_000 picoseconds. 
+ Weight::from_parts(281_167_857, 6739) + // Standard Error: 68 + .saturating_add(Weight::from_parts(33_442, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:2 w:2) @@ -396,17 +390,17 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `326` - // Estimated: `8740` - // Minimum execution time: 3_810_809_000 picoseconds. - Weight::from_parts(739_511_598, 8740) - // Standard Error: 140 - .saturating_add(Weight::from_parts(67_574, 0).saturating_mul(c.into())) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_488, 0).saturating_mul(i.into())) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_537, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(14_u64)) + // Measured: `323` + // Estimated: `8737` + // Minimum execution time: 3_829_638_000 picoseconds. + Weight::from_parts(744_994_885, 8737) + // Standard Error: 165 + .saturating_add(Weight::from_parts(68_083, 0).saturating_mul(c.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(1_484, 0).saturating_mul(i.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(1_581, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -415,8 +409,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) @@ -433,21 +425,19 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 1048576]`. fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `563` - // Estimated: `8982` - // Minimum execution time: 1_986_789_000 picoseconds. - Weight::from_parts(2_017_466_000, 8982) - // Standard Error: 26 - .saturating_add(Weight::from_parts(827, 0).saturating_mul(i.into())) - // Standard Error: 26 - .saturating_add(Weight::from_parts(781, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(13_u64)) + // Measured: `560` + // Estimated: `6504` + // Minimum execution time: 1_960_218_000 picoseconds. 
+ Weight::from_parts(1_976_273_000, 6504) + // Standard Error: 25 + .saturating_add(Weight::from_parts(866, 0).saturating_mul(i.into())) + // Standard Error: 25 + .saturating_add(Weight::from_parts(824, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -462,17 +452,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `829` - // Estimated: `9244` - // Minimum execution time: 210_724_000 picoseconds. - Weight::from_parts(218_608_000, 9244) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Measured: `826` + // Estimated: `6766` + // Minimum execution time: 200_542_000 picoseconds. + Weight::from_parts(209_713_000, 6766) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) @@ -484,19 +472,17 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 125952]`. fn upload_code_determinism_enforced(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `6085` - // Minimum execution time: 271_259_000 picoseconds. - Weight::from_parts(298_852_854, 6085) - // Standard Error: 65 - .saturating_add(Weight::from_parts(33_547, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 258_375_000 picoseconds. + Weight::from_parts(271_214_455, 3607) + // Standard Error: 61 + .saturating_add(Weight::from_parts(32_587, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) @@ -508,13 +494,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 125952]`. 
fn upload_code_determinism_relaxed(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `6085` - // Minimum execution time: 278_167_000 picoseconds. - Weight::from_parts(311_888_941, 6085) - // Standard Error: 58 - .saturating_add(Weight::from_parts(33_595, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 279_363_000 picoseconds. + Weight::from_parts(257_721_413, 3607) + // Standard Error: 81 + .saturating_add(Weight::from_parts(33_850, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -531,8 +517,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 47_403_000 picoseconds. - Weight::from_parts(48_707_000, 3780) + // Minimum execution time: 45_096_000 picoseconds. + Weight::from_parts(46_661_000, 3780) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -548,8 +534,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `552` // Estimated: `8967` - // Minimum execution time: 35_361_000 picoseconds. - Weight::from_parts(36_714_000, 8967) + // Minimum execution time: 34_260_000 picoseconds. + Weight::from_parts(35_761_000, 8967) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -558,10 +544,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_340_000 picoseconds. - Weight::from_parts(9_360_237, 0) - // Standard Error: 269 - .saturating_add(Weight::from_parts(249_611, 0).saturating_mul(r.into())) + // Minimum execution time: 10_265_000 picoseconds. + Weight::from_parts(10_174_088, 0) + // Standard Error: 275 + .saturating_add(Weight::from_parts(271_791, 0).saturating_mul(r.into())) } /// Storage: `Contracts::ContractInfoOf` (r:1600 w:0) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -570,10 +556,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `509 + r * (77 ยฑ0)` // Estimated: `1467 + r * (2552 ยฑ0)` - // Minimum execution time: 9_059_000 picoseconds. - Weight::from_parts(9_201_000, 1467) - // Standard Error: 5_643 - .saturating_add(Weight::from_parts(3_343_859, 0).saturating_mul(r.into())) + // Minimum execution time: 10_498_000 picoseconds. + Weight::from_parts(10_551_000, 1467) + // Standard Error: 5_538 + .saturating_add(Weight::from_parts(3_269_462, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2552).saturating_mul(r.into())) } @@ -584,10 +570,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `517 + r * (170 ยฑ0)` // Estimated: `1468 + r * (2645 ยฑ0)` - // Minimum execution time: 9_220_000 picoseconds. - Weight::from_parts(9_399_000, 1468) - // Standard Error: 6_194 - .saturating_add(Weight::from_parts(4_172_011, 0).saturating_mul(r.into())) + // Minimum execution time: 10_289_000 picoseconds. 
+ Weight::from_parts(10_469_000, 1468) + // Standard Error: 5_674 + .saturating_add(Weight::from_parts(4_105_274, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2645).saturating_mul(r.into())) } @@ -596,50 +582,50 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_707_000 picoseconds. - Weight::from_parts(10_100_456, 0) - // Standard Error: 234 - .saturating_add(Weight::from_parts(338_464, 0).saturating_mul(r.into())) + // Minimum execution time: 10_769_000 picoseconds. + Weight::from_parts(10_389_944, 0) + // Standard Error: 240 + .saturating_add(Weight::from_parts(350_466, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_origin(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_524_000 picoseconds. - Weight::from_parts(10_813_389, 0) - // Standard Error: 76 - .saturating_add(Weight::from_parts(102_535, 0).saturating_mul(r.into())) + // Minimum execution time: 10_443_000 picoseconds. + Weight::from_parts(11_651_820, 0) + // Standard Error: 91 + .saturating_add(Weight::from_parts(100_579, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_root(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_799_000 picoseconds. - Weight::from_parts(10_886_744, 0) - // Standard Error: 75 - .saturating_add(Weight::from_parts(80_901, 0).saturating_mul(r.into())) + // Minimum execution time: 10_474_000 picoseconds. + Weight::from_parts(11_313_654, 0) + // Standard Error: 103 + .saturating_add(Weight::from_parts(85_902, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_address(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_895_000 picoseconds. - Weight::from_parts(10_658_338, 0) - // Standard Error: 189 - .saturating_add(Weight::from_parts(249_694, 0).saturating_mul(r.into())) + // Minimum execution time: 10_360_000 picoseconds. + Weight::from_parts(11_283_384, 0) + // Standard Error: 163 + .saturating_add(Weight::from_parts(253_111, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_gas_left(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_643_000 picoseconds. - Weight::from_parts(10_932_126, 0) - // Standard Error: 153 - .saturating_add(Weight::from_parts(280_924, 0).saturating_mul(r.into())) + // Minimum execution time: 10_289_000 picoseconds. + Weight::from_parts(10_747_872, 0) + // Standard Error: 197 + .saturating_add(Weight::from_parts(299_097, 0).saturating_mul(r.into())) } /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) @@ -648,10 +634,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `3599` - // Minimum execution time: 9_548_000 picoseconds. - Weight::from_parts(9_737_000, 3599) - // Standard Error: 971 - .saturating_add(Weight::from_parts(1_704_134, 0).saturating_mul(r.into())) + // Minimum execution time: 10_368_000 picoseconds. 
+ Weight::from_parts(29_685_372, 3599) + // Standard Error: 1_202 + .saturating_add(Weight::from_parts(1_517_645, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 1600]`. @@ -659,40 +645,40 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_172_000 picoseconds. - Weight::from_parts(18_255_933, 0) - // Standard Error: 540 - .saturating_add(Weight::from_parts(230_929, 0).saturating_mul(r.into())) + // Minimum execution time: 10_528_000 picoseconds. + Weight::from_parts(11_653_603, 0) + // Standard Error: 203 + .saturating_add(Weight::from_parts(241_937, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_minimum_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_232_000 picoseconds. - Weight::from_parts(9_796_584, 0) - // Standard Error: 208 - .saturating_add(Weight::from_parts(239_962, 0).saturating_mul(r.into())) + // Minimum execution time: 10_385_000 picoseconds. + Weight::from_parts(11_483_212, 0) + // Standard Error: 227 + .saturating_add(Weight::from_parts(248_076, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_block_number(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_747_000 picoseconds. - Weight::from_parts(8_733_230, 0) - // Standard Error: 377 - .saturating_add(Weight::from_parts(253_801, 0).saturating_mul(r.into())) + // Minimum execution time: 10_341_000 picoseconds. + Weight::from_parts(12_055_382, 0) + // Standard Error: 1_231 + .saturating_add(Weight::from_parts(249_662, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_now(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_214_000 picoseconds. - Weight::from_parts(10_194_153, 0) - // Standard Error: 516 - .saturating_add(Weight::from_parts(247_621, 0).saturating_mul(r.into())) + // Minimum execution time: 10_467_000 picoseconds. + Weight::from_parts(10_579_667, 0) + // Standard Error: 247 + .saturating_add(Weight::from_parts(246_711, 0).saturating_mul(r.into())) } /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) @@ -701,10 +687,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `67` // Estimated: `1552` - // Minimum execution time: 9_022_000 picoseconds. - Weight::from_parts(22_051_160, 1552) - // Standard Error: 697 - .saturating_add(Weight::from_parts(709_612, 0).saturating_mul(r.into())) + // Minimum execution time: 10_293_000 picoseconds. + Weight::from_parts(18_229_738, 1552) + // Standard Error: 452 + .saturating_add(Weight::from_parts(655_277, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 1600]`. @@ -712,17 +698,15 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_135_000 picoseconds. - Weight::from_parts(10_646_215, 0) - // Standard Error: 161 - .saturating_add(Weight::from_parts(170_336, 0).saturating_mul(r.into())) + // Minimum execution time: 10_355_000 picoseconds. 
+ Weight::from_parts(11_641_920, 0) + // Standard Error: 166 + .saturating_add(Weight::from_parts(168_271, 0).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -736,13 +720,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1048576]`. fn seal_input_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `9287` - // Minimum execution time: 273_896_000 picoseconds. - Weight::from_parts(148_309_654, 9287) + // Measured: `869` + // Estimated: `6809` + // Minimum execution time: 268_424_000 picoseconds. + Weight::from_parts(136_261_773, 6809) // Standard Error: 16 - .saturating_add(Weight::from_parts(1_355, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + .saturating_add(Weight::from_parts(1_373, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// The range of component `r` is `[0, 1]`. @@ -750,27 +734,25 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_906_000 picoseconds. - Weight::from_parts(9_264_446, 0) - // Standard Error: 19_760 - .saturating_add(Weight::from_parts(1_256_053, 0).saturating_mul(r.into())) + // Minimum execution time: 10_044_000 picoseconds. + Weight::from_parts(10_550_491, 0) + // Standard Error: 20_456 + .saturating_add(Weight::from_parts(925_808, 0).saturating_mul(r.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_return_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_266_000 picoseconds. - Weight::from_parts(10_602_261, 0) + // Minimum execution time: 11_361_000 picoseconds. + Weight::from_parts(11_935_556, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(318, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(315, 0).saturating_mul(n.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:3 w:3) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:1 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:33 w:33) @@ -790,14 +772,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1]`. 
fn seal_terminate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `4805 + r * (2121 ยฑ0)` - // Estimated: `13220 + r * (81321 ยฑ0)` - // Minimum execution time: 295_922_000 picoseconds. - Weight::from_parts(322_472_877, 13220) - // Standard Error: 993_812 - .saturating_add(Weight::from_parts(259_075_422, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().reads((36_u64).saturating_mul(r.into()))) + // Measured: `4802 + r * (2121 ยฑ0)` + // Estimated: `10742 + r * (81321 ยฑ0)` + // Minimum execution time: 293_793_000 picoseconds. + Weight::from_parts(314_285_185, 10742) + // Standard Error: 808_383 + .saturating_add(Weight::from_parts(256_215_014, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().reads((38_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(T::DbWeight::get().writes((41_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 81321).saturating_mul(r.into())) @@ -809,10 +791,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 9_427_000 picoseconds. - Weight::from_parts(12_996_213, 1561) - // Standard Error: 845 - .saturating_add(Weight::from_parts(1_182_642, 0).saturating_mul(r.into())) + // Minimum execution time: 10_323_000 picoseconds. + Weight::from_parts(10_996_645, 1561) + // Standard Error: 566 + .saturating_add(Weight::from_parts(1_133_870, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 1600]`. @@ -820,10 +802,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_304_000 picoseconds. - Weight::from_parts(25_678_842, 0) - // Standard Error: 1_855 - .saturating_add(Weight::from_parts(1_814_511, 0).saturating_mul(r.into())) + // Minimum execution time: 10_122_000 picoseconds. + Weight::from_parts(17_368_451, 0) + // Standard Error: 679 + .saturating_add(Weight::from_parts(1_660_129, 0).saturating_mul(r.into())) } /// Storage: `System::EventTopics` (r:4 w:4) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -833,12 +815,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `990 + t * (2475 ยฑ0)` - // Minimum execution time: 23_425_000 picoseconds. - Weight::from_parts(15_229_010, 990) - // Standard Error: 14_380 - .saturating_add(Weight::from_parts(2_545_653, 0).saturating_mul(t.into())) - // Standard Error: 4 - .saturating_add(Weight::from_parts(594, 0).saturating_mul(n.into())) + // Minimum execution time: 24_515_000 picoseconds. + Weight::from_parts(16_807_493, 990) + // Standard Error: 13_923 + .saturating_add(Weight::from_parts(2_315_122, 0).saturating_mul(t.into())) + // Standard Error: 3 + .saturating_add(Weight::from_parts(573, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into())) @@ -848,20 +830,20 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_117_000 picoseconds. 
- Weight::from_parts(12_887_533, 0) - // Standard Error: 83 - .saturating_add(Weight::from_parts(99_373, 0).saturating_mul(r.into())) + // Minimum execution time: 9_596_000 picoseconds. + Weight::from_parts(9_113_960, 0) + // Standard Error: 139 + .saturating_add(Weight::from_parts(112_197, 0).saturating_mul(r.into())) } /// The range of component `i` is `[0, 1048576]`. fn seal_debug_message_per_byte(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_982_000 picoseconds. - Weight::from_parts(11_176_000, 0) + // Minimum execution time: 11_260_000 picoseconds. + Weight::from_parts(11_341_000, 0) // Standard Error: 8 - .saturating_add(Weight::from_parts(983, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(984, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -870,10 +852,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `108 + r * (150 ยฑ0)` // Estimated: `105 + r * (151 ยฑ0)` - // Minimum execution time: 9_150_000 picoseconds. - Weight::from_parts(9_269_000, 105) - // Standard Error: 8_147 - .saturating_add(Weight::from_parts(5_339_554, 0).saturating_mul(r.into())) + // Minimum execution time: 10_660_000 picoseconds. + Weight::from_parts(10_762_000, 105) + // Standard Error: 7_920 + .saturating_add(Weight::from_parts(5_122_380, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) @@ -885,10 +867,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `245` // Estimated: `245` - // Minimum execution time: 19_085_000 picoseconds. - Weight::from_parts(20_007_323, 245) - // Standard Error: 3 - .saturating_add(Weight::from_parts(291, 0).saturating_mul(n.into())) + // Minimum execution time: 19_446_000 picoseconds. + Weight::from_parts(20_166_940, 245) + // Standard Error: 2 + .saturating_add(Weight::from_parts(287, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -899,10 +881,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ยฑ0)` // Estimated: `248 + n * (1 ยฑ0)` - // Minimum execution time: 19_127_000 picoseconds. - Weight::from_parts(21_152_987, 248) - // Standard Error: 3 - .saturating_add(Weight::from_parts(42, 0).saturating_mul(n.into())) + // Minimum execution time: 19_249_000 picoseconds. + Weight::from_parts(20_875_560, 248) + // Standard Error: 2 + .saturating_add(Weight::from_parts(73, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -914,10 +896,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `108 + r * (150 ยฑ0)` // Estimated: `105 + r * (151 ยฑ0)` - // Minimum execution time: 9_264_000 picoseconds. - Weight::from_parts(9_449_000, 105) - // Standard Error: 8_196 - .saturating_add(Weight::from_parts(5_325_578, 0).saturating_mul(r.into())) + // Minimum execution time: 10_477_000 picoseconds. 
+ Weight::from_parts(10_633_000, 105) + // Standard Error: 8_552 + .saturating_add(Weight::from_parts(5_159_505, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) @@ -929,10 +911,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ยฑ0)` // Estimated: `248 + n * (1 ยฑ0)` - // Minimum execution time: 18_489_000 picoseconds. - Weight::from_parts(19_916_153, 248) + // Minimum execution time: 19_265_000 picoseconds. + Weight::from_parts(20_699_861, 248) // Standard Error: 2 - .saturating_add(Weight::from_parts(97, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(77, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -944,10 +926,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `108 + r * (150 ยฑ0)` // Estimated: `105 + r * (151 ยฑ0)` - // Minimum execution time: 9_299_000 picoseconds. - Weight::from_parts(9_464_000, 105) - // Standard Error: 6_827 - .saturating_add(Weight::from_parts(4_720_699, 0).saturating_mul(r.into())) + // Minimum execution time: 10_336_000 picoseconds. + Weight::from_parts(10_466_000, 105) + // Standard Error: 7_699 + .saturating_add(Weight::from_parts(4_542_224, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) } @@ -958,10 +940,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ยฑ0)` // Estimated: `248 + n * (1 ยฑ0)` - // Minimum execution time: 17_981_000 picoseconds. - Weight::from_parts(19_802_353, 248) + // Minimum execution time: 18_513_000 picoseconds. + Weight::from_parts(20_357_236, 248) // Standard Error: 3 - .saturating_add(Weight::from_parts(617, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(588, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -972,10 +954,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `108 + r * (150 ยฑ0)` // Estimated: `105 + r * (151 ยฑ0)` - // Minimum execution time: 9_891_000 picoseconds. - Weight::from_parts(10_046_000, 105) - // Standard Error: 6_993 - .saturating_add(Weight::from_parts(4_601_167, 0).saturating_mul(r.into())) + // Minimum execution time: 10_432_000 picoseconds. + Weight::from_parts(10_658_000, 105) + // Standard Error: 7_129 + .saturating_add(Weight::from_parts(4_423_298, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) } @@ -986,10 +968,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ยฑ0)` // Estimated: `248 + n * (1 ยฑ0)` - // Minimum execution time: 17_229_000 picoseconds. - Weight::from_parts(18_302_733, 248) + // Minimum execution time: 17_663_000 picoseconds. 
+ Weight::from_parts(19_107_828, 248) // Standard Error: 2 - .saturating_add(Weight::from_parts(112, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(86, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1000,10 +982,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `108 + r * (150 ยฑ0)` // Estimated: `105 + r * (151 ยฑ0)` - // Minimum execution time: 9_323_000 picoseconds. - Weight::from_parts(9_462_000, 105) - // Standard Error: 8_031 - .saturating_add(Weight::from_parts(5_433_981, 0).saturating_mul(r.into())) + // Minimum execution time: 10_254_000 picoseconds. + Weight::from_parts(10_332_000, 105) + // Standard Error: 9_485 + .saturating_add(Weight::from_parts(5_242_433, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) @@ -1015,10 +997,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ยฑ0)` // Estimated: `248 + n * (1 ยฑ0)` - // Minimum execution time: 18_711_000 picoseconds. - Weight::from_parts(20_495_670, 248) + // Minimum execution time: 19_410_000 picoseconds. + Weight::from_parts(21_347_311, 248) // Standard Error: 3 - .saturating_add(Weight::from_parts(640, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(607, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1030,10 +1012,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `770` // Estimated: `4221 + r * (2475 ยฑ0)` - // Minimum execution time: 9_226_000 picoseconds. - Weight::from_parts(9_394_000, 4221) - // Standard Error: 14_741 - .saturating_add(Weight::from_parts(34_179_316, 0).saturating_mul(r.into())) + // Minimum execution time: 10_365_000 picoseconds. + Weight::from_parts(10_514_000, 4221) + // Standard Error: 18_360 + .saturating_add(Weight::from_parts(33_433_850, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -1048,18 +1030,16 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// Storage: `System::EventTopics` (r:801 w:801) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_call(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `520 + r * (170 ยฑ0)` - // Estimated: `6463 + r * (2646 ยฑ0)` - // Minimum execution time: 9_455_000 picoseconds. - Weight::from_parts(9_671_000, 6463) - // Standard Error: 126_080 - .saturating_add(Weight::from_parts(244_204_040, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Measured: `517 + r * (170 ยฑ0)` + // Estimated: `3985 + r * (2646 ยฑ0)` + // Minimum execution time: 10_332_000 picoseconds. 
+ Weight::from_parts(10_424_000, 3985) + // Standard Error: 117_754 + .saturating_add(Weight::from_parts(242_191_645, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(r.into()))) @@ -1071,19 +1051,17 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// Storage: `System::EventTopics` (r:736 w:736) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:0 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_delegate_call(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + r * (527 ยฑ0)` - // Estimated: `6447 + r * (2583 ยฑ10)` - // Minimum execution time: 9_274_000 picoseconds. - Weight::from_parts(9_437_000, 6447) - // Standard Error: 150_832 - .saturating_add(Weight::from_parts(244_196_269, 0).saturating_mul(r.into())) + // Estimated: `6444 + r * (2583 ยฑ10)` + // Minimum execution time: 10_550_000 picoseconds. + Weight::from_parts(10_667_000, 6444) + // Standard Error: 147_918 + .saturating_add(Weight::from_parts(242_824_174, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2583).saturating_mul(r.into())) @@ -1098,25 +1076,23 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1048576]`. fn seal_call_per_transfer_clone_byte(t: u32, c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `699 + t * (277 ยฑ0)` - // Estimated: `6639 + t * (3458 ยฑ0)` - // Minimum execution time: 214_483_000 picoseconds. - Weight::from_parts(122_634_366, 6639) - // Standard Error: 2_499_235 - .saturating_add(Weight::from_parts(41_326_008, 0).saturating_mul(t.into())) + // Measured: `696 + t * (277 ยฑ0)` + // Estimated: `6636 + t * (3457 ยฑ0)` + // Minimum execution time: 213_206_000 picoseconds. 
+ Weight::from_parts(120_511_970, 6636) + // Standard Error: 2_501_856 + .saturating_add(Weight::from_parts(40_016_645, 0).saturating_mul(t.into())) // Standard Error: 3 - .saturating_add(Weight::from_parts(422, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(Weight::from_parts(420, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 3458).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 3457).saturating_mul(t.into())) } /// Storage: `Contracts::CodeInfoOf` (r:800 w:800) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) @@ -1128,20 +1104,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `System::Account` (r:802 w:802) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::EventTopics` (r:801 w:801) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[1, 800]`. fn seal_instantiate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1097 + r * (188 ยฑ0)` - // Estimated: `6990 + r * (2664 ยฑ0)` - // Minimum execution time: 341_569_000 picoseconds. - Weight::from_parts(360_574_000, 6990) - // Standard Error: 259_746 - .saturating_add(Weight::from_parts(337_944_674, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `1094 + r * (188 ยฑ0)` + // Estimated: `6987 + r * (2664 ยฑ0)` + // Minimum execution time: 334_708_000 picoseconds. + Weight::from_parts(346_676_000, 6987) + // Standard Error: 236_074 + .saturating_add(Weight::from_parts(330_734_734, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(r.into()))) @@ -1157,8 +1131,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `System::Account` (r:3 w:3) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[0, 1]`. @@ -1166,17 +1138,17 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 983040]`. 
fn seal_instantiate_per_transfer_input_salt_byte(t: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `760 + t * (104 ยฑ0)` - // Estimated: `6719 + t * (2549 ยฑ1)` - // Minimum execution time: 1_863_119_000 picoseconds. - Weight::from_parts(900_189_174, 6719) - // Standard Error: 13_040_979 - .saturating_add(Weight::from_parts(4_056_063, 0).saturating_mul(t.into())) - // Standard Error: 20 - .saturating_add(Weight::from_parts(1_028, 0).saturating_mul(i.into())) - // Standard Error: 20 - .saturating_add(Weight::from_parts(1_173, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(10_u64)) + // Measured: `757 + t * (104 ยฑ0)` + // Estimated: `6716 + t * (2549 ยฑ1)` + // Minimum execution time: 1_854_462_000 picoseconds. + Weight::from_parts(855_253_052, 6716) + // Standard Error: 13_502_046 + .saturating_add(Weight::from_parts(20_015_409, 0).saturating_mul(t.into())) + // Standard Error: 21 + .saturating_add(Weight::from_parts(1_060, 0).saturating_mul(i.into())) + // Standard Error: 21 + .saturating_add(Weight::from_parts(1_201, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(7_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) @@ -1187,138 +1159,136 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_211_000 picoseconds. - Weight::from_parts(11_696_412, 0) - // Standard Error: 388 - .saturating_add(Weight::from_parts(265_538, 0).saturating_mul(r.into())) + // Minimum execution time: 10_384_000 picoseconds. + Weight::from_parts(10_319_961, 0) + // Standard Error: 293 + .saturating_add(Weight::from_parts(267_788, 0).saturating_mul(r.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_296_000 picoseconds. - Weight::from_parts(572_494, 0) + // Minimum execution time: 11_991_000 picoseconds. + Weight::from_parts(792_256, 0) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_067, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_071, 0).saturating_mul(n.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_177_000 picoseconds. - Weight::from_parts(8_620_481, 0) - // Standard Error: 249 - .saturating_add(Weight::from_parts(674_502, 0).saturating_mul(r.into())) + // Minimum execution time: 10_210_000 picoseconds. + Weight::from_parts(8_251_750, 0) + // Standard Error: 584 + .saturating_add(Weight::from_parts(662_961, 0).saturating_mul(r.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_240_000 picoseconds. - Weight::from_parts(8_696_186, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(3_328, 0).saturating_mul(n.into())) + // Minimum execution time: 11_994_000 picoseconds. + Weight::from_parts(6_532_799, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(3_351, 0).saturating_mul(n.into())) } /// The range of component `r` is `[0, 1600]`. 
fn seal_hash_blake2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_889_000 picoseconds. - Weight::from_parts(16_103_170, 0) - // Standard Error: 343 - .saturating_add(Weight::from_parts(328_939, 0).saturating_mul(r.into())) + // Minimum execution time: 10_209_000 picoseconds. + Weight::from_parts(10_895_450, 0) + // Standard Error: 195 + .saturating_add(Weight::from_parts(328_195, 0).saturating_mul(r.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_405_000 picoseconds. - Weight::from_parts(2_264_024, 0) + // Minimum execution time: 11_493_000 picoseconds. + Weight::from_parts(4_721_812, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_196, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_195, 0).saturating_mul(n.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_215_000 picoseconds. - Weight::from_parts(10_505_632, 0) - // Standard Error: 240 - .saturating_add(Weight::from_parts(324_854, 0).saturating_mul(r.into())) + // Minimum execution time: 10_134_000 picoseconds. + Weight::from_parts(11_712_472, 0) + // Standard Error: 316 + .saturating_add(Weight::from_parts(335_912, 0).saturating_mul(r.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_440_000 picoseconds. - Weight::from_parts(2_575_889, 0) + // Minimum execution time: 11_448_000 picoseconds. + Weight::from_parts(1_407_440, 0) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_199, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_205, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 55_119_000 picoseconds. - Weight::from_parts(56_732_248, 0) - // Standard Error: 8 - .saturating_add(Weight::from_parts(4_639, 0).saturating_mul(n.into())) + // Minimum execution time: 54_644_000 picoseconds. + Weight::from_parts(55_793_413, 0) + // Standard Error: 11 + .saturating_add(Weight::from_parts(4_511, 0).saturating_mul(n.into())) } /// The range of component `r` is `[0, 160]`. fn seal_sr25519_verify(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_176_000 picoseconds. - Weight::from_parts(9_861_102, 0) - // Standard Error: 6_029 - .saturating_add(Weight::from_parts(45_948_571, 0).saturating_mul(r.into())) + // Minimum execution time: 10_378_000 picoseconds. + Weight::from_parts(25_185_485, 0) + // Standard Error: 8_828 + .saturating_add(Weight::from_parts(41_091_818, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_293_000 picoseconds. 
- Weight::from_parts(28_785_765, 0) - // Standard Error: 9_160 - .saturating_add(Weight::from_parts(45_566_150, 0).saturating_mul(r.into())) + // Minimum execution time: 10_371_000 picoseconds. + Weight::from_parts(35_350_533, 0) + // Standard Error: 9_805 + .saturating_add(Weight::from_parts(45_466_060, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_206_000 picoseconds. - Weight::from_parts(12_420_664, 0) - // Standard Error: 3_489 - .saturating_add(Weight::from_parts(11_628_989, 0).saturating_mul(r.into())) + // Minimum execution time: 10_407_000 picoseconds. + Weight::from_parts(14_375_492, 0) + // Standard Error: 4_036 + .saturating_add(Weight::from_parts(11_666_630, 0).saturating_mul(r.into())) } /// Storage: `Contracts::CodeInfoOf` (r:1536 w:1536) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:1535 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::EventTopics` (r:1537 w:1537) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 1600]`. fn seal_set_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + r * (926 ยฑ0)` - // Estimated: `8969 + r * (3047 ยฑ7)` - // Minimum execution time: 9_219_000 picoseconds. - Weight::from_parts(9_385_000, 8969) - // Standard Error: 45_562 - .saturating_add(Weight::from_parts(26_360_661, 0).saturating_mul(r.into())) + // Estimated: `8966 + r * (3047 ยฑ10)` + // Minimum execution time: 10_566_000 picoseconds. + Weight::from_parts(10_627_000, 8966) + // Standard Error: 46_429 + .saturating_add(Weight::from_parts(22_435_893, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 3047).saturating_mul(r.into())) @@ -1330,10 +1300,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `274 + r * (78 ยฑ0)` // Estimated: `1265 + r * (2553 ยฑ0)` - // Minimum execution time: 9_355_000 picoseconds. - Weight::from_parts(15_071_309, 1265) - // Standard Error: 9_722 - .saturating_add(Weight::from_parts(5_328_717, 0).saturating_mul(r.into())) + // Minimum execution time: 10_305_000 picoseconds. + Weight::from_parts(16_073_202, 1265) + // Standard Error: 8_841 + .saturating_add(Weight::from_parts(5_125_440, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2553).saturating_mul(r.into())) @@ -1345,10 +1315,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `275 + r * (78 ยฑ0)` // Estimated: `990 + r * (2568 ยฑ0)` - // Minimum execution time: 8_979_000 picoseconds. 
- Weight::from_parts(14_362_224, 990) - // Standard Error: 9_137 - .saturating_add(Weight::from_parts(4_488_748, 0).saturating_mul(r.into())) + // Minimum execution time: 10_389_000 picoseconds. + Weight::from_parts(16_221_879, 990) + // Standard Error: 9_409 + .saturating_add(Weight::from_parts(4_235_040, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2568).saturating_mul(r.into())) @@ -1357,8 +1327,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1372,13 +1340,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 1600]`. fn seal_reentrance_count(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `861 + r * (3 ยฑ0)` - // Estimated: `9282 + r * (3 ยฑ0)` - // Minimum execution time: 269_704_000 picoseconds. - Weight::from_parts(289_916_035, 9282) - // Standard Error: 408 - .saturating_add(Weight::from_parts(166_040, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Measured: `858 + r * (3 ยฑ0)` + // Estimated: `6804 + r * (3 ยฑ0)` + // Minimum execution time: 265_499_000 picoseconds. + Weight::from_parts(282_172_889, 6804) + // Standard Error: 442 + .saturating_add(Weight::from_parts(165_070, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) } @@ -1387,10 +1355,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_361_000 picoseconds. - Weight::from_parts(11_633_836, 0) - // Standard Error: 86 - .saturating_add(Weight::from_parts(83_083, 0).saturating_mul(r.into())) + // Minimum execution time: 10_367_000 picoseconds. + Weight::from_parts(13_220_303, 0) + // Standard Error: 151 + .saturating_add(Weight::from_parts(86_117, 0).saturating_mul(r.into())) } /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -1399,10 +1367,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `219` // Estimated: `1704` - // Minimum execution time: 9_133_000 picoseconds. - Weight::from_parts(13_259_836, 1704) - // Standard Error: 121 - .saturating_add(Weight::from_parts(76_878, 0).saturating_mul(r.into())) + // Minimum execution time: 10_223_000 picoseconds. + Weight::from_parts(14_170_002, 1704) + // Standard Error: 71 + .saturating_add(Weight::from_parts(76_372, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 5000]`. 
@@ -1410,10 +1378,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 851_000 picoseconds. - Weight::from_parts(587_883, 0) - // Standard Error: 16 - .saturating_add(Weight::from_parts(14_912, 0).saturating_mul(r.into())) + // Minimum execution time: 754_000 picoseconds. + Weight::from_parts(1_091_740, 0) + // Standard Error: 29 + .saturating_add(Weight::from_parts(14_954, 0).saturating_mul(r.into())) } } @@ -1425,8 +1393,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_149_000 picoseconds. - Weight::from_parts(2_274_000, 1627) + // Minimum execution time: 2_002_000 picoseconds. + Weight::from_parts(2_193_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1436,10 +1404,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `452 + k * (69 ยฑ0)` // Estimated: `442 + k * (70 ยฑ0)` - // Minimum execution time: 12_863_000 picoseconds. - Weight::from_parts(13_188_000, 442) - // Standard Error: 1_053 - .saturating_add(Weight::from_parts(1_105_325, 0).saturating_mul(k.into())) + // Minimum execution time: 12_339_000 picoseconds. + Weight::from_parts(12_682_000, 442) + // Standard Error: 1_302 + .saturating_add(Weight::from_parts(1_163_234, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -1453,10 +1421,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `211 + c * (1 ยฑ0)` // Estimated: `6149 + c * (1 ยฑ0)` - // Minimum execution time: 8_432_000 picoseconds. - Weight::from_parts(9_203_290, 6149) + // Minimum execution time: 8_145_000 picoseconds. + Weight::from_parts(8_747_247, 6149) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_186, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_154, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -1469,8 +1437,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 17_177_000 picoseconds. - Weight::from_parts(17_663_000, 6450) + // Minimum execution time: 16_950_000 picoseconds. + Weight::from_parts(17_498_000, 6450) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1483,10 +1451,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `171 + k * (1 ยฑ0)` // Estimated: `3635 + k * (1 ยฑ0)` - // Minimum execution time: 3_636_000 picoseconds. - Weight::from_parts(3_774_000, 3635) - // Standard Error: 542 - .saturating_add(Weight::from_parts(1_260_058, 0).saturating_mul(k.into())) + // Minimum execution time: 3_431_000 picoseconds. 
+ Weight::from_parts(2_161_027, 3635) + // Standard Error: 949 + .saturating_add(Weight::from_parts(1_219_406, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -1496,8 +1464,6 @@ impl WeightInfo for () { /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:0 w:1) @@ -1505,13 +1471,13 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 125952]`. fn v12_migration_step(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `328 + c * (1 ยฑ0)` - // Estimated: `6266 + c * (1 ยฑ0)` - // Minimum execution time: 21_585_000 picoseconds. - Weight::from_parts(22_069_944, 6266) + // Measured: `325 + c * (1 ยฑ0)` + // Estimated: `6263 + c * (1 ยฑ0)` + // Minimum execution time: 16_384_000 picoseconds. + Weight::from_parts(16_741_331, 6263) // Standard Error: 1 - .saturating_add(Weight::from_parts(404, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(Weight::from_parts(375, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } @@ -1521,8 +1487,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 13_283_000 picoseconds. - Weight::from_parts(14_015_000, 6380) + // Minimum execution time: 12_529_000 picoseconds. + Weight::from_parts(13_319_000, 6380) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1536,8 +1502,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 48_022_000 picoseconds. - Weight::from_parts(49_627_000, 6292) + // Minimum execution time: 47_462_000 picoseconds. + Weight::from_parts(48_784_000, 6292) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1549,8 +1515,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 58_374_000 picoseconds. - Weight::from_parts(59_615_000, 6534) + // Minimum execution time: 55_712_000 picoseconds. + Weight::from_parts(58_629_000, 6534) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1560,8 +1526,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `409` // Estimated: `6349` - // Minimum execution time: 12_559_000 picoseconds. - Weight::from_parts(12_947_000, 6349) + // Minimum execution time: 11_992_000 picoseconds. 
+ Weight::from_parts(12_686_000, 6349) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1571,8 +1537,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_480_000 picoseconds. - Weight::from_parts(2_680_000, 1627) + // Minimum execution time: 2_498_000 picoseconds. + Weight::from_parts(2_594_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1584,8 +1550,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 12_625_000 picoseconds. - Weight::from_parts(13_094_000, 3631) + // Minimum execution time: 12_179_000 picoseconds. + Weight::from_parts(12_805_000, 3631) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1595,8 +1561,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_836_000 picoseconds. - Weight::from_parts(5_182_000, 3607) + // Minimum execution time: 4_695_000 picoseconds. + Weight::from_parts(5_105_000, 3607) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -1607,8 +1573,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 6_319_000 picoseconds. - Weight::from_parts(6_582_000, 3632) + // Minimum execution time: 6_223_000 picoseconds. + Weight::from_parts(6_509_000, 3632) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -1619,15 +1585,13 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 6_532_000 picoseconds. - Weight::from_parts(6_909_000, 3607) + // Minimum execution time: 6_073_000 picoseconds. + Weight::from_parts(6_524_000, 3607) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1643,20 +1607,18 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 125952]`. fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `804 + c * (1 ยฑ0)` - // Estimated: `9217 + c * (1 ยฑ0)` - // Minimum execution time: 305_778_000 picoseconds. - Weight::from_parts(282_321_249, 9217) - // Standard Error: 72 - .saturating_add(Weight::from_parts(33_456, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `801 + c * (1 ยฑ0)` + // Estimated: `6739 + c * (1 ยฑ0)` + // Minimum execution time: 289_627_000 picoseconds. 
+ Weight::from_parts(281_167_857, 6739) + // Standard Error: 68 + .saturating_add(Weight::from_parts(33_442, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:2 w:2) @@ -1678,17 +1640,17 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `326` - // Estimated: `8740` - // Minimum execution time: 3_810_809_000 picoseconds. - Weight::from_parts(739_511_598, 8740) - // Standard Error: 140 - .saturating_add(Weight::from_parts(67_574, 0).saturating_mul(c.into())) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_488, 0).saturating_mul(i.into())) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_537, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(14_u64)) + // Measured: `323` + // Estimated: `8737` + // Minimum execution time: 3_829_638_000 picoseconds. + Weight::from_parts(744_994_885, 8737) + // Standard Error: 165 + .saturating_add(Weight::from_parts(68_083, 0).saturating_mul(c.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(1_484, 0).saturating_mul(i.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(1_581, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -1697,8 +1659,6 @@ impl WeightInfo for () { /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) @@ -1715,21 +1675,19 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 1048576]`. fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `563` - // Estimated: `8982` - // Minimum execution time: 1_986_789_000 picoseconds. - Weight::from_parts(2_017_466_000, 8982) - // Standard Error: 26 - .saturating_add(Weight::from_parts(827, 0).saturating_mul(i.into())) - // Standard Error: 26 - .saturating_add(Weight::from_parts(781, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(13_u64)) + // Measured: `560` + // Estimated: `6504` + // Minimum execution time: 1_960_218_000 picoseconds. 
+ Weight::from_parts(1_976_273_000, 6504) + // Standard Error: 25 + .saturating_add(Weight::from_parts(866, 0).saturating_mul(i.into())) + // Standard Error: 25 + .saturating_add(Weight::from_parts(824, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1744,17 +1702,15 @@ impl WeightInfo for () { /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `829` - // Estimated: `9244` - // Minimum execution time: 210_724_000 picoseconds. - Weight::from_parts(218_608_000, 9244) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Measured: `826` + // Estimated: `6766` + // Minimum execution time: 200_542_000 picoseconds. + Weight::from_parts(209_713_000, 6766) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) @@ -1766,19 +1722,17 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 125952]`. fn upload_code_determinism_enforced(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `6085` - // Minimum execution time: 271_259_000 picoseconds. - Weight::from_parts(298_852_854, 6085) - // Standard Error: 65 - .saturating_add(Weight::from_parts(33_547, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 258_375_000 picoseconds. + Weight::from_parts(271_214_455, 3607) + // Standard Error: 61 + .saturating_add(Weight::from_parts(32_587, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) @@ -1790,13 +1744,13 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 125952]`. 
fn upload_code_determinism_relaxed(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `6085` - // Minimum execution time: 278_167_000 picoseconds. - Weight::from_parts(311_888_941, 6085) - // Standard Error: 58 - .saturating_add(Weight::from_parts(33_595, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 279_363_000 picoseconds. + Weight::from_parts(257_721_413, 3607) + // Standard Error: 81 + .saturating_add(Weight::from_parts(33_850, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -1813,8 +1767,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 47_403_000 picoseconds. - Weight::from_parts(48_707_000, 3780) + // Minimum execution time: 45_096_000 picoseconds. + Weight::from_parts(46_661_000, 3780) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -1830,8 +1784,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `552` // Estimated: `8967` - // Minimum execution time: 35_361_000 picoseconds. - Weight::from_parts(36_714_000, 8967) + // Minimum execution time: 34_260_000 picoseconds. + Weight::from_parts(35_761_000, 8967) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -1840,10 +1794,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_340_000 picoseconds. - Weight::from_parts(9_360_237, 0) - // Standard Error: 269 - .saturating_add(Weight::from_parts(249_611, 0).saturating_mul(r.into())) + // Minimum execution time: 10_265_000 picoseconds. + Weight::from_parts(10_174_088, 0) + // Standard Error: 275 + .saturating_add(Weight::from_parts(271_791, 0).saturating_mul(r.into())) } /// Storage: `Contracts::ContractInfoOf` (r:1600 w:0) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -1852,10 +1806,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `509 + r * (77 ยฑ0)` // Estimated: `1467 + r * (2552 ยฑ0)` - // Minimum execution time: 9_059_000 picoseconds. - Weight::from_parts(9_201_000, 1467) - // Standard Error: 5_643 - .saturating_add(Weight::from_parts(3_343_859, 0).saturating_mul(r.into())) + // Minimum execution time: 10_498_000 picoseconds. + Weight::from_parts(10_551_000, 1467) + // Standard Error: 5_538 + .saturating_add(Weight::from_parts(3_269_462, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2552).saturating_mul(r.into())) } @@ -1866,10 +1820,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `517 + r * (170 ยฑ0)` // Estimated: `1468 + r * (2645 ยฑ0)` - // Minimum execution time: 9_220_000 picoseconds. - Weight::from_parts(9_399_000, 1468) - // Standard Error: 6_194 - .saturating_add(Weight::from_parts(4_172_011, 0).saturating_mul(r.into())) + // Minimum execution time: 10_289_000 picoseconds. 
+ Weight::from_parts(10_469_000, 1468) + // Standard Error: 5_674 + .saturating_add(Weight::from_parts(4_105_274, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2645).saturating_mul(r.into())) } @@ -1878,50 +1832,50 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_707_000 picoseconds. - Weight::from_parts(10_100_456, 0) - // Standard Error: 234 - .saturating_add(Weight::from_parts(338_464, 0).saturating_mul(r.into())) + // Minimum execution time: 10_769_000 picoseconds. + Weight::from_parts(10_389_944, 0) + // Standard Error: 240 + .saturating_add(Weight::from_parts(350_466, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_origin(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_524_000 picoseconds. - Weight::from_parts(10_813_389, 0) - // Standard Error: 76 - .saturating_add(Weight::from_parts(102_535, 0).saturating_mul(r.into())) + // Minimum execution time: 10_443_000 picoseconds. + Weight::from_parts(11_651_820, 0) + // Standard Error: 91 + .saturating_add(Weight::from_parts(100_579, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_root(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_799_000 picoseconds. - Weight::from_parts(10_886_744, 0) - // Standard Error: 75 - .saturating_add(Weight::from_parts(80_901, 0).saturating_mul(r.into())) + // Minimum execution time: 10_474_000 picoseconds. + Weight::from_parts(11_313_654, 0) + // Standard Error: 103 + .saturating_add(Weight::from_parts(85_902, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_address(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_895_000 picoseconds. - Weight::from_parts(10_658_338, 0) - // Standard Error: 189 - .saturating_add(Weight::from_parts(249_694, 0).saturating_mul(r.into())) + // Minimum execution time: 10_360_000 picoseconds. + Weight::from_parts(11_283_384, 0) + // Standard Error: 163 + .saturating_add(Weight::from_parts(253_111, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_gas_left(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_643_000 picoseconds. - Weight::from_parts(10_932_126, 0) - // Standard Error: 153 - .saturating_add(Weight::from_parts(280_924, 0).saturating_mul(r.into())) + // Minimum execution time: 10_289_000 picoseconds. + Weight::from_parts(10_747_872, 0) + // Standard Error: 197 + .saturating_add(Weight::from_parts(299_097, 0).saturating_mul(r.into())) } /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) @@ -1930,10 +1884,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `140` // Estimated: `3599` - // Minimum execution time: 9_548_000 picoseconds. - Weight::from_parts(9_737_000, 3599) - // Standard Error: 971 - .saturating_add(Weight::from_parts(1_704_134, 0).saturating_mul(r.into())) + // Minimum execution time: 10_368_000 picoseconds. 
+ Weight::from_parts(29_685_372, 3599) + // Standard Error: 1_202 + .saturating_add(Weight::from_parts(1_517_645, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 1600]`. @@ -1941,40 +1895,40 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_172_000 picoseconds. - Weight::from_parts(18_255_933, 0) - // Standard Error: 540 - .saturating_add(Weight::from_parts(230_929, 0).saturating_mul(r.into())) + // Minimum execution time: 10_528_000 picoseconds. + Weight::from_parts(11_653_603, 0) + // Standard Error: 203 + .saturating_add(Weight::from_parts(241_937, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_minimum_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_232_000 picoseconds. - Weight::from_parts(9_796_584, 0) - // Standard Error: 208 - .saturating_add(Weight::from_parts(239_962, 0).saturating_mul(r.into())) + // Minimum execution time: 10_385_000 picoseconds. + Weight::from_parts(11_483_212, 0) + // Standard Error: 227 + .saturating_add(Weight::from_parts(248_076, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_block_number(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_747_000 picoseconds. - Weight::from_parts(8_733_230, 0) - // Standard Error: 377 - .saturating_add(Weight::from_parts(253_801, 0).saturating_mul(r.into())) + // Minimum execution time: 10_341_000 picoseconds. + Weight::from_parts(12_055_382, 0) + // Standard Error: 1_231 + .saturating_add(Weight::from_parts(249_662, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_now(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_214_000 picoseconds. - Weight::from_parts(10_194_153, 0) - // Standard Error: 516 - .saturating_add(Weight::from_parts(247_621, 0).saturating_mul(r.into())) + // Minimum execution time: 10_467_000 picoseconds. + Weight::from_parts(10_579_667, 0) + // Standard Error: 247 + .saturating_add(Weight::from_parts(246_711, 0).saturating_mul(r.into())) } /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) @@ -1983,10 +1937,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `67` // Estimated: `1552` - // Minimum execution time: 9_022_000 picoseconds. - Weight::from_parts(22_051_160, 1552) - // Standard Error: 697 - .saturating_add(Weight::from_parts(709_612, 0).saturating_mul(r.into())) + // Minimum execution time: 10_293_000 picoseconds. + Weight::from_parts(18_229_738, 1552) + // Standard Error: 452 + .saturating_add(Weight::from_parts(655_277, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 1600]`. @@ -1994,17 +1948,15 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_135_000 picoseconds. - Weight::from_parts(10_646_215, 0) - // Standard Error: 161 - .saturating_add(Weight::from_parts(170_336, 0).saturating_mul(r.into())) + // Minimum execution time: 10_355_000 picoseconds. 
+ Weight::from_parts(11_641_920, 0) + // Standard Error: 166 + .saturating_add(Weight::from_parts(168_271, 0).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -2018,13 +1970,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1048576]`. fn seal_input_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `9287` - // Minimum execution time: 273_896_000 picoseconds. - Weight::from_parts(148_309_654, 9287) + // Measured: `869` + // Estimated: `6809` + // Minimum execution time: 268_424_000 picoseconds. + Weight::from_parts(136_261_773, 6809) // Standard Error: 16 - .saturating_add(Weight::from_parts(1_355, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + .saturating_add(Weight::from_parts(1_373, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// The range of component `r` is `[0, 1]`. @@ -2032,27 +1984,25 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_906_000 picoseconds. - Weight::from_parts(9_264_446, 0) - // Standard Error: 19_760 - .saturating_add(Weight::from_parts(1_256_053, 0).saturating_mul(r.into())) + // Minimum execution time: 10_044_000 picoseconds. + Weight::from_parts(10_550_491, 0) + // Standard Error: 20_456 + .saturating_add(Weight::from_parts(925_808, 0).saturating_mul(r.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_return_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_266_000 picoseconds. - Weight::from_parts(10_602_261, 0) + // Minimum execution time: 11_361_000 picoseconds. + Weight::from_parts(11_935_556, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(318, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(315, 0).saturating_mul(n.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) /// Storage: `System::Account` (r:3 w:3) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:1 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:33 w:33) @@ -2072,14 +2022,14 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 1]`. 
fn seal_terminate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `4805 + r * (2121 ยฑ0)` - // Estimated: `13220 + r * (81321 ยฑ0)` - // Minimum execution time: 295_922_000 picoseconds. - Weight::from_parts(322_472_877, 13220) - // Standard Error: 993_812 - .saturating_add(Weight::from_parts(259_075_422, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().reads((36_u64).saturating_mul(r.into()))) + // Measured: `4802 + r * (2121 ยฑ0)` + // Estimated: `10742 + r * (81321 ยฑ0)` + // Minimum execution time: 293_793_000 picoseconds. + Weight::from_parts(314_285_185, 10742) + // Standard Error: 808_383 + .saturating_add(Weight::from_parts(256_215_014, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().reads((38_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(RocksDbWeight::get().writes((41_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 81321).saturating_mul(r.into())) @@ -2091,10 +2041,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 9_427_000 picoseconds. - Weight::from_parts(12_996_213, 1561) - // Standard Error: 845 - .saturating_add(Weight::from_parts(1_182_642, 0).saturating_mul(r.into())) + // Minimum execution time: 10_323_000 picoseconds. + Weight::from_parts(10_996_645, 1561) + // Standard Error: 566 + .saturating_add(Weight::from_parts(1_133_870, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 1600]`. @@ -2102,10 +2052,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_304_000 picoseconds. - Weight::from_parts(25_678_842, 0) - // Standard Error: 1_855 - .saturating_add(Weight::from_parts(1_814_511, 0).saturating_mul(r.into())) + // Minimum execution time: 10_122_000 picoseconds. + Weight::from_parts(17_368_451, 0) + // Standard Error: 679 + .saturating_add(Weight::from_parts(1_660_129, 0).saturating_mul(r.into())) } /// Storage: `System::EventTopics` (r:4 w:4) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -2115,12 +2065,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `990 + t * (2475 ยฑ0)` - // Minimum execution time: 23_425_000 picoseconds. - Weight::from_parts(15_229_010, 990) - // Standard Error: 14_380 - .saturating_add(Weight::from_parts(2_545_653, 0).saturating_mul(t.into())) - // Standard Error: 4 - .saturating_add(Weight::from_parts(594, 0).saturating_mul(n.into())) + // Minimum execution time: 24_515_000 picoseconds. + Weight::from_parts(16_807_493, 990) + // Standard Error: 13_923 + .saturating_add(Weight::from_parts(2_315_122, 0).saturating_mul(t.into())) + // Standard Error: 3 + .saturating_add(Weight::from_parts(573, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into())) @@ -2130,20 +2080,20 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_117_000 picoseconds. 
- Weight::from_parts(12_887_533, 0) - // Standard Error: 83 - .saturating_add(Weight::from_parts(99_373, 0).saturating_mul(r.into())) + // Minimum execution time: 9_596_000 picoseconds. + Weight::from_parts(9_113_960, 0) + // Standard Error: 139 + .saturating_add(Weight::from_parts(112_197, 0).saturating_mul(r.into())) } /// The range of component `i` is `[0, 1048576]`. fn seal_debug_message_per_byte(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_982_000 picoseconds. - Weight::from_parts(11_176_000, 0) + // Minimum execution time: 11_260_000 picoseconds. + Weight::from_parts(11_341_000, 0) // Standard Error: 8 - .saturating_add(Weight::from_parts(983, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(984, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -2152,10 +2102,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `108 + r * (150 ยฑ0)` // Estimated: `105 + r * (151 ยฑ0)` - // Minimum execution time: 9_150_000 picoseconds. - Weight::from_parts(9_269_000, 105) - // Standard Error: 8_147 - .saturating_add(Weight::from_parts(5_339_554, 0).saturating_mul(r.into())) + // Minimum execution time: 10_660_000 picoseconds. + Weight::from_parts(10_762_000, 105) + // Standard Error: 7_920 + .saturating_add(Weight::from_parts(5_122_380, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) @@ -2167,10 +2117,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `245` // Estimated: `245` - // Minimum execution time: 19_085_000 picoseconds. - Weight::from_parts(20_007_323, 245) - // Standard Error: 3 - .saturating_add(Weight::from_parts(291, 0).saturating_mul(n.into())) + // Minimum execution time: 19_446_000 picoseconds. + Weight::from_parts(20_166_940, 245) + // Standard Error: 2 + .saturating_add(Weight::from_parts(287, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2181,10 +2131,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ยฑ0)` // Estimated: `248 + n * (1 ยฑ0)` - // Minimum execution time: 19_127_000 picoseconds. - Weight::from_parts(21_152_987, 248) - // Standard Error: 3 - .saturating_add(Weight::from_parts(42, 0).saturating_mul(n.into())) + // Minimum execution time: 19_249_000 picoseconds. + Weight::from_parts(20_875_560, 248) + // Standard Error: 2 + .saturating_add(Weight::from_parts(73, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -2196,10 +2146,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `108 + r * (150 ยฑ0)` // Estimated: `105 + r * (151 ยฑ0)` - // Minimum execution time: 9_264_000 picoseconds. - Weight::from_parts(9_449_000, 105) - // Standard Error: 8_196 - .saturating_add(Weight::from_parts(5_325_578, 0).saturating_mul(r.into())) + // Minimum execution time: 10_477_000 picoseconds. 
+ Weight::from_parts(10_633_000, 105) + // Standard Error: 8_552 + .saturating_add(Weight::from_parts(5_159_505, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) @@ -2211,10 +2161,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ยฑ0)` // Estimated: `248 + n * (1 ยฑ0)` - // Minimum execution time: 18_489_000 picoseconds. - Weight::from_parts(19_916_153, 248) + // Minimum execution time: 19_265_000 picoseconds. + Weight::from_parts(20_699_861, 248) // Standard Error: 2 - .saturating_add(Weight::from_parts(97, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(77, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -2226,10 +2176,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `108 + r * (150 ยฑ0)` // Estimated: `105 + r * (151 ยฑ0)` - // Minimum execution time: 9_299_000 picoseconds. - Weight::from_parts(9_464_000, 105) - // Standard Error: 6_827 - .saturating_add(Weight::from_parts(4_720_699, 0).saturating_mul(r.into())) + // Minimum execution time: 10_336_000 picoseconds. + Weight::from_parts(10_466_000, 105) + // Standard Error: 7_699 + .saturating_add(Weight::from_parts(4_542_224, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) } @@ -2240,10 +2190,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ยฑ0)` // Estimated: `248 + n * (1 ยฑ0)` - // Minimum execution time: 17_981_000 picoseconds. - Weight::from_parts(19_802_353, 248) + // Minimum execution time: 18_513_000 picoseconds. + Weight::from_parts(20_357_236, 248) // Standard Error: 3 - .saturating_add(Weight::from_parts(617, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(588, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -2254,10 +2204,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `108 + r * (150 ยฑ0)` // Estimated: `105 + r * (151 ยฑ0)` - // Minimum execution time: 9_891_000 picoseconds. - Weight::from_parts(10_046_000, 105) - // Standard Error: 6_993 - .saturating_add(Weight::from_parts(4_601_167, 0).saturating_mul(r.into())) + // Minimum execution time: 10_432_000 picoseconds. + Weight::from_parts(10_658_000, 105) + // Standard Error: 7_129 + .saturating_add(Weight::from_parts(4_423_298, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) } @@ -2268,10 +2218,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ยฑ0)` // Estimated: `248 + n * (1 ยฑ0)` - // Minimum execution time: 17_229_000 picoseconds. - Weight::from_parts(18_302_733, 248) + // Minimum execution time: 17_663_000 picoseconds. 
+ Weight::from_parts(19_107_828, 248) // Standard Error: 2 - .saturating_add(Weight::from_parts(112, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(86, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -2282,10 +2232,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `108 + r * (150 ยฑ0)` // Estimated: `105 + r * (151 ยฑ0)` - // Minimum execution time: 9_323_000 picoseconds. - Weight::from_parts(9_462_000, 105) - // Standard Error: 8_031 - .saturating_add(Weight::from_parts(5_433_981, 0).saturating_mul(r.into())) + // Minimum execution time: 10_254_000 picoseconds. + Weight::from_parts(10_332_000, 105) + // Standard Error: 9_485 + .saturating_add(Weight::from_parts(5_242_433, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) @@ -2297,10 +2247,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ยฑ0)` // Estimated: `248 + n * (1 ยฑ0)` - // Minimum execution time: 18_711_000 picoseconds. - Weight::from_parts(20_495_670, 248) + // Minimum execution time: 19_410_000 picoseconds. + Weight::from_parts(21_347_311, 248) // Standard Error: 3 - .saturating_add(Weight::from_parts(640, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(607, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -2312,10 +2262,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `770` // Estimated: `4221 + r * (2475 ยฑ0)` - // Minimum execution time: 9_226_000 picoseconds. - Weight::from_parts(9_394_000, 4221) - // Standard Error: 14_741 - .saturating_add(Weight::from_parts(34_179_316, 0).saturating_mul(r.into())) + // Minimum execution time: 10_365_000 picoseconds. + Weight::from_parts(10_514_000, 4221) + // Standard Error: 18_360 + .saturating_add(Weight::from_parts(33_433_850, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -2330,18 +2280,16 @@ impl WeightInfo for () { /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// Storage: `System::EventTopics` (r:801 w:801) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_call(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `520 + r * (170 ยฑ0)` - // Estimated: `6463 + r * (2646 ยฑ0)` - // Minimum execution time: 9_455_000 picoseconds. - Weight::from_parts(9_671_000, 6463) - // Standard Error: 126_080 - .saturating_add(Weight::from_parts(244_204_040, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Measured: `517 + r * (170 ยฑ0)` + // Estimated: `3985 + r * (2646 ยฑ0)` + // Minimum execution time: 10_332_000 picoseconds. 
+ Weight::from_parts(10_424_000, 3985) + // Standard Error: 117_754 + .saturating_add(Weight::from_parts(242_191_645, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(r.into()))) @@ -2353,19 +2301,17 @@ impl WeightInfo for () { /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// Storage: `System::EventTopics` (r:736 w:736) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:0 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// The range of component `r` is `[0, 800]`. fn seal_delegate_call(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + r * (527 ยฑ0)` - // Estimated: `6447 + r * (2583 ยฑ10)` - // Minimum execution time: 9_274_000 picoseconds. - Weight::from_parts(9_437_000, 6447) - // Standard Error: 150_832 - .saturating_add(Weight::from_parts(244_196_269, 0).saturating_mul(r.into())) + // Estimated: `6444 + r * (2583 ยฑ10)` + // Minimum execution time: 10_550_000 picoseconds. + Weight::from_parts(10_667_000, 6444) + // Standard Error: 147_918 + .saturating_add(Weight::from_parts(242_824_174, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2583).saturating_mul(r.into())) @@ -2380,25 +2326,23 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1048576]`. fn seal_call_per_transfer_clone_byte(t: u32, c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `699 + t * (277 ยฑ0)` - // Estimated: `6639 + t * (3458 ยฑ0)` - // Minimum execution time: 214_483_000 picoseconds. - Weight::from_parts(122_634_366, 6639) - // Standard Error: 2_499_235 - .saturating_add(Weight::from_parts(41_326_008, 0).saturating_mul(t.into())) + // Measured: `696 + t * (277 ยฑ0)` + // Estimated: `6636 + t * (3457 ยฑ0)` + // Minimum execution time: 213_206_000 picoseconds. 
+ Weight::from_parts(120_511_970, 6636) + // Standard Error: 2_501_856 + .saturating_add(Weight::from_parts(40_016_645, 0).saturating_mul(t.into())) // Standard Error: 3 - .saturating_add(Weight::from_parts(422, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(Weight::from_parts(420, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 3458).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 3457).saturating_mul(t.into())) } /// Storage: `Contracts::CodeInfoOf` (r:800 w:800) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) @@ -2410,20 +2354,18 @@ impl WeightInfo for () { /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `System::Account` (r:802 w:802) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::EventTopics` (r:801 w:801) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[1, 800]`. fn seal_instantiate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1097 + r * (188 ยฑ0)` - // Estimated: `6990 + r * (2664 ยฑ0)` - // Minimum execution time: 341_569_000 picoseconds. - Weight::from_parts(360_574_000, 6990) - // Standard Error: 259_746 - .saturating_add(Weight::from_parts(337_944_674, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `1094 + r * (188 ยฑ0)` + // Estimated: `6987 + r * (2664 ยฑ0)` + // Minimum execution time: 334_708_000 picoseconds. + Weight::from_parts(346_676_000, 6987) + // Standard Error: 236_074 + .saturating_add(Weight::from_parts(330_734_734, 0).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(r.into()))) @@ -2439,8 +2381,6 @@ impl WeightInfo for () { /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `System::Account` (r:3 w:3) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[0, 1]`. @@ -2448,17 +2388,17 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 983040]`. fn seal_instantiate_per_transfer_input_salt_byte(t: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `760 + t * (104 ยฑ0)` - // Estimated: `6719 + t * (2549 ยฑ1)` - // Minimum execution time: 1_863_119_000 picoseconds. 
- Weight::from_parts(900_189_174, 6719)
- // Standard Error: 13_040_979
- .saturating_add(Weight::from_parts(4_056_063, 0).saturating_mul(t.into()))
- // Standard Error: 20
- .saturating_add(Weight::from_parts(1_028, 0).saturating_mul(i.into()))
- // Standard Error: 20
- .saturating_add(Weight::from_parts(1_173, 0).saturating_mul(s.into()))
- .saturating_add(RocksDbWeight::get().reads(10_u64))
+ // Measured: `757 + t * (104 ±0)`
+ // Estimated: `6716 + t * (2549 ±1)`
+ // Minimum execution time: 1_854_462_000 picoseconds.
+ Weight::from_parts(855_253_052, 6716)
+ // Standard Error: 13_502_046
+ .saturating_add(Weight::from_parts(20_015_409, 0).saturating_mul(t.into()))
+ // Standard Error: 21
+ .saturating_add(Weight::from_parts(1_060, 0).saturating_mul(i.into()))
+ // Standard Error: 21
+ .saturating_add(Weight::from_parts(1_201, 0).saturating_mul(s.into()))
+ .saturating_add(RocksDbWeight::get().reads(8_u64))
 .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into())))
 .saturating_add(RocksDbWeight::get().writes(7_u64))
 .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into())))
@@ -2469,138 +2409,136 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 9_211_000 picoseconds.
- Weight::from_parts(11_696_412, 0)
- // Standard Error: 388
- .saturating_add(Weight::from_parts(265_538, 0).saturating_mul(r.into()))
+ // Minimum execution time: 10_384_000 picoseconds.
+ Weight::from_parts(10_319_961, 0)
+ // Standard Error: 293
+ .saturating_add(Weight::from_parts(267_788, 0).saturating_mul(r.into()))
 }
 /// The range of component `n` is `[0, 1048576]`.
 fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 10_296_000 picoseconds.
- Weight::from_parts(572_494, 0)
+ // Minimum execution time: 11_991_000 picoseconds.
+ Weight::from_parts(792_256, 0)
 // Standard Error: 1
- .saturating_add(Weight::from_parts(1_067, 0).saturating_mul(n.into()))
+ .saturating_add(Weight::from_parts(1_071, 0).saturating_mul(n.into()))
 }
 /// The range of component `r` is `[0, 1600]`.
 fn seal_hash_keccak_256(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 9_177_000 picoseconds.
- Weight::from_parts(8_620_481, 0)
- // Standard Error: 249
- .saturating_add(Weight::from_parts(674_502, 0).saturating_mul(r.into()))
+ // Minimum execution time: 10_210_000 picoseconds.
+ Weight::from_parts(8_251_750, 0)
+ // Standard Error: 584
+ .saturating_add(Weight::from_parts(662_961, 0).saturating_mul(r.into()))
 }
 /// The range of component `n` is `[0, 1048576]`.
 fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 11_240_000 picoseconds.
- Weight::from_parts(8_696_186, 0)
- // Standard Error: 0
- .saturating_add(Weight::from_parts(3_328, 0).saturating_mul(n.into()))
+ // Minimum execution time: 11_994_000 picoseconds.
+ Weight::from_parts(6_532_799, 0)
+ // Standard Error: 2
+ .saturating_add(Weight::from_parts(3_351, 0).saturating_mul(n.into()))
 }
 /// The range of component `r` is `[0, 1600]`.
 fn seal_hash_blake2_256(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 9_889_000 picoseconds.
- Weight::from_parts(16_103_170, 0) - // Standard Error: 343 - .saturating_add(Weight::from_parts(328_939, 0).saturating_mul(r.into())) + // Minimum execution time: 10_209_000 picoseconds. + Weight::from_parts(10_895_450, 0) + // Standard Error: 195 + .saturating_add(Weight::from_parts(328_195, 0).saturating_mul(r.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_405_000 picoseconds. - Weight::from_parts(2_264_024, 0) + // Minimum execution time: 11_493_000 picoseconds. + Weight::from_parts(4_721_812, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_196, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_195, 0).saturating_mul(n.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_215_000 picoseconds. - Weight::from_parts(10_505_632, 0) - // Standard Error: 240 - .saturating_add(Weight::from_parts(324_854, 0).saturating_mul(r.into())) + // Minimum execution time: 10_134_000 picoseconds. + Weight::from_parts(11_712_472, 0) + // Standard Error: 316 + .saturating_add(Weight::from_parts(335_912, 0).saturating_mul(r.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_440_000 picoseconds. - Weight::from_parts(2_575_889, 0) + // Minimum execution time: 11_448_000 picoseconds. + Weight::from_parts(1_407_440, 0) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_199, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_205, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 55_119_000 picoseconds. - Weight::from_parts(56_732_248, 0) - // Standard Error: 8 - .saturating_add(Weight::from_parts(4_639, 0).saturating_mul(n.into())) + // Minimum execution time: 54_644_000 picoseconds. + Weight::from_parts(55_793_413, 0) + // Standard Error: 11 + .saturating_add(Weight::from_parts(4_511, 0).saturating_mul(n.into())) } /// The range of component `r` is `[0, 160]`. fn seal_sr25519_verify(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_176_000 picoseconds. - Weight::from_parts(9_861_102, 0) - // Standard Error: 6_029 - .saturating_add(Weight::from_parts(45_948_571, 0).saturating_mul(r.into())) + // Minimum execution time: 10_378_000 picoseconds. + Weight::from_parts(25_185_485, 0) + // Standard Error: 8_828 + .saturating_add(Weight::from_parts(41_091_818, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_293_000 picoseconds. - Weight::from_parts(28_785_765, 0) - // Standard Error: 9_160 - .saturating_add(Weight::from_parts(45_566_150, 0).saturating_mul(r.into())) + // Minimum execution time: 10_371_000 picoseconds. 
+ Weight::from_parts(35_350_533, 0)
+ // Standard Error: 9_805
+ .saturating_add(Weight::from_parts(45_466_060, 0).saturating_mul(r.into()))
 }
 /// The range of component `r` is `[0, 160]`.
 fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 9_206_000 picoseconds.
- Weight::from_parts(12_420_664, 0)
- // Standard Error: 3_489
- .saturating_add(Weight::from_parts(11_628_989, 0).saturating_mul(r.into()))
+ // Minimum execution time: 10_407_000 picoseconds.
+ Weight::from_parts(14_375_492, 0)
+ // Standard Error: 4_036
+ .saturating_add(Weight::from_parts(11_666_630, 0).saturating_mul(r.into()))
 }
 /// Storage: `Contracts::CodeInfoOf` (r:1536 w:1536)
 /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
 /// Storage: `Contracts::PristineCode` (r:1535 w:0)
 /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Parameters::Parameters` (r:2 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
 /// Storage: `System::EventTopics` (r:1537 w:1537)
 /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `r` is `[0, 1600]`.
 fn seal_set_code_hash(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0 + r * (926 ±0)`
- // Estimated: `8969 + r * (3047 ±7)`
- // Minimum execution time: 9_219_000 picoseconds.
- Weight::from_parts(9_385_000, 8969)
- // Standard Error: 45_562
- .saturating_add(Weight::from_parts(26_360_661, 0).saturating_mul(r.into()))
+ // Estimated: `8966 + r * (3047 ±10)`
+ // Minimum execution time: 10_566_000 picoseconds.
+ Weight::from_parts(10_627_000, 8966)
+ // Standard Error: 46_429
+ .saturating_add(Weight::from_parts(22_435_893, 0).saturating_mul(r.into()))
 .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into())))
 .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(r.into())))
 .saturating_add(Weight::from_parts(0, 3047).saturating_mul(r.into()))
@@ -2612,10 +2550,10 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `274 + r * (78 ±0)`
 // Estimated: `1265 + r * (2553 ±0)`
- // Minimum execution time: 9_355_000 picoseconds.
- Weight::from_parts(15_071_309, 1265)
- // Standard Error: 9_722
- .saturating_add(Weight::from_parts(5_328_717, 0).saturating_mul(r.into()))
+ // Minimum execution time: 10_305_000 picoseconds.
+ Weight::from_parts(16_073_202, 1265)
+ // Standard Error: 8_841
+ .saturating_add(Weight::from_parts(5_125_440, 0).saturating_mul(r.into()))
 .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into())))
 .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into())))
 .saturating_add(Weight::from_parts(0, 2553).saturating_mul(r.into()))
@@ -2627,10 +2565,10 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `275 + r * (78 ±0)`
 // Estimated: `990 + r * (2568 ±0)`
- // Minimum execution time: 8_979_000 picoseconds.
+ Weight::from_parts(16_221_879, 990)
+ // Standard Error: 9_409
+ .saturating_add(Weight::from_parts(4_235_040, 0).saturating_mul(r.into()))
 .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into())))
 .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into())))
 .saturating_add(Weight::from_parts(0, 2568).saturating_mul(r.into()))
@@ -2639,8 +2577,6 @@ impl WeightInfo for () {
 /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
 /// Storage: `System::Account` (r:1 w:0)
 /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Parameters::Parameters` (r:3 w:0)
- /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`)
 /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
 /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
 /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
@@ -2654,13 +2590,13 @@ impl WeightInfo for () {
 /// The range of component `r` is `[0, 1600]`.
 fn seal_reentrance_count(r: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `861 + r * (3 ±0)`
- // Estimated: `9282 + r * (3 ±0)`
- // Minimum execution time: 269_704_000 picoseconds.
- Weight::from_parts(289_916_035, 9282)
- // Standard Error: 408
- .saturating_add(Weight::from_parts(166_040, 0).saturating_mul(r.into()))
- .saturating_add(RocksDbWeight::get().reads(11_u64))
+ // Measured: `858 + r * (3 ±0)`
+ // Estimated: `6804 + r * (3 ±0)`
+ // Minimum execution time: 265_499_000 picoseconds.
+ Weight::from_parts(282_172_889, 6804)
+ // Standard Error: 442
+ .saturating_add(Weight::from_parts(165_070, 0).saturating_mul(r.into()))
+ .saturating_add(RocksDbWeight::get().reads(8_u64))
 .saturating_add(RocksDbWeight::get().writes(3_u64))
 .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into()))
 }
@@ -2669,10 +2605,10 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 9_361_000 picoseconds.
- Weight::from_parts(11_633_836, 0)
- // Standard Error: 86
- .saturating_add(Weight::from_parts(83_083, 0).saturating_mul(r.into()))
+ // Minimum execution time: 10_367_000 picoseconds.
+ Weight::from_parts(13_220_303, 0)
+ // Standard Error: 151
+ .saturating_add(Weight::from_parts(86_117, 0).saturating_mul(r.into()))
 }
 /// Storage: `Contracts::Nonce` (r:1 w:0)
 /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
@@ -2681,10 +2617,10 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `219`
 // Estimated: `1704`
- // Minimum execution time: 9_133_000 picoseconds.
- Weight::from_parts(13_259_836, 1704)
- // Standard Error: 121
- .saturating_add(Weight::from_parts(76_878, 0).saturating_mul(r.into()))
+ // Minimum execution time: 10_223_000 picoseconds.
+ Weight::from_parts(14_170_002, 1704)
+ // Standard Error: 71
+ .saturating_add(Weight::from_parts(76_372, 0).saturating_mul(r.into()))
 .saturating_add(RocksDbWeight::get().reads(1_u64))
 }
 /// The range of component `r` is `[0, 5000]`.
@@ -2692,9 +2628,9 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 851_000 picoseconds.
- Weight::from_parts(587_883, 0)
- // Standard Error: 16
- .saturating_add(Weight::from_parts(14_912, 0).saturating_mul(r.into()))
+ // Minimum execution time: 754_000 picoseconds.
+ Weight::from_parts(1_091_740, 0)
+ // Standard Error: 29
+ .saturating_add(Weight::from_parts(14_954, 0).saturating_mul(r.into()))
 }
}
-- 
GitLab

From 2c48b9ddb0a5de4499d4ed699b79eacc354f016a Mon Sep 17 00:00:00 2001
From: Svyatoslav Nikolsky
Date: Fri, 17 May 2024 11:00:39 +0300
Subject: [PATCH 029/106] Bridge: fixed relayer version metric value (#4492)

Before the relayer crates were moved and merged, the `MetricsParams` type was created from the `substrate-relay` crate (the binary), and hence it was setting the `substrate_relay_build_info` metric value properly - to the binary version. Now it is created from the `substrate-relay-helper` crate, which has the fixed (it isn't published) version `0.1.0`, so our relay provides an incorrect metric value. This 'breaks' our monitoring tools - we see that all relayers have that incorrect version, which is not cool.

The idea is to have a global static variable (shame on me) that is initialized by the binary during initialization, like we already do with the logger initialization. I considered some alternative options:
- adding a separate argument to every relayer subcommand and propagating it to `MetricsParams::new()` causes a lot of changes and introduces even more noise to the binary code, which is supposed to be as small as possible in the new design. But I could do that if the team thinks it is better;
- adding a `structopt(skip) pub relayer_version: RelayerVersion` argument to all subcommand params won't work, because it will be initialized by default and `RelayerVersion` needs to reside in some util crate (not the binary), so it'll have the wrong value again.
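A minimal sketch of the binary-side initialization this describes (the wrapper function name is hypothetical; only the `RELAYER_VERSION` static below comes from this patch):

```rust
// In the binary crate (e.g. substrate-relay's main.rs), run before metrics are set up.
fn init_relayer_version() {
	// `CARGO_PKG_VERSION` expands to the *binary* crate version here, which is
	// exactly the value the `substrate_relay_build_info` metric should report.
	*relay_utils::initialize::RELAYER_VERSION.lock() =
		option_env!("CARGO_PKG_VERSION").map(|v| v.to_string());
}
```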
---
 Cargo.lock                                        | 1 +
 bridges/relays/lib-substrate-relay/src/cli/mod.rs | 13 ++++++-------
 bridges/relays/utils/Cargo.toml                   | 1 +
 bridges/relays/utils/src/initialize.rs            | 5 +++++
 4 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 642fe88db00..2a4b9b138bf 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -15472,6 +15472,7 @@ dependencies = [
 "jsonpath_lib",
 "log",
 "num-traits",
+ "parking_lot 0.12.1",
 "serde_json",
 "sp-runtime",
 "substrate-prometheus-endpoint",
diff --git a/bridges/relays/lib-substrate-relay/src/cli/mod.rs b/bridges/relays/lib-substrate-relay/src/cli/mod.rs
index 0dd0d5474b3..270608bf6ed 100644
--- a/bridges/relays/lib-substrate-relay/src/cli/mod.rs
+++ b/bridges/relays/lib-substrate-relay/src/cli/mod.rs
@@ -125,14 +125,13 @@ impl PrometheusParams {
 None
 };
- let relay_version = option_env!("CARGO_PKG_VERSION").unwrap_or("unknown");
+ let relay_version = relay_utils::initialize::RELAYER_VERSION
+ .lock()
+ .clone()
+ .unwrap_or_else(|| "unknown".to_string());
 let relay_commit = SubstrateRelayBuildInfo::get_git_commit();
- relay_utils::metrics::MetricsParams::new(
- metrics_address,
- relay_version.into(),
- relay_commit,
- )
- .map_err(|e| anyhow::format_err!("{:?}", e))
+ relay_utils::metrics::MetricsParams::new(metrics_address, relay_version, relay_commit)
+ .map_err(|e| anyhow::format_err!("{:?}", e))
 }
 }
diff --git a/bridges/relays/utils/Cargo.toml b/bridges/relays/utils/Cargo.toml
index ee56ebf9a95..1264f582983 100644
--- a/bridges/relays/utils/Cargo.toml
+++ b/bridges/relays/utils/Cargo.toml
@@ -22,6 +22,7 @@ futures = "0.3.30"
 jsonpath_lib = "0.3"
 log = { workspace = true }
 num-traits = "0.2"
+parking_lot = "0.12.1"
 serde_json = { workspace = true, default-features = true }
 sysinfo = "0.30"
 time = { version = "0.3", features = ["formatting", "local-offset", "std"] }
diff --git a/bridges/relays/utils/src/initialize.rs b/bridges/relays/utils/src/initialize.rs
index 8224c1803ad..64d71024271 100644
--- a/bridges/relays/utils/src/initialize.rs
+++ b/bridges/relays/utils/src/initialize.rs
@@ -16,8 +16,13 @@
 //! Relayer initialization functions.
+use parking_lot::Mutex;
 use std::{cell::RefCell, fmt::Display, io::Write};
+/// Relayer version that is provided as metric. Must be set by a binary
+/// (get it with `option_env!("CARGO_PKG_VERSION")` from a binary package code).
+pub static RELAYER_VERSION: Mutex<Option<String>> = Mutex::new(None);
+
 async_std::task_local! {
 pub(crate) static LOOP_NAME: RefCell<String> = RefCell::new(String::default());
 }
-- 
GitLab

From ca0fb0d9a9842fb7600f3c9a56bd188f24bc5fb7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Fri, 17 May 2024 13:35:15 +0200
Subject: [PATCH 030/106] pallet_balances: Add `try_state` for checking `Holds` and `Freezes` (#4490)

Co-authored-by: command-bot <>
---
 substrate/frame/balances/src/lib.rs           | 25 ++++++++++++++-
 .../frame/balances/src/tests/general_tests.rs | 32 +++++++++++++++++++
 2 files changed, 56 insertions(+), 1 deletion(-)

diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs
index 8d904d3d21b..56eb81b49e2 100644
--- a/substrate/frame/balances/src/lib.rs
+++ b/substrate/frame/balances/src/lib.rs
@@ -542,8 +542,8 @@ pub mod pallet {
 #[pallet::hooks]
 impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> {
- #[cfg(not(feature = "insecure_zero_ed"))]
 fn integrity_test() {
+ #[cfg(not(feature = "insecure_zero_ed"))]
 assert!(
 !<T as Config<I>>::ExistentialDeposit::get().is_zero(),
 "The existential deposit must be greater than zero!"
@@ -555,6 +555,29 @@ pub mod pallet {
 T::MaxFreezes::get(),
 <T::RuntimeFreezeReason as VariantCount>::VARIANT_COUNT,
 );
 }
+
+ #[cfg(feature = "try-runtime")]
+ fn try_state(_n: BlockNumberFor<T>) -> Result<(), sp_runtime::TryRuntimeError> {
+ Holds::<T, I>::iter_keys().try_for_each(|k| {
+ if Holds::<T, I>::decode_len(k).unwrap_or(0) >
+ T::RuntimeHoldReason::VARIANT_COUNT as usize
+ {
+ Err("Found `Hold` with too many elements")
+ } else {
+ Ok(())
+ }
+ })?;
+
+ Freezes::<T, I>::iter_keys().try_for_each(|k| {
+ if Freezes::<T, I>::decode_len(k).unwrap_or(0) > T::MaxFreezes::get() as usize {
+ Err("Found `Freeze` with too many elements")
+ } else {
+ Ok(())
+ }
+ })?;
+
+ Ok(())
+ }
 }
 #[pallet::call(weight(<T as Config<I>>::WeightInfo))]
diff --git a/substrate/frame/balances/src/tests/general_tests.rs b/substrate/frame/balances/src/tests/general_tests.rs
index 0f3e015d0a8..a855fae5616 100644
--- a/substrate/frame/balances/src/tests/general_tests.rs
+++ b/substrate/frame/balances/src/tests/general_tests.rs
@@ -109,3 +109,35 @@ fn regression_historic_acc_does_not_evaporate_reserve() {
 });
 });
 }
+
+#[cfg(feature = "try-runtime")]
+#[test]
+fn try_state_works() {
+ use crate::{Config, Freezes, Holds};
+ use frame_support::{
+ storage,
+ traits::{Get, Hooks, VariantCount},
+ };
+
+ ExtBuilder::default().build_and_execute_with(|| {
+ storage::unhashed::put(
+ &Holds::<Test>::hashed_key_for(1),
+ &vec![0u8; <Test as Config>::RuntimeHoldReason::VARIANT_COUNT as usize + 1],
+ );
+
+ assert!(format!("{:?}", Balances::try_state(0).unwrap_err())
+ .contains("Found `Hold` with too many elements"));
+ });
+
+ ExtBuilder::default().build_and_execute_with(|| {
+ let max_freezes: u32 = <Test as Config>::MaxFreezes::get();
+
+ storage::unhashed::put(
+ &Freezes::<Test>::hashed_key_for(1),
+ &vec![0u8; max_freezes as usize + 1],
+ );
+
+ assert!(format!("{:?}", Balances::try_state(0).unwrap_err())
+ .contains("Found `Freeze` with too many elements"));
+ });
+}
-- 
GitLab

From 65c52484295ae8f83e5e5b08678d753434149379 Mon Sep 17 00:00:00 2001
From: Clara van Staden
Date: Fri, 17 May 2024 13:41:06 +0200
Subject: [PATCH 031/106] Snowbridge - Ethereum Client - Public storage items
 (#4501)

Changes the Ethereum client storage scope to public, so it can be set in a migration.
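For illustration, a sketch of the kind of migration this unlocks (the migration struct and the value written are hypothetical; only the now-public storage items come from this patch, and the crate path is assumed):

```rust
use frame_support::traits::OnRuntimeUpgrade;
use sp_core::H256;

/// Hypothetical migration that rewrites the finalized block root; writing to this
/// storage item from outside the pallet is only possible now that it is `pub`.
pub struct SetFinalizedRoot<T>(core::marker::PhantomData<T>);

impl<T: snowbridge_pallet_ethereum_client::Config> OnRuntimeUpgrade for SetFinalizedRoot<T> {
	fn on_runtime_upgrade() -> frame_support::weights::Weight {
		// Placeholder value; a real migration would compute or hardcode the new root.
		let new_root = H256::zero();
		snowbridge_pallet_ethereum_client::LatestFinalizedBlockRoot::<T>::put(new_root);
		T::DbWeight::get().writes(1)
	}
}
```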
When merged, we should backport to all the other release branches:

- [ ] release-crates-io-v1.7.0 - patch release the fellows BridgeHubs runtimes https://github.com/paritytech/polkadot-sdk/pull/4504
- [ ] release-crates-io-v1.8.0 - https://github.com/paritytech/polkadot-sdk/pull/4505
- [ ] release-crates-io-v1.9.0 - https://github.com/paritytech/polkadot-sdk/pull/4506
- [ ] release-crates-io-v1.10.0 - https://github.com/paritytech/polkadot-sdk/pull/4507
- [ ] release-crates-io-v1.11.0 - https://github.com/paritytech/polkadot-sdk/pull/4508
- [ ] release-crates-io-v1.12.0 (commit soon)
---
 .../pallets/ethereum-client/src/lib.rs        | 18 ++++++++----------
 .../pallets/ethereum-client/src/types.rs      |  2 +-
 2 files changed, 9 insertions(+), 11 deletions(-)

diff --git a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs
index 0ba1b8df465..6a5972ca7a1 100644
--- a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs
+++ b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs
@@ -139,41 +139,39 @@ pub mod pallet {
 /// Latest imported checkpoint root
 #[pallet::storage]
 #[pallet::getter(fn initial_checkpoint_root)]
- pub(super) type InitialCheckpointRoot<T: Config> = StorageValue<_, H256, ValueQuery>;
+ pub type InitialCheckpointRoot<T: Config> = StorageValue<_, H256, ValueQuery>;
 /// Latest imported finalized block root
 #[pallet::storage]
 #[pallet::getter(fn latest_finalized_block_root)]
- pub(super) type LatestFinalizedBlockRoot<T: Config> = StorageValue<_, H256, ValueQuery>;
+ pub type LatestFinalizedBlockRoot<T: Config> = StorageValue<_, H256, ValueQuery>;
 /// Beacon state by finalized block root
 #[pallet::storage]
 #[pallet::getter(fn finalized_beacon_state)]
- pub(super) type FinalizedBeaconState<T: Config> =
+ pub type FinalizedBeaconState<T: Config> =
 StorageMap<_, Identity, H256, CompactBeaconState, OptionQuery>;
 /// Finalized Headers: Current position in ring buffer
 #[pallet::storage]
- pub(crate) type FinalizedBeaconStateIndex<T: Config> = StorageValue<_, u32, ValueQuery>;
+ pub type FinalizedBeaconStateIndex<T: Config> = StorageValue<_, u32, ValueQuery>;
 /// Finalized Headers: Mapping of ring buffer index to a pruning candidate
 #[pallet::storage]
- pub(crate) type FinalizedBeaconStateMapping<T: Config> =
+ pub type FinalizedBeaconStateMapping<T: Config> =
 StorageMap<_, Identity, u32, H256, ValueQuery>;
 #[pallet::storage]
 #[pallet::getter(fn validators_root)]
- pub(super) type ValidatorsRoot<T: Config> = StorageValue<_, H256, ValueQuery>;
+ pub type ValidatorsRoot<T: Config> = StorageValue<_, H256, ValueQuery>;
 /// Sync committee for current period
 #[pallet::storage]
- pub(super) type CurrentSyncCommittee<T: Config> =
- StorageValue<_, SyncCommitteePrepared, ValueQuery>;
+ pub type CurrentSyncCommittee<T: Config> = StorageValue<_, SyncCommitteePrepared, ValueQuery>;
 /// Sync committee for next period
 #[pallet::storage]
- pub(super) type NextSyncCommittee<T: Config> =
- StorageValue<_, SyncCommitteePrepared, ValueQuery>;
+ pub type NextSyncCommittee<T: Config> = StorageValue<_, SyncCommitteePrepared, ValueQuery>;
 /// The current operating mode of the pallet.
#[pallet::storage]
diff --git a/bridges/snowbridge/pallets/ethereum-client/src/types.rs b/bridges/snowbridge/pallets/ethereum-client/src/types.rs
index 8808f989754..92b9f77f739 100644
--- a/bridges/snowbridge/pallets/ethereum-client/src/types.rs
+++ b/bridges/snowbridge/pallets/ethereum-client/src/types.rs
@@ -18,7 +18,7 @@ pub type NextSyncCommitteeUpdate = primitives::NextSyncCommitteeUpdate;
 pub use primitives::{AncestryProof, ExecutionProof};
 /// FinalizedState ring buffer implementation
-pub(crate) type FinalizedBeaconStateBuffer<T> = RingBufferMapImpl<
+pub type FinalizedBeaconStateBuffer<T> = RingBufferMapImpl<
 u32,
 crate::MaxFinalizedHeadersToKeep<T>,
 crate::FinalizedBeaconStateIndex<T>,
-- 
GitLab

From 2e36f571e5c9486819b85561d12fa4001018e953 Mon Sep 17 00:00:00 2001
From: Ankan <10196091+Ank4n@users.noreply.github.com>
Date: Fri, 17 May 2024 14:09:00 +0200
Subject: [PATCH 032/106] Allow pool to be destroyed with an extra (erroneous)
 consumer reference on the pool account (#4503)

addresses https://github.com/paritytech/polkadot-sdk/issues/4440 (will close once we have this in prod runtimes).
related: https://github.com/paritytech/polkadot-sdk/issues/2037.

An extra consumer reference is preventing pools from being destroyed. When a pool is ready to be destroyed, we can safely clear the consumer references, if any. Notably, I only check for one extra consumer reference, since that is a known bug. Anything more possibly indicates another issue, and we probably don't want to silently absorb those errors as well.

After this change, pools with an extra consumer reference should be able to be destroyed normally.
---
 prdoc/pr_4503.prdoc                           | 13 +++
 substrate/frame/nomination-pools/src/lib.rs   | 17 +++-
 substrate/frame/nomination-pools/src/mock.rs  |  3 +-
 substrate/frame/nomination-pools/src/tests.rs | 86 ++++++++++++++++++
 .../nomination-pools/test-staking/src/lib.rs  | 89 +++++++++++++++++++
 5 files changed, 206 insertions(+), 2 deletions(-)
 create mode 100644 prdoc/pr_4503.prdoc

diff --git a/prdoc/pr_4503.prdoc b/prdoc/pr_4503.prdoc
new file mode 100644
index 00000000000..d95a24cc7d6
--- /dev/null
+++ b/prdoc/pr_4503.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Patch pool to handle extra consumer ref when destroying.
+
+doc:
+ - audience: Runtime User
+ description: |
+ An erroneous consumer reference on the pool account is preventing pools from being destroyed. This patch removes the extra reference if it exists when the pool account is destroyed.
+
+crates:
+ - name: pallet-nomination-pools
+ bump: patch
diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs
index 0fdb7e3eff5..95d23f2280a 100644
--- a/substrate/frame/nomination-pools/src/lib.rs
+++ b/substrate/frame/nomination-pools/src/lib.rs
@@ -2254,6 +2254,7 @@ pub mod pallet {
 SubPoolsStorage::<T>::get(member.pool_id).ok_or(Error::<T>::SubPoolsNotFound)?;
 bonded_pool.ok_to_withdraw_unbonded_with(&caller, &member_account)?;
+ let pool_account = bonded_pool.bonded_account();
 // NOTE: must do this after we have done the `ok_to_withdraw_unbonded_other_with` check.
 let withdrawn_points = member.withdraw_unlocked(current_era);
@@ -2262,7 +2263,7 @@ pub mod pallet {
 // Before calculating the `balance_to_unbond`, we call withdraw unbonded to ensure the
 // `transferrable_balance` is correct.
let stash_killed =
- T::Staking::withdraw_unbonded(bonded_pool.bonded_account(), num_slashing_spans)?;
+ T::Staking::withdraw_unbonded(pool_account.clone(), num_slashing_spans)?;
 // defensive-only: the depositor puts enough funds into the stash so that it will only
 // be destroyed when they are leaving.
@@ -2271,6 +2272,20 @@ pub mod pallet {
 Error::<T>::Defensive(DefensiveError::BondedStashKilledPrematurely)
 );
+ if stash_killed {
+ // Maybe an extra consumer left on the pool account, if so, remove it.
+ if frame_system::Pallet::<T>::consumers(&pool_account) == 1 {
+ frame_system::Pallet::<T>::dec_consumers(&pool_account);
+ }
+
+ // Note: This is not pretty, but we have to do this because of a bug where old pool
+ // accounts might have had an extra consumer increment. We know at this point no
+ // other pallet should depend on pool account so safe to do this.
+ // Refer to following issues:
+ // - https://github.com/paritytech/polkadot-sdk/issues/4440
+ // - https://github.com/paritytech/polkadot-sdk/issues/2037
+ }
+
 let mut sum_unlocked_points: BalanceOf<T> = Zero::zero();
 let balance_to_unbond = withdrawn_points
 .iter()
diff --git a/substrate/frame/nomination-pools/src/mock.rs b/substrate/frame/nomination-pools/src/mock.rs
index 686402b8434..e34719a7b80 100644
--- a/substrate/frame/nomination-pools/src/mock.rs
+++ b/substrate/frame/nomination-pools/src/mock.rs
@@ -160,7 +160,8 @@ impl sp_staking::StakingInterface for StakingMock {
 Pools::on_withdraw(&who, unlocking_before.saturating_sub(unlocking(&staker_map)));
 UnbondingBalanceMap::set(&unbonding_map);
- Ok(UnbondingBalanceMap::get().is_empty() && BondedBalanceMap::get().is_empty())
+ Ok(UnbondingBalanceMap::get().get(&who).unwrap().is_empty() &&
+ BondedBalanceMap::get().get(&who).unwrap().is_zero())
 }
 fn bond(stash: &Self::AccountId, value: Self::Balance, _: &Self::AccountId) -> DispatchResult {
diff --git a/substrate/frame/nomination-pools/src/tests.rs b/substrate/frame/nomination-pools/src/tests.rs
index f6ef1e6eaac..535e7537469 100644
--- a/substrate/frame/nomination-pools/src/tests.rs
+++ b/substrate/frame/nomination-pools/src/tests.rs
@@ -4594,6 +4594,92 @@ mod withdraw_unbonded {
 assert_eq!(ClaimPermissions::<Runtime>::contains_key(20), false);
 });
 }
+
+ #[test]
+ fn destroy_works_without_erroneous_extra_consumer() {
+ ExtBuilder::default().ed(1).build_and_execute(|| {
+ // 10 is the depositor for pool 1, with min join bond 10.
+ // set pool to destroying.
+ unsafe_set_state(1, PoolState::Destroying);
+
+ // set current era
+ CurrentEra::set(1);
+ assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 10));
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Created { depositor: 10, pool_id: 1 },
+ Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
+ Event::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10, era: 4 },
+ ]
+ );
+
+ // move to era when unbonded funds can be withdrawn.
+ CurrentEra::set(4);
+ assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0));
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Withdrawn { member: 10, pool_id: 1, points: 10, balance: 10 },
+ Event::MemberRemoved { pool_id: 1, member: 10 },
+ Event::Destroyed { pool_id: 1 },
+ ]
+ );
+
+ // pool is destroyed.
+ assert!(!Metadata::<Runtime>::contains_key(1));
+ // ensure the pool account is reaped.
+ assert!(!frame_system::Account::<Runtime>::contains_key(&Pools::create_bonded_account(1)));
+ })
+ }
+
+ #[test]
+ fn destroy_works_with_erroneous_extra_consumer() {
+ ExtBuilder::default().ed(1).build_and_execute(|| {
+ // 10 is the depositor for pool 1, with min join bond 10.
+ let pool_one = Pools::create_bonded_account(1);
+
+ // set pool to destroying.
+ unsafe_set_state(1, PoolState::Destroying);
+
+ // set current era
+ CurrentEra::set(1);
+ assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 10));
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Created { depositor: 10, pool_id: 1 },
+ Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
+ Event::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10, era: 4 },
+ ]
+ );
+
+ // move to era when unbonded funds can be withdrawn.
+ CurrentEra::set(4);
+
+ // increment consumer by 1 reproducing the erroneous consumer bug.
+ // refer https://github.com/paritytech/polkadot-sdk/issues/4440.
+ assert_ok!(frame_system::Pallet::<Runtime>::inc_consumers(&pool_one));
+ assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0));
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ Event::Withdrawn { member: 10, pool_id: 1, points: 10, balance: 10 },
+ Event::MemberRemoved { pool_id: 1, member: 10 },
+ Event::Destroyed { pool_id: 1 },
+ ]
+ );
+
+ // pool is destroyed.
+ assert!(!Metadata::<Runtime>::contains_key(1));
+ // ensure the pool account is reaped.
+ assert!(!frame_system::Account::<Runtime>::contains_key(&pool_one));
+ })
+ }
 }
 mod create {
diff --git a/substrate/frame/nomination-pools/test-staking/src/lib.rs b/substrate/frame/nomination-pools/test-staking/src/lib.rs
index d84e09e32ba..aa913502590 100644
--- a/substrate/frame/nomination-pools/test-staking/src/lib.rs
+++ b/substrate/frame/nomination-pools/test-staking/src/lib.rs
@@ -193,6 +193,95 @@ fn pool_lifecycle_e2e() {
 })
 }
+#[test]
+fn destroy_pool_with_erroneous_consumer() {
+ new_test_ext().execute_with(|| {
+ // create the pool, we know this has id 1.
+ assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10));
+ assert_eq!(LastPoolId::<Runtime>::get(), 1);
+
+ // expect consumers on pool account to be 2 (staking lock and an explicit inc by staking).
+ assert_eq!(frame_system::Pallet::<Runtime>::consumers(&POOL1_BONDED), 2);
+
+ // increment consumer by 1 reproducing the erroneous consumer bug.
+ // refer https://github.com/paritytech/polkadot-sdk/issues/4440.
+ assert_ok!(frame_system::Pallet::<Runtime>::inc_consumers(&POOL1_BONDED));
+ assert_eq!(frame_system::Pallet::<Runtime>::consumers(&POOL1_BONDED), 3);
+
+ // have the pool nominate.
+ assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]));
+
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 50 }]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Created { depositor: 10, pool_id: 1 },
+ PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 50, joined: true },
+ ]
+ );
+
+ // pool goes into destroying
+ assert_ok!(Pools::set_state(RuntimeOrigin::signed(10), 1, PoolState::Destroying));
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::StateChanged { pool_id: 1, new_state: PoolState::Destroying },]
+ );
+
+ // move to era 1
+ CurrentEra::<Runtime>::set(Some(1));
+
+ // depositor need to chill before unbonding
+ assert_noop!(
+ Pools::unbond(RuntimeOrigin::signed(10), 10, 50),
+ pallet_staking::Error::<Runtime>::InsufficientBond
+ );
+
+ assert_ok!(Pools::chill(RuntimeOrigin::signed(10), 1));
+ assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 50));
+
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![
+ StakingEvent::Chilled { stash: POOL1_BONDED },
+ StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 50 },
+ ]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Unbonded {
+ member: 10,
+ pool_id: 1,
+ points: 50,
+ balance: 50,
+ era: 1 + 3
+ }]
+ );
+
+ // waiting bonding duration:
+ CurrentEra::<Runtime>::set(Some(1 + 3));
+ // this should work even with an extra consumer count on pool account.
+ assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 1));
+
+ // pools is fully destroyed now.
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 50 },]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Withdrawn { member: 10, pool_id: 1, points: 50, balance: 50 },
+ PoolsEvent::MemberRemoved { pool_id: 1, member: 10 },
+ PoolsEvent::Destroyed { pool_id: 1 }
+ ]
+ );
+ })
+}
+
 #[test]
 fn pool_chill_e2e() {
 new_test_ext().execute_with(|| {
-- 
GitLab

From a90d324d5b3252033e00a96d9f9f4890b1cfc982 Mon Sep 17 00:00:00 2001
From: PG Herveou
Date: Fri, 17 May 2024 15:47:01 +0200
Subject: [PATCH 033/106] Contracts: Remove topics for internal events (#4510)

---
 prdoc/pr_4510.prdoc                           | 13 ++++
 substrate/frame/contracts/src/exec.rs         | 58 +++++++-------
 substrate/frame/contracts/src/lib.rs          | 24 +++---
 .../frame/contracts/src/storage/meter.rs      | 30 +++----
 substrate/frame/contracts/src/tests.rs        | 78 ++++++++-----------
 substrate/frame/contracts/src/wasm/mod.rs     | 22 +++---
 6 files changed, 107 insertions(+), 118 deletions(-)
 create mode 100644 prdoc/pr_4510.prdoc

diff --git a/prdoc/pr_4510.prdoc b/prdoc/pr_4510.prdoc
new file mode 100644
index 00000000000..fbd9bf961fe
--- /dev/null
+++ b/prdoc/pr_4510.prdoc
@@ -0,0 +1,13 @@
+title: "[Contracts] Remove internal topic index"
+
+doc:
+ - audience: Runtime Dev
+ description: |
+ This PR removes topics from internal events emitted by pallet_contracts. It does not touch the `deposit_event` host function used by
+ smart contracts, which can still include topics.
+ Event topics incur significant storage costs and are only used by light clients to index events and avoid downloading the entire block.
+ They are not used by Dapps or indexers, which download the whole block anyway.
+
+crates:
+ - name: pallet-contracts
+ bump: patch
diff --git a/substrate/frame/contracts/src/exec.rs b/substrate/frame/contracts/src/exec.rs
index 31cdadb4bb4..21ebb1e8c5f 100644
--- a/substrate/frame/contracts/src/exec.rs
+++ b/substrate/frame/contracts/src/exec.rs
@@ -46,7 +46,7 @@ use sp_core::{
 };
 use sp_io::{crypto::secp256k1_ecdsa_recover_compressed, hashing::blake2_256};
 use sp_runtime::{
- traits::{Convert, Dispatchable, Hash, Zero},
+ traits::{Convert, Dispatchable, Zero},
 DispatchError,
 };
 use sp_std::{fmt::Debug, marker::PhantomData, mem, prelude::*, vec::Vec};
@@ -983,16 +983,16 @@ where
 let caller = self.caller().account_id()?.clone();
 // Deposit an instantiation event.
- Contracts::<T>::deposit_event(
- vec![T::Hashing::hash_of(&caller), T::Hashing::hash_of(account_id)],
- Event::Instantiated { deployer: caller, contract: account_id.clone() },
- );
+ Contracts::<T>::deposit_event(Event::Instantiated {
+ deployer: caller,
+ contract: account_id.clone(),
+ });
 },
 (ExportedFunction::Call, Some(code_hash)) => {
- Contracts::<T>::deposit_event(
- vec![T::Hashing::hash_of(account_id), T::Hashing::hash_of(&code_hash)],
- Event::DelegateCalled { contract: account_id.clone(), code_hash },
- );
+ Contracts::<T>::deposit_event(Event::DelegateCalled {
+ contract: account_id.clone(),
+ code_hash,
+ });
 },
 (ExportedFunction::Call, None) => {
 // If a special limit was set for the sub-call, we enforce it here.
@@ -1002,10 +1002,10 @@ where
 frame.nested_storage.enforce_subcall_limit(contract)?;
 let caller = self.caller();
- Contracts::<T>::deposit_event(
- vec![T::Hashing::hash_of(&caller), T::Hashing::hash_of(&account_id)],
- Event::Called { caller: caller.clone(), contract: account_id.clone() },
- );
+ Contracts::<T>::deposit_event(Event::Called {
+ caller: caller.clone(),
+ contract: account_id.clone(),
+ });
 },
 }
@@ -1324,13 +1324,10 @@ where
 .charge_deposit(frame.account_id.clone(), StorageDeposit::Refund(*deposit));
 }
- Contracts::<T>::deposit_event(
- vec![T::Hashing::hash_of(&frame.account_id), T::Hashing::hash_of(&beneficiary)],
- Event::Terminated {
- contract: frame.account_id.clone(),
- beneficiary: beneficiary.clone(),
- },
- );
+ Contracts::<T>::deposit_event(Event::Terminated {
+ contract: frame.account_id.clone(),
+ beneficiary: beneficiary.clone(),
+ });
 Ok(())
 }
@@ -1422,7 +1419,7 @@ where
 }
 fn deposit_event(&mut self, topics: Vec<TopicOf<T>>, data: Vec<u8>) {
- Contracts::<T>::deposit_event(
+ Contracts::<T>::deposit_indexed_event(
 topics,
 Event::ContractEmitted { contract: self.top_frame().account_id.clone(), data },
 );
@@ -1527,14 +1524,11 @@ where
 Self::increment_refcount(hash)?;
 Self::decrement_refcount(prev_hash);
- Contracts::<T>::deposit_event(
- vec![T::Hashing::hash_of(&frame.account_id), hash, prev_hash],
- Event::ContractCodeUpdated {
- contract: frame.account_id.clone(),
- new_code_hash: hash,
- old_code_hash: prev_hash,
- },
- );
+ Contracts::<T>::deposit_event(Event::ContractCodeUpdated {
+ contract: frame.account_id.clone(),
+ new_code_hash: hash,
+ old_code_hash: prev_hash,
+ });
 Ok(())
 }
@@ -1639,7 +1633,7 @@ mod tests {
 exec::ExportedFunction::*,
 gas::GasMeter,
 tests::{
- test_utils::{get_balance, hash, place_contract, set_balance},
+ test_utils::{get_balance, place_contract, set_balance},
 ExtBuilder, RuntimeCall, RuntimeEvent as MetaEvent, Test, TestFilter, ALICE, BOB,
 CHARLIE, GAS_LIMIT,
 },
@@ -3164,7 +3158,7 @@
 caller: Origin::from_account_id(ALICE),
 contract: BOB,
 }),
- topics: vec![hash(&Origin::<Test>::from_account_id(ALICE)), hash(&BOB)],
+ topics: vec![],
 },
 ]
 );
@@ -3264,7 +3258,7 @@ mod
tests {
 caller: Origin::from_account_id(ALICE),
 contract: BOB,
 }),
- topics: vec![hash(&Origin::<Test>::from_account_id(ALICE)), hash(&BOB)],
+ topics: vec![],
 },
 ]
 );
diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs
index 3e87eb9f37e..d20f3c15fb5 100644
--- a/substrate/frame/contracts/src/lib.rs
+++ b/substrate/frame/contracts/src/lib.rs
@@ -135,7 +135,7 @@ use frame_system::{
 use scale_info::TypeInfo;
 use smallvec::Array;
 use sp_runtime::{
- traits::{Convert, Dispatchable, Hash, Saturating, StaticLookup, Zero},
+ traits::{Convert, Dispatchable, Saturating, StaticLookup, Zero},
 DispatchError, RuntimeDebug,
 };
 use sp_std::{fmt::Debug, prelude::*};
@@ -833,14 +833,11 @@ pub mod pallet {
 };
 <WasmBlob<T>>::increment_refcount(code_hash)?;
 <WasmBlob<T>>::decrement_refcount(contract.code_hash);
- Self::deposit_event(
- vec![T::Hashing::hash_of(&dest), code_hash, contract.code_hash],
- Event::ContractCodeUpdated {
- contract: dest.clone(),
- new_code_hash: code_hash,
- old_code_hash: contract.code_hash,
- },
- );
+ Self::deposit_event(Event::ContractCodeUpdated {
+ contract: dest.clone(),
+ new_code_hash: code_hash,
+ old_code_hash: contract.code_hash,
+ });
 contract.code_hash = code_hash;
 Ok(())
 })
@@ -1827,8 +1824,13 @@ impl<T: Config> Pallet<T> {
 Ok(())
 }
- /// Deposit a pallet contracts event. Handles the conversion to the overarching event type.
- fn deposit_event(topics: Vec<T::Hash>, event: Event<T>) {
+ /// Deposit a pallet contracts event.
+ fn deposit_event(event: Event<T>) {
+ <frame_system::Pallet<T>>::deposit_event(<T as Config>::RuntimeEvent::from(event))
+ }
+
+ /// Deposit a pallet contracts indexed event.
+ fn deposit_indexed_event(topics: Vec<T::Hash>, event: Event<T>) {
 <frame_system::Pallet<T>>::deposit_event_indexed(
 &topics,
 <T as Config>::RuntimeEvent::from(event).into(),
diff --git a/substrate/frame/contracts/src/storage/meter.rs b/substrate/frame/contracts/src/storage/meter.rs
index 5db9a772ad8..7c55ce5d3f0 100644
--- a/substrate/frame/contracts/src/storage/meter.rs
+++ b/substrate/frame/contracts/src/storage/meter.rs
@@ -34,10 +34,10 @@ use frame_support::{
 DefaultNoBound, RuntimeDebugNoBound,
 };
 use sp_runtime::{
- traits::{Hash as HashT, Saturating, Zero},
+ traits::{Saturating, Zero},
 DispatchError, FixedPointNumber, FixedU128,
 };
-use sp_std::{fmt::Debug, marker::PhantomData, vec, vec::Vec};
+use sp_std::{fmt::Debug, marker::PhantomData, vec::Vec};
 /// Deposit that uses the native fungible's balance type.
pub type DepositOf<T> = Deposit<BalanceOf<T>>;
@@ -551,14 +551,11 @@ impl<T: Config> Ext<T> for ReservingExt {
 Fortitude::Polite,
 )?;
- Pallet::<T>::deposit_event(
- vec![T::Hashing::hash_of(&origin), T::Hashing::hash_of(&contract)],
- Event::StorageDepositTransferredAndHeld {
- from: origin.clone(),
- to: contract.clone(),
- amount: *amount,
- },
- );
+ Pallet::<T>::deposit_event(Event::StorageDepositTransferredAndHeld {
+ from: origin.clone(),
+ to: contract.clone(),
+ amount: *amount,
+ });
 },
 Deposit::Refund(amount) => {
 let transferred = T::Currency::transfer_on_hold(
@@ -571,14 +568,11 @@ impl<T: Config> Ext<T> for ReservingExt {
 Fortitude::Polite,
 )?;
- Pallet::<T>::deposit_event(
- vec![T::Hashing::hash_of(&contract), T::Hashing::hash_of(&origin)],
- Event::StorageDepositTransferredAndReleased {
- from: contract.clone(),
- to: origin.clone(),
- amount: transferred,
- },
- );
+ Pallet::<T>::deposit_event(Event::StorageDepositTransferredAndReleased {
+ from: contract.clone(),
+ to: origin.clone(),
+ amount: transferred,
+ });
 if transferred < *amount {
 // This should never happen, if it does it means that there is a bug in the
diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs
index 8fe845fcf0f..251c037d317 100644
--- a/substrate/frame/contracts/src/tests.rs
+++ b/substrate/frame/contracts/src/tests.rs
@@ -20,13 +20,13 @@ mod test_debug;
 use self::{
 test_debug::TestDebug,
- test_utils::{ensure_stored, expected_deposit, hash},
+ test_utils::{ensure_stored, expected_deposit},
 };
 use crate::{
 self as pallet_contracts,
 chain_extension::{
 ChainExtension, Environment, Ext, InitState, RegisteredChainExtension,
- Result as ExtensionResult, RetVal, ReturnFlags, SysConfig,
+ Result as ExtensionResult, RetVal, ReturnFlags,
 },
 exec::{Frame, Key},
 migration::codegen::LATEST_MIGRATION_VERSION,
@@ -63,7 +63,7 @@ use sp_io::hashing::blake2_256;
 use sp_keystore::{testing::MemoryKeystore, KeystoreExt};
 use sp_runtime::{
 testing::H256,
- traits::{BlakeTwo256, Convert, Hash, IdentityLookup},
+ traits::{BlakeTwo256, Convert, IdentityLookup},
 AccountId32, BuildStorage, DispatchError, Perbill, TokenError,
 };
@@ -97,7 +97,7 @@ macro_rules! assert_refcount {
 }
 pub mod test_utils {
- use super::{Contracts, DepositPerByte, DepositPerItem, Hash, SysConfig, Test};
+ use super::{Contracts, DepositPerByte, DepositPerItem, Test};
 use crate::{
 exec::AccountIdOf, BalanceOf, CodeHash, CodeInfo, CodeInfoOf, Config, ContractInfo,
 ContractInfoOf, Nonce, PristineCode,
@@ -145,9 +145,6 @@ pub mod test_utils {
 .saturating_mul(info_size)
 .saturating_add(DepositPerItem::get())
 }
- pub fn hash<S: Encode>(s: &S) -> <<Test as SysConfig>::Hashing as Hash>::Output {
- <<Test as SysConfig>::Hashing as Hash>::hash_of(s)
- }
 pub fn expected_deposit(code_len: usize) -> u64 {
 // For code_info, the deposit for max_encoded_len is taken.
 let code_info_len = CodeInfo::<Test>::max_encoded_len() as u64;
let code_info_len = CodeInfo::::max_encoded_len() as u64; @@ -768,7 +765,7 @@ fn instantiate_and_call_and_deposit_event() { deployer: ALICE, contract: addr.clone() }), - topics: vec![hash(&ALICE), hash(&addr)], + topics: vec![], }, EventRecord { phase: Phase::Initialization, @@ -779,7 +776,7 @@ fn instantiate_and_call_and_deposit_event() { amount: test_utils::contract_info_storage_deposit(&addr), } ), - topics: vec![hash(&ALICE), hash(&addr)], + topics: vec![], }, ] ); @@ -1039,7 +1036,7 @@ fn deploy_and_call_other_contract() { deployer: caller_addr.clone(), contract: callee_addr.clone(), }), - topics: vec![hash(&caller_addr), hash(&callee_addr)], + topics: vec![], }, EventRecord { phase: Phase::Initialization, @@ -1056,10 +1053,7 @@ fn deploy_and_call_other_contract() { caller: Origin::from_account_id(caller_addr.clone()), contract: callee_addr.clone(), }), - topics: vec![ - hash(&Origin::::from_account_id(caller_addr.clone())), - hash(&callee_addr) - ], + topics: vec![], }, EventRecord { phase: Phase::Initialization, @@ -1067,7 +1061,7 @@ fn deploy_and_call_other_contract() { caller: Origin::from_account_id(ALICE), contract: caller_addr.clone(), }), - topics: vec![hash(&Origin::::from_account_id(ALICE)), hash(&caller_addr)], + topics: vec![], }, EventRecord { phase: Phase::Initialization, @@ -1078,7 +1072,7 @@ fn deploy_and_call_other_contract() { amount: test_utils::contract_info_storage_deposit(&callee_addr), } ), - topics: vec![hash(&ALICE), hash(&callee_addr)], + topics: vec![], }, ] ); @@ -1304,7 +1298,7 @@ fn self_destruct_works() { contract: addr.clone(), beneficiary: DJANGO }), - topics: vec![hash(&addr), hash(&DJANGO)], + topics: vec![], }, EventRecord { phase: Phase::Initialization, @@ -1312,7 +1306,7 @@ fn self_destruct_works() { caller: Origin::from_account_id(ALICE), contract: addr.clone(), }), - topics: vec![hash(&Origin::::from_account_id(ALICE)), hash(&addr)], + topics: vec![], }, EventRecord { phase: Phase::Initialization, @@ -1323,7 +1317,7 @@ fn self_destruct_works() { amount: info_deposit, } ), - topics: vec![hash(&addr), hash(&ALICE)], + topics: vec![], }, EventRecord { phase: Phase::Initialization, @@ -2511,7 +2505,7 @@ fn upload_code_works() { deposit_held: deposit_expected, uploader: ALICE }), - topics: vec![code_hash], + topics: vec![], },] ); }); @@ -2599,7 +2593,7 @@ fn remove_code_works() { deposit_held: deposit_expected, uploader: ALICE }), - topics: vec![code_hash], + topics: vec![], }, EventRecord { phase: Phase::Initialization, @@ -2608,7 +2602,7 @@ fn remove_code_works() { deposit_released: deposit_expected, remover: ALICE }), - topics: vec![code_hash], + topics: vec![], }, ] ); @@ -2648,7 +2642,7 @@ fn remove_code_wrong_origin() { deposit_held: deposit_expected, uploader: ALICE }), - topics: vec![code_hash], + topics: vec![], },] ); }); @@ -2727,7 +2721,7 @@ fn instantiate_with_zero_balance_works() { deposit_held: deposit_expected, uploader: ALICE }), - topics: vec![code_hash], + topics: vec![], }, EventRecord { phase: Phase::Initialization, @@ -2759,7 +2753,7 @@ fn instantiate_with_zero_balance_works() { deployer: ALICE, contract: addr.clone(), }), - topics: vec![hash(&ALICE), hash(&addr)], + topics: vec![], }, EventRecord { phase: Phase::Initialization, @@ -2770,7 +2764,7 @@ fn instantiate_with_zero_balance_works() { amount: test_utils::contract_info_storage_deposit(&addr), } ), - topics: vec![hash(&ALICE), hash(&addr)], + topics: vec![], }, ] ); @@ -2812,7 +2806,7 @@ fn instantiate_with_below_existential_deposit_works() { deposit_held: 
deposit_expected,
 uploader: ALICE
 }),
- topics: vec![code_hash],
+ topics: vec![],
 },
 EventRecord {
 phase: Phase::Initialization,
@@ -2853,7 +2847,7 @@ fn instantiate_with_below_existential_deposit_works() {
 deployer: ALICE,
 contract: addr.clone(),
 }),
- topics: vec![hash(&ALICE), hash(&addr)],
+ topics: vec![],
 },
 EventRecord {
 phase: Phase::Initialization,
@@ -2864,7 +2858,7 @@ fn instantiate_with_below_existential_deposit_works() {
 amount: test_utils::contract_info_storage_deposit(&addr),
 }
 ),
- topics: vec![hash(&ALICE), hash(&addr)],
+ topics: vec![],
 },
 ]
 );
@@ -2925,7 +2919,7 @@ fn storage_deposit_works() {
 caller: Origin::from_account_id(ALICE),
 contract: addr.clone(),
 }),
- topics: vec![hash(&Origin::<Test>::from_account_id(ALICE)), hash(&addr)],
+ topics: vec![],
 },
 EventRecord {
 phase: Phase::Initialization,
@@ -2936,7 +2930,7 @@ fn storage_deposit_works() {
 amount: charged0,
 }
 ),
- topics: vec![hash(&ALICE), hash(&addr)],
+ topics: vec![],
 },
 EventRecord {
 phase: Phase::Initialization,
@@ -2944,7 +2938,7 @@ fn storage_deposit_works() {
 caller: Origin::from_account_id(ALICE),
 contract: addr.clone(),
 }),
- topics: vec![hash(&Origin::<Test>::from_account_id(ALICE)), hash(&addr)],
+ topics: vec![],
 },
 EventRecord {
 phase: Phase::Initialization,
@@ -2955,7 +2949,7 @@ fn storage_deposit_works() {
 amount: charged1,
 }
 ),
- topics: vec![hash(&ALICE), hash(&addr)],
+ topics: vec![],
 },
 EventRecord {
 phase: Phase::Initialization,
@@ -2963,7 +2957,7 @@ fn storage_deposit_works() {
 caller: Origin::from_account_id(ALICE),
 contract: addr.clone(),
 }),
- topics: vec![hash(&Origin::<Test>::from_account_id(ALICE)), hash(&addr)],
+ topics: vec![],
 },
 EventRecord {
 phase: Phase::Initialization,
@@ -2974,7 +2968,7 @@ fn storage_deposit_works() {
 amount: refunded0,
 }
 ),
- topics: vec![hash(&addr.clone()), hash(&ALICE)],
+ topics: vec![],
 },
 ]
 );
@@ -3078,7 +3072,7 @@ fn set_code_extrinsic() {
 new_code_hash,
 old_code_hash: code_hash,
 }),
- topics: vec![hash(&addr), new_code_hash, code_hash],
+ topics: vec![],
 },]
 );
 });
@@ -3230,7 +3224,7 @@ fn set_code_hash() {
 new_code_hash,
 old_code_hash: code_hash,
 }),
- topics: vec![hash(&contract_addr), new_code_hash, code_hash],
+ topics: vec![],
 },
 EventRecord {
 phase: Phase::Initialization,
@@ -3238,10 +3232,7 @@ fn set_code_hash() {
 caller: Origin::from_account_id(ALICE),
 contract: contract_addr.clone(),
 }),
- topics: vec![
- hash(&Origin::<Test>::from_account_id(ALICE)),
- hash(&contract_addr)
- ],
+ topics: vec![],
 },
 EventRecord {
 phase: Phase::Initialization,
@@ -3249,10 +3240,7 @@ fn set_code_hash() {
 caller: Origin::from_account_id(ALICE),
 contract: contract_addr.clone(),
 }),
- topics: vec![
- hash(&Origin::<Test>::from_account_id(ALICE)),
- hash(&contract_addr)
- ],
+ topics: vec![],
 },
 ],
 );
diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs
index 8d7f928dba3..b40eb699db9 100644
--- a/substrate/frame/contracts/src/wasm/mod.rs
+++ b/substrate/frame/contracts/src/wasm/mod.rs
@@ -184,10 +184,11 @@ impl<T: Config> WasmBlob<T> {
 *existing = None;
 <PristineCode<T>>::remove(&code_hash);
- <Pallet<T>>::deposit_event(
- vec![code_hash],
- Event::CodeRemoved { code_hash, deposit_released, remover },
- );
+ <Pallet<T>>::deposit_event(Event::CodeRemoved {
+ code_hash,
+ deposit_released,
+ remover,
+ });
 Ok(())
 } else {
 Err(<Error<T>>::CodeNotFound.into())
 }
 }
@@ -271,14 +272,11 @@ impl<T: Config> WasmBlob<T> {
 self.code_info.refcount = 0;
 <PristineCode<T>>::insert(code_hash, &self.code);
 *stored_code_info = Some(self.code_info.clone());
- <Pallet<T>>::deposit_event(
- vec![code_hash],
- Event::CodeStored {
- code_hash,
- deposit_held: deposit,
- uploader: self.code_info.owner.clone(),
- },
- );
+ <Pallet<T>>::deposit_event(Event::CodeStored {
+ code_hash,
+ deposit_held: deposit,
+ uploader: self.code_info.owner.clone(),
+ });
 Ok(deposit)
 },
 }
-- 
GitLab

From 247358a86f874bfa109575dd086a6478dbc96eb4 Mon Sep 17 00:00:00 2001
From: jimwfs
Date: Sun, 19 May 2024 23:31:02 +0800
Subject: [PATCH 034/106] chore: fix typos (#4515)

Co-authored-by: jimwfs <169986508+jimwfs@users.noreply.github.com>
---
 polkadot/node/core/approval-voting/src/persisted_entries.rs    | 2 +-
 polkadot/node/gum/src/lib.rs                                   | 2 +-
 polkadot/node/network/bridge/src/network.rs                    | 2 +-
 polkadot/node/network/dispute-distribution/src/receiver/mod.rs | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/polkadot/node/core/approval-voting/src/persisted_entries.rs b/polkadot/node/core/approval-voting/src/persisted_entries.rs
index 6eeb99cb99f..59a46181005 100644
--- a/polkadot/node/core/approval-voting/src/persisted_entries.rs
+++ b/polkadot/node/core/approval-voting/src/persisted_entries.rs
@@ -559,7 +559,7 @@ impl BlockEntry {
 self.distributed_assignments.resize(new_len, false);
 self.distributed_assignments |= bitfield;
- // If the an operation did not change our current bitfied, we return true.
+ // If the an operation did not change our current bitfield, we return true.
 let distributed = total_one_bits == self.distributed_assignments.count_ones();
 distributed
diff --git a/polkadot/node/gum/src/lib.rs b/polkadot/node/gum/src/lib.rs
index dad5887af22..f78e20cdecf 100644
--- a/polkadot/node/gum/src/lib.rs
+++ b/polkadot/node/gum/src/lib.rs
@@ -40,7 +40,7 @@
 //!
 //! ### Log levels
 //!
-//! All of the the [`tracing` macros](https://docs.rs/tracing/latest/tracing/index.html#macros) are available.
+//! All of the [`tracing` macros](https://docs.rs/tracing/latest/tracing/index.html#macros) are available.
 //! In decreasing order of priority they are:
 //!
 //! - `error!`
diff --git a/polkadot/node/network/bridge/src/network.rs b/polkadot/node/network/bridge/src/network.rs
index 5691c8413ad..17d6676b843 100644
--- a/polkadot/node/network/bridge/src/network.rs
+++ b/polkadot/node/network/bridge/src/network.rs
@@ -177,7 +177,7 @@ fn send_message(
 // network used `Bytes` this would not be necessary.
 //
 // peer may have gotten disconnect by the time `send_message()` is called
- // at which point the the sink is not available.
WakePeerQueuesPopReqs(Vec>), -- GitLab From e7b6d7dffd6459174f02598bd8b84fe4b1cb6e72 Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Mon, 20 May 2024 03:53:12 +1000 Subject: [PATCH 035/106] `remote-externalities`: `rpc_child_get_keys` to use paged scraping (#4512) Replace usage of deprecated `substrate_rpc_client::ChildStateApi::storage_keys` with `substrate_rpc_client::ChildStateApi::storage_keys_paged`. Required for successful scraping of Aleph Zero state. --- .../frame/remote-externalities/src/lib.rs | 53 +++++++++++++------ 1 file changed, 37 insertions(+), 16 deletions(-) diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index 201b5e176f3..0ecb98f3134 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -834,30 +834,51 @@ where ) -> Result, &'static str> { let retry_strategy = FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL).take(Self::MAX_RETRIES); - let get_child_keys_closure = || { - #[allow(deprecated)] - substrate_rpc_client::ChildStateApi::storage_keys( - client, - PrefixedStorageKey::new(prefixed_top_key.as_ref().to_vec()), - child_prefix.clone(), - Some(at), - ) - }; - let child_keys = - Retry::spawn(retry_strategy, get_child_keys_closure).await.map_err(|e| { - error!(target: LOG_TARGET, "Error = {:?}", e); - "rpc child_get_keys failed." - })?; + let mut all_child_keys = Vec::new(); + let mut start_key = None; + + loop { + let get_child_keys_closure = || { + let top_key = PrefixedStorageKey::new(prefixed_top_key.0.clone()); + substrate_rpc_client::ChildStateApi::storage_keys_paged( + client, + top_key, + Some(child_prefix.clone()), + Self::DEFAULT_KEY_DOWNLOAD_PAGE, + start_key.clone(), + Some(at), + ) + }; + + let child_keys = Retry::spawn(retry_strategy.clone(), get_child_keys_closure) + .await + .map_err(|e| { + error!(target: LOG_TARGET, "Error = {:?}", e); + "rpc child_get_keys failed." + })?; + + let keys_count = child_keys.len(); + if keys_count == 0 { + break; + } + + start_key = child_keys.last().cloned(); + all_child_keys.extend(child_keys); + + if keys_count < Self::DEFAULT_KEY_DOWNLOAD_PAGE as usize { + break; + } + } debug!( target: LOG_TARGET, "[thread = {:?}] scraped {} child-keys of the child-bearing top key: {}", std::thread::current().id(), - child_keys.len(), + all_child_keys.len(), HexDisplay::from(prefixed_top_key) ); - Ok(child_keys) + Ok(all_child_keys) } } -- GitLab From 313fe0f9a277f27a4228634f0fb15a1c3fa21271 Mon Sep 17 00:00:00 2001 From: "polka.dom" Date: Mon, 20 May 2024 02:36:48 -0400 Subject: [PATCH 036/106] Remove usage of the pallet::getter macro from pallet-fast-unstake (#4514) As per #3326, removes pallet::getter macro usage from pallet-fast-unstake. The syntax `StorageItem::::get()` should be used instead. 
cc @muraca

---------

Co-authored-by: Liam Aharon
---
 prdoc/pr_4514.prdoc                     | 14 ++++++++++++++
 substrate/frame/fast-unstake/src/lib.rs |  7 +++----
 2 files changed, 17 insertions(+), 4 deletions(-)
 create mode 100644 prdoc/pr_4514.prdoc

diff --git a/prdoc/pr_4514.prdoc b/prdoc/pr_4514.prdoc
new file mode 100644
index 00000000000..dab6cab3034
--- /dev/null
+++ b/prdoc/pr_4514.prdoc
@@ -0,0 +1,14 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Removed `pallet::getter` usage from pallet-fast-unstake
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR removed the `pallet::getter`s from `pallet-fast-unstake`.
+      The syntax `StorageItem::<T>::get()` should be used instead.
+
+crates:
+  - name: pallet-fast-unstake
+    bump: major
diff --git a/substrate/frame/fast-unstake/src/lib.rs b/substrate/frame/fast-unstake/src/lib.rs
index 8ba30620131..f31c9c64026 100644
--- a/substrate/frame/fast-unstake/src/lib.rs
+++ b/substrate/frame/fast-unstake/src/lib.rs
@@ -141,7 +141,7 @@ macro_rules! log {
 	($level:tt, $patter:expr $(, $values:expr)* $(,)?) => {
 		log::$level!(
 			target: crate::LOG_TARGET,
-			concat!("[{:?}] 💨 ", $patter), <frame_system::Pallet<T>>::block_number() $(, $values)*
+			concat!("[{:?}] 💨 ", $patter), frame_system::Pallet::<T>::block_number() $(, $values)*
 		)
 	};
 }
@@ -227,7 +227,6 @@ pub mod pallet {
 	/// checked. The checking is represented by updating [`UnstakeRequest::checked`], which is
 	/// stored in [`Head`].
 	#[pallet::storage]
-	#[pallet::getter(fn eras_to_check_per_block)]
 	pub type ErasToCheckPerBlock<T: Config> = StorageValue<_, u32, ValueQuery>;
 
 	#[pallet::event]
@@ -332,7 +331,7 @@ pub mod pallet {
 		pub fn register_fast_unstake(origin: OriginFor<T>) -> DispatchResult {
 			let ctrl = ensure_signed(origin)?;
 
-			ensure!(ErasToCheckPerBlock::<T>::get() != 0, <Error<T>>::CallNotAllowed);
+			ensure!(ErasToCheckPerBlock::<T>::get() != 0, Error::<T>::CallNotAllowed);
 			let stash_account =
 				T::Staking::stash_by_ctrl(&ctrl).map_err(|_| Error::<T>::NotController)?;
 			ensure!(!Queue::<T>::contains_key(&stash_account), Error::<T>::AlreadyQueued);
@@ -373,7 +372,7 @@ pub mod pallet {
 		pub fn deregister(origin: OriginFor<T>) -> DispatchResult {
 			let ctrl = ensure_signed(origin)?;
 
-			ensure!(ErasToCheckPerBlock::<T>::get() != 0, <Error<T>>::CallNotAllowed);
+			ensure!(ErasToCheckPerBlock::<T>::get() != 0, Error::<T>::CallNotAllowed);
 			let stash_account =
 				T::Staking::stash_by_ctrl(&ctrl).map_err(|_| Error::<T>::NotController)?;
-- 
GitLab


From 278486f9bf7db06c174203f098eec2f91839757a Mon Sep 17 00:00:00 2001
From: Alin Dima
Date: Tue, 21 May 2024 11:14:42 +0300
Subject: [PATCH 037/106] Remove the prospective-parachains subsystem from
 collators (#4471)

Implements https://github.com/paritytech/polkadot-sdk/issues/4429

Collators only need to maintain the implicit view for the paraid they are
collating on. In this case, bypass prospective-parachains entirely. It's
still useful to use the GetMinimumRelayParents message from
prospective-parachains for validators, because the data is already present
there.
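In rough terms, the collator-side replacement (implemented in
`fetch_min_relay_parents_for_collator` below) reads `allowed_ancestry_len`
from the async backing parameters, asks the Chain API for that many ancestors
and walks them while they stay within the leaf's session. A simplified,
self-contained sketch of that walk (a plain synchronous function; the names
and the slice-based session lookup are illustrative only - the real code is
async and goes through the Chain API and runtime API subsystems):

```rust
/// Minimal sketch: compute the lowest allowed relay-parent number for a leaf,
/// walking at most `allowed_ancestry_len` ancestors and stopping at the first
/// ancestor from a different session.
fn min_relay_parent_number(
	leaf_number: u32,
	leaf_session: u32,
	// Sessions of the leaf's ancestors, newest first (fetched via ChainApi
	// `Ancestors` + `SessionIndexForChild` in the real implementation).
	ancestor_sessions: &[u32],
	allowed_ancestry_len: usize,
) -> u32 {
	let mut min = leaf_number;
	for session in ancestor_sessions.iter().take(allowed_ancestry_len) {
		if *session == leaf_session {
			// Never underflows in practice: the ancestry stops at genesis.
			min = min.saturating_sub(1);
		} else {
			// The relay chain cannot accept candidates backed by validators
			// of a previous session, so the allowed ancestry ends here.
			break
		}
	}
	min
}
```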
This enables us to entirely remove the subsystem from collators, which consumed resources needlessly Aims to resolve https://github.com/paritytech/polkadot-sdk/issues/4167 TODO: - [x] fix unit tests --- .../src/tests/prospective_parachains.rs | 21 +- .../src/collator_side/mod.rs | 58 ++- .../tests/prospective_parachains.rs | 102 +++- .../tests/prospective_parachains.rs | 20 +- .../src/v2/tests/mod.rs | 26 +- polkadot/node/service/src/overseer.rs | 4 +- .../src/backing_implicit_view.rs | 463 +++++++++++++++--- prdoc/pr_4471.prdoc | 16 + 8 files changed, 581 insertions(+), 129 deletions(-) create mode 100644 prdoc/pr_4471.prdoc diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index 8a72902f081..c93cf21ef7d 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -67,15 +67,6 @@ async fn activate_leaf( .min() .unwrap_or(&leaf_number); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx) - ) if parent == leaf_hash => { - tx.send(min_relay_parents).unwrap(); - } - ); - let ancestry_len = leaf_number + 1 - min_min; let ancestry_hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h))) @@ -117,6 +108,18 @@ async fn activate_leaf( tx.send(Ok(Some(header))).unwrap(); } ); + + if requested_len == 0 { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx) + ) if parent == leaf_hash => { + tx.send(min_relay_parents.clone()).unwrap(); + } + ); + } + requested_len += 1; } } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index f227e3855fa..88375d58309 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -264,7 +264,9 @@ struct State { /// never included in the fragment chains of active leaves which do. In /// particular, this means that if a given relay parent belongs to implicit /// ancestry of some active leaf, then it does support prospective parachains. - implicit_view: ImplicitView, + /// + /// It's `None` if the collator is not yet collating for a paraid. + implicit_view: Option, /// All active leaves observed by us, including both that do and do not /// support prospective parachains. This mapping works as a replacement for @@ -334,7 +336,7 @@ impl State { metrics, collating_on: Default::default(), peer_data: Default::default(), - implicit_view: Default::default(), + implicit_view: None, active_leaves: Default::default(), per_relay_parent: Default::default(), span_per_relay_parent: Default::default(), @@ -539,11 +541,12 @@ async fn distribute_collation( .filter(|(_, PeerData { view: v, .. })| match relay_parent_mode { ProspectiveParachainsMode::Disabled => v.contains(&candidate_relay_parent), ProspectiveParachainsMode::Enabled { .. 
} => v.iter().any(|block_hash| { - state - .implicit_view - .known_allowed_relay_parents_under(block_hash, Some(id)) - .unwrap_or_default() - .contains(&candidate_relay_parent) + state.implicit_view.as_ref().map(|implicit_view| { + implicit_view + .known_allowed_relay_parents_under(block_hash, Some(id)) + .unwrap_or_default() + .contains(&candidate_relay_parent) + }) == Some(true) }), }); @@ -830,6 +833,7 @@ async fn process_msg( match msg { CollateOn(id) => { state.collating_on = Some(id); + state.implicit_view = Some(ImplicitView::new(Some(id))); }, DistributeCollation { candidate_receipt, @@ -1215,7 +1219,10 @@ async fn handle_peer_view_change( Some(ProspectiveParachainsMode::Disabled) => std::slice::from_ref(&added), Some(ProspectiveParachainsMode::Enabled { .. }) => state .implicit_view - .known_allowed_relay_parents_under(&added, state.collating_on) + .as_ref() + .and_then(|implicit_view| { + implicit_view.known_allowed_relay_parents_under(&added, state.collating_on) + }) .unwrap_or_default(), None => { gum::trace!( @@ -1353,21 +1360,22 @@ where state.per_relay_parent.insert(*leaf, PerRelayParent::new(mode)); if mode.is_enabled() { - state - .implicit_view - .activate_leaf(sender, *leaf) - .await - .map_err(Error::ImplicitViewFetchError)?; + if let Some(ref mut implicit_view) = state.implicit_view { + implicit_view + .activate_leaf(sender, *leaf) + .await + .map_err(Error::ImplicitViewFetchError)?; - let allowed_ancestry = state - .implicit_view - .known_allowed_relay_parents_under(leaf, state.collating_on) - .unwrap_or_default(); - for block_hash in allowed_ancestry { - state - .per_relay_parent - .entry(*block_hash) - .or_insert_with(|| PerRelayParent::new(mode)); + let allowed_ancestry = implicit_view + .known_allowed_relay_parents_under(leaf, state.collating_on) + .unwrap_or_default(); + + for block_hash in allowed_ancestry { + state + .per_relay_parent + .entry(*block_hash) + .or_insert_with(|| PerRelayParent::new(mode)); + } } } } @@ -1378,7 +1386,11 @@ where // of implicit ancestry. Only update the state after the hash is actually // pruned from the block info storage. let pruned = if mode.is_enabled() { - state.implicit_view.deactivate_leaf(*leaf) + state + .implicit_view + .as_mut() + .map(|view| view.deactivate_leaf(*leaf)) + .unwrap_or_default() } else { vec![*leaf] }; diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs index 70705354563..2a147aef69e 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs @@ -18,7 +18,7 @@ use super::*; -use polkadot_node_subsystem::messages::{ChainApiMessage, ProspectiveParachainsMessage}; +use polkadot_node_subsystem::messages::ChainApiMessage; use polkadot_primitives::{AsyncBackingParams, Header, OccupiedCore}; const ASYNC_BACKING_PARAMETERS: AsyncBackingParams = @@ -31,7 +31,6 @@ fn get_parent_hash(hash: Hash) -> Hash { /// Handle a view update. async fn update_view( virtual_overseer: &mut VirtualOverseer, - test_state: &TestState, new_view: Vec<(Hash, u32)>, // Hash and block number. activated: u8, // How many new heads does this update contain? 
) { @@ -61,21 +60,88 @@ async fn update_view( let min_number = leaf_number.saturating_sub(ASYNC_BACKING_PARAMETERS.allowed_ancestry_len); - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx), - ) if parent == leaf_hash => { - tx.send(vec![(test_state.para_id, min_number)]).unwrap(); - } - ); - let ancestry_len = leaf_number + 1 - min_number; let ancestry_hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h))) .take(ancestry_len as usize); let ancestry_numbers = (min_number..=leaf_number).rev(); let mut ancestry_iter = ancestry_hashes.clone().zip(ancestry_numbers).peekable(); + if let Some((hash, number)) = ancestry_iter.next() { + assert_matches!( + overseer_recv_with_timeout(virtual_overseer, Duration::from_millis(50)).await.unwrap(), + AllMessages::ChainApi(ChainApiMessage::BlockHeader(.., tx)) => { + let header = Header { + parent_hash: get_parent_hash(hash), + number, + state_root: Hash::zero(), + extrinsics_root: Hash::zero(), + digest: Default::default(), + }; + + tx.send(Ok(Some(header))).unwrap(); + } + ); + + assert_matches!( + overseer_recv_with_timeout(virtual_overseer, Duration::from_millis(50)).await.unwrap(), + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + .., + RuntimeApiRequest::AsyncBackingParams( + tx + ) + ) + ) => { + tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); + } + ); + + assert_matches!( + overseer_recv_with_timeout(virtual_overseer, Duration::from_millis(50)).await.unwrap(), + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + .., + RuntimeApiRequest::SessionIndexForChild( + tx + ) + ) + ) => { + tx.send(Ok(1)).unwrap(); + } + ); + + assert_matches!( + overseer_recv_with_timeout(virtual_overseer, Duration::from_millis(50)).await.unwrap(), + AllMessages::ChainApi( + ChainApiMessage::Ancestors { + k, + response_channel: tx, + .. + } + ) => { + assert_eq!(k, ASYNC_BACKING_PARAMETERS.allowed_ancestry_len as usize); + + tx.send(Ok(ancestry_hashes.clone().skip(1).into_iter().collect())).unwrap(); + } + ); + } + + for _ in ancestry_iter.clone() { + assert_matches!( + overseer_recv_with_timeout(virtual_overseer, Duration::from_millis(50)).await.unwrap(), + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + .., + RuntimeApiRequest::SessionIndexForChild( + tx + ) + ) + ) => { + tx.send(Ok(1)).unwrap(); + } + ); + } + while let Some((hash, number)) = ancestry_iter.next() { // May be `None` for the last element. let parent_hash = @@ -195,7 +261,7 @@ fn distribute_collation_from_implicit_view() { overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id)) .await; // Activated leaf is `b`, but the collation will be based on `c`. - update_view(virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + update_view(virtual_overseer, vec![(head_b, head_b_num)], 1).await; let validator_peer_ids = test_state.current_group_validator_peer_ids(); for (val, peer) in test_state @@ -258,7 +324,7 @@ fn distribute_collation_from_implicit_view() { // Head `c` goes out of view. // Build a different candidate for this relay parent and attempt to distribute it. 
- update_view(virtual_overseer, &test_state, vec![(head_a, head_a_num)], 1).await; + update_view(virtual_overseer, vec![(head_a, head_a_num)], 1).await; let pov = PoV { block_data: BlockData(vec![4, 5, 6]) }; let parent_head_data_hash = Hash::repeat_byte(0xBB); @@ -318,7 +384,7 @@ fn distribute_collation_up_to_limit() { overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id)) .await; // Activated leaf is `a`, but the collation will be based on `b`. - update_view(virtual_overseer, &test_state, vec![(head_a, head_a_num)], 1).await; + update_view(virtual_overseer, vec![(head_a, head_a_num)], 1).await; for i in 0..(ASYNC_BACKING_PARAMETERS.max_candidate_depth + 1) { let pov = PoV { block_data: BlockData(vec![i as u8]) }; @@ -402,7 +468,7 @@ fn send_parent_head_data_for_elastic_scaling() { CollatorProtocolMessage::CollateOn(test_state.para_id), ) .await; - update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + update_view(&mut virtual_overseer, vec![(head_b, head_b_num)], 1).await; let pov_data = PoV { block_data: BlockData(vec![1 as u8]) }; let candidate = TestCandidateBuilder { @@ -517,8 +583,8 @@ fn advertise_and_send_collation_by_hash() { CollatorProtocolMessage::CollateOn(test_state.para_id), ) .await; - update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; - update_view(&mut virtual_overseer, &test_state, vec![(head_a, head_a_num)], 1).await; + update_view(&mut virtual_overseer, vec![(head_b, head_b_num)], 1).await; + update_view(&mut virtual_overseer, vec![(head_a, head_a_num)], 1).await; let candidates: Vec<_> = (0..2) .map(|i| { @@ -638,7 +704,7 @@ fn advertise_core_occupied() { overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id)) .await; // Activated leaf is `a`, but the collation will be based on `b`. 
- update_view(virtual_overseer, &test_state, vec![(head_a, head_a_num)], 1).await; + update_view(virtual_overseer, vec![(head_a, head_a_num)], 1).await; let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; let candidate = TestCandidateBuilder { diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs index 785690121da..178dcb85e03 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -116,15 +116,6 @@ pub(super) async fn update_view( let min_number = leaf_number.saturating_sub(ASYNC_BACKING_PARAMETERS.allowed_ancestry_len); - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx), - ) if parent == leaf_hash => { - tx.send(test_state.chain_ids.iter().map(|para_id| (*para_id, min_number)).collect()).unwrap(); - } - ); - let ancestry_len = leaf_number + 1 - min_number; let ancestry_hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h))) .take(ancestry_len as usize); @@ -166,6 +157,17 @@ pub(super) async fn update_view( } ); + if requested_len == 0 { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx), + ) if parent == leaf_hash => { + tx.send(test_state.chain_ids.iter().map(|para_id| (*para_id, min_number)).collect()).unwrap(); + } + ); + } + requested_len += 1; } } diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs index d32e2323ba3..f9a484f47a9 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs @@ -586,19 +586,6 @@ async fn handle_leaf_activation( } ); - let mrp_response: Vec<(ParaId, BlockNumber)> = para_data - .iter() - .map(|(para_id, data)| (*para_id, data.min_relay_parent)) - .collect(); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx) - ) if parent == *hash => { - tx.send(mrp_response).unwrap(); - } - ); - let header = Header { parent_hash: *parent_hash, number: *number, @@ -615,6 +602,19 @@ async fn handle_leaf_activation( } ); + let mrp_response: Vec<(ParaId, BlockNumber)> = para_data + .iter() + .map(|(para_id, data)| (*para_id, data.min_relay_parent)) + .collect(); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx) + ) if parent == *hash => { + tx.send(mrp_response).unwrap(); + } + ); + loop { match virtual_overseer.recv().await { AllMessages::RuntimeApi(RuntimeApiMessage::Request( diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index 26b1446bf51..4b7777a0967 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -385,7 +385,7 @@ pub fn collator_overseer_builder( DummySubsystem, DummySubsystem, DummySubsystem, - ProspectiveParachainsSubsystem, + DummySubsystem, >, Error, > @@ -462,7 +462,7 @@ where .dispute_coordinator(DummySubsystem) .dispute_distribution(DummySubsystem) 
.chain_selection(DummySubsystem) - .prospective_parachains(ProspectiveParachainsSubsystem::new(Metrics::register(registry)?)) + .prospective_parachains(DummySubsystem) .activation_external_listeners(Default::default()) .span_per_active_leaf(Default::default()) .active_leaves(Default::default()) diff --git a/polkadot/node/subsystem-util/src/backing_implicit_view.rs b/polkadot/node/subsystem-util/src/backing_implicit_view.rs index a14536a1766..23a758d2571 100644 --- a/polkadot/node/subsystem-util/src/backing_implicit_view.rs +++ b/polkadot/node/subsystem-util/src/backing_implicit_view.rs @@ -17,23 +17,45 @@ use futures::channel::oneshot; use polkadot_node_subsystem::{ errors::ChainApiError, - messages::{ChainApiMessage, ProspectiveParachainsMessage}, + messages::{ChainApiMessage, ProspectiveParachainsMessage, RuntimeApiMessage}, SubsystemSender, }; use polkadot_primitives::{BlockNumber, Hash, Id as ParaId}; use std::collections::HashMap; +use crate::{ + request_session_index_for_child, + runtime::{self, prospective_parachains_mode, recv_runtime, ProspectiveParachainsMode}, +}; + // Always aim to retain 1 block before the active leaves. const MINIMUM_RETAIN_LENGTH: BlockNumber = 2; /// Handles the implicit view of the relay chain derived from the immediate view, which /// is composed of active leaves, and the minimum relay-parents allowed for /// candidates of various parachains at those leaves. -#[derive(Default, Clone)] +#[derive(Clone)] pub struct View { leaves: HashMap, block_info_storage: HashMap, + collating_for: Option, +} + +impl View { + /// Create a new empty view. + /// If `collating_for` is `Some`, the node is a collator and is only interested in the allowed + /// relay parents of a single paraid. When this is true, prospective-parachains is no longer + /// queried. + pub fn new(collating_for: Option) -> Self { + Self { leaves: Default::default(), block_info_storage: Default::default(), collating_for } + } +} + +impl Default for View { + fn default() -> Self { + Self::new(None) + } } // Minimum relay parents implicitly relative to a particular block. @@ -106,15 +128,13 @@ impl View { } /// Activate a leaf in the view. - /// This will request the minimum relay parents from the - /// Prospective Parachains subsystem for each leaf and will load headers in the ancestry of each - /// leaf in the view as needed. These are the 'implicit ancestors' of the leaf. + /// This will request the minimum relay parents the leaf and will load headers in the + /// ancestry of the leaf as needed. These are the 'implicit ancestors' of the leaf. /// /// To maximize reuse of outdated leaves, it's best to activate new leaves before /// deactivating old ones. /// - /// This returns a list of para-ids which are relevant to the leaf, - /// and the allowed relay parents for these paras under this leaf can be + /// The allowed relay parents for the relevant paras under this leaf can be /// queried with [`View::known_allowed_relay_parents_under`]. /// /// No-op for known leaves. 
@@ -122,10 +142,11 @@ impl View { &mut self, sender: &mut Sender, leaf_hash: Hash, - ) -> Result, FetchError> + ) -> Result<(), FetchError> where - Sender: SubsystemSender, - Sender: SubsystemSender, + Sender: SubsystemSender + + SubsystemSender + + SubsystemSender, { if self.leaves.contains_key(&leaf_hash) { return Err(FetchError::AlreadyKnown) @@ -135,6 +156,7 @@ impl View { leaf_hash, &mut self.block_info_storage, &mut *sender, + self.collating_for, ) .await; @@ -150,7 +172,7 @@ impl View { self.leaves.insert(leaf_hash, ActiveLeafPruningInfo { retain_minimum }); - Ok(fetched.relevant_paras) + Ok(()) }, Err(e) => Err(e), } @@ -249,6 +271,10 @@ pub enum FetchError { /// Request to the Chain API subsystem failed. #[error("The chain API subsystem was unavailable")] ChainApiUnavailable, + + /// Request to the runtime API failed. + #[error("Runtime API error: {0}")] + RuntimeApi(#[from] runtime::Error), } /// Reasons a block header might have been unavailable. @@ -265,30 +291,92 @@ pub enum BlockHeaderUnavailableReason { struct FetchSummary { minimum_ancestor_number: BlockNumber, leaf_number: BlockNumber, - relevant_paras: Vec, } -async fn fetch_fresh_leaf_and_insert_ancestry( +// Request the min relay parents from prospective-parachains. +async fn fetch_min_relay_parents_from_prospective_parachains< + Sender: SubsystemSender, +>( leaf_hash: Hash, - block_info_storage: &mut HashMap, sender: &mut Sender, -) -> Result +) -> Result, FetchError> { + let (tx, rx) = oneshot::channel(); + sender + .send_message(ProspectiveParachainsMessage::GetMinimumRelayParents(leaf_hash, tx)) + .await; + + rx.await.map_err(|_| FetchError::ProspectiveParachainsUnavailable) +} + +// Request the min relay parent for the purposes of a collator, directly using ChainApi (where +// prospective-parachains is not available). +async fn fetch_min_relay_parents_for_collator( + leaf_hash: Hash, + leaf_number: BlockNumber, + sender: &mut Sender, +) -> Result, FetchError> where - Sender: SubsystemSender, - Sender: SubsystemSender, + Sender: SubsystemSender + + SubsystemSender + + SubsystemSender, { - let min_relay_parents_raw = { - let (tx, rx) = oneshot::channel(); - sender - .send_message(ProspectiveParachainsMessage::GetMinimumRelayParents(leaf_hash, tx)) - .await; + let Ok(ProspectiveParachainsMode::Enabled { allowed_ancestry_len, .. }) = + prospective_parachains_mode(sender, leaf_hash).await + else { + // This should never happen, leaves that don't have prospective parachains mode enabled + // should not use implicit view. + return Ok(None) + }; - match rx.await { - Ok(m) => m, - Err(_) => return Err(FetchError::ProspectiveParachainsUnavailable), + // Fetch the session of the leaf. We must make sure that we stop at the ancestor which has a + // different session index. + let required_session = + recv_runtime(request_session_index_for_child(leaf_hash, sender).await).await?; + + let mut min = leaf_number; + + // Fetch the ancestors, up to allowed_ancestry_len. + let (tx, rx) = oneshot::channel(); + sender + .send_message(ChainApiMessage::Ancestors { + hash: leaf_hash, + k: allowed_ancestry_len, + response_channel: tx, + }) + .await; + let hashes = rx + .await + .map_err(|_| FetchError::ChainApiUnavailable)? + .map_err(|err| FetchError::ChainApiError(leaf_hash, err))?; + + for hash in hashes { + // The relay chain cannot accept blocks backed from previous sessions, with + // potentially previous validators. This is a technical limitation we need to + // respect here. 
+ let session = recv_runtime(request_session_index_for_child(hash, sender).await).await?; + + if session == required_session { + // We should never underflow here, the ChainAPI stops at genesis block. + min = min.saturating_sub(1); + } else { + break } - }; + } + Ok(Some(min)) +} + +async fn fetch_fresh_leaf_and_insert_ancestry( + leaf_hash: Hash, + block_info_storage: &mut HashMap, + sender: &mut Sender, + collating_for: Option, +) -> Result +where + Sender: SubsystemSender + + SubsystemSender + + SubsystemSender, +{ let leaf_header = { let (tx, rx) = oneshot::channel(); sender.send_message(ChainApiMessage::BlockHeader(leaf_hash, tx)).await; @@ -313,8 +401,18 @@ where } }; - let min_min = min_relay_parents_raw.iter().map(|x| x.1).min().unwrap_or(leaf_header.number); - let relevant_paras = min_relay_parents_raw.iter().map(|x| x.0).collect(); + // If the node is a collator, bypass prospective-parachains. We're only interested in the one + // paraid and the subsystem is not present. + let min_relay_parents = if let Some(para_id) = collating_for { + fetch_min_relay_parents_for_collator(leaf_hash, leaf_header.number, sender) + .await? + .map(|x| vec![(para_id, x)]) + .unwrap_or_default() + } else { + fetch_min_relay_parents_from_prospective_parachains(leaf_hash, sender).await? + }; + + let min_min = min_relay_parents.iter().map(|x| x.1).min().unwrap_or(leaf_header.number); let expected_ancestry_len = (leaf_header.number.saturating_sub(min_min) as usize) + 1; let ancestry = if leaf_header.number > 0 { @@ -380,14 +478,11 @@ where vec![leaf_hash] }; - let fetched_ancestry = FetchSummary { - minimum_ancestor_number: min_min, - leaf_number: leaf_header.number, - relevant_paras, - }; + let fetched_ancestry = + FetchSummary { minimum_ancestor_number: min_min, leaf_number: leaf_header.number }; let allowed_relay_parents = AllowedRelayParents { - minimum_relay_parents: min_relay_parents_raw.iter().cloned().collect(), + minimum_relay_parents: min_relay_parents.into_iter().collect(), allowed_relay_parents_contiguous: ancestry, }; @@ -408,12 +503,12 @@ mod tests { use crate::TimeoutExt; use assert_matches::assert_matches; use futures::future::{join, FutureExt}; - use polkadot_node_subsystem::AllMessages; + use polkadot_node_subsystem::{messages::RuntimeApiRequest, AllMessages}; use polkadot_node_subsystem_test_helpers::{ make_subsystem_context, TestSubsystemContextHandle, }; use polkadot_overseer::SubsystemContext; - use polkadot_primitives::Header; + use polkadot_primitives::{AsyncBackingParams, Header}; use sp_core::testing::TaskExecutor; use std::time::Duration; @@ -514,6 +609,71 @@ mod tests { ); } + async fn assert_async_backing_params_request( + virtual_overseer: &mut VirtualOverseer, + leaf: Hash, + params: AsyncBackingParams, + ) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + leaf_hash, + RuntimeApiRequest::AsyncBackingParams( + tx + ) + ) + ) => { + assert_eq!(leaf, leaf_hash, "received unexpected leaf hash"); + tx.send(Ok(params)).unwrap(); + } + ); + } + + async fn assert_session_index_request( + virtual_overseer: &mut VirtualOverseer, + leaf: Hash, + session: u32, + ) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + leaf_hash, + RuntimeApiRequest::SessionIndexForChild( + tx + ) + ) + ) => { + assert_eq!(leaf, leaf_hash, "received unexpected leaf hash"); + tx.send(Ok(session)).unwrap(); + } + ); + } + + async fn assert_ancestors_request( + 
virtual_overseer: &mut VirtualOverseer, + leaf: Hash, + expected_ancestor_len: u32, + response: Vec, + ) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ChainApi( + ChainApiMessage::Ancestors { + hash: leaf_hash, + k, + response_channel: tx + } + ) => { + assert_eq!(leaf, leaf_hash, "received unexpected leaf hash"); + assert_eq!(k, expected_ancestor_len as usize); + + tx.send(Ok(response)).unwrap(); + } + ); + } + #[test] fn construct_fresh_view() { let pool = TaskExecutor::new(); @@ -521,6 +681,8 @@ mod tests { let mut view = View::default(); + assert_eq!(view.collating_for, None); + // Chain B. const PARA_A_MIN_PARENT: u32 = 4; const PARA_B_MIN_PARENT: u32 = 3; @@ -528,15 +690,17 @@ mod tests { let prospective_response = vec![(PARA_A, PARA_A_MIN_PARENT), (PARA_B, PARA_B_MIN_PARENT)]; let leaf = CHAIN_B.last().unwrap(); + let leaf_idx = CHAIN_B.len() - 1; let min_min_idx = (PARA_B_MIN_PARENT - GENESIS_NUMBER - 1) as usize; let fut = view.activate_leaf(ctx.sender(), *leaf).timeout(TIMEOUT).map(|res| { - let paras = res.expect("`activate_leaf` timed out").unwrap(); - assert_eq!(paras, vec![PARA_A, PARA_B]); + res.expect("`activate_leaf` timed out").unwrap(); }); let overseer_fut = async { + assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[leaf_idx..]).await; assert_min_relay_parents_request(&mut ctx_handle, leaf, prospective_response).await; - assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[min_min_idx..]).await; + assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[min_min_idx..leaf_idx]) + .await; }; futures::executor::block_on(join(fut, overseer_fut)); @@ -558,6 +722,11 @@ mod tests { allowed_relay_parents.allowed_relay_parents_contiguous, expected_ancestry ); + + assert_eq!(view.known_allowed_relay_parents_under(&leaf, None), Some(&expected_ancestry[..])); + assert_eq!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_A)), Some(&expected_ancestry[..(PARA_A_MIN_PARENT - 1) as usize])); + assert_eq!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_B)), Some(&expected_ancestry[..])); + assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_C)).unwrap().is_empty()); } ); @@ -566,18 +735,188 @@ mod tests { let prospective_response = vec![(PARA_C, PARA_C_MIN_PARENT)]; let leaf = CHAIN_A.last().unwrap(); let blocks = [&[GENESIS_HASH], CHAIN_A].concat(); + let leaf_idx = blocks.len() - 1; let fut = view.activate_leaf(ctx.sender(), *leaf).timeout(TIMEOUT).map(|res| { - let paras = res.expect("`activate_leaf` timed out").unwrap(); - assert_eq!(paras, vec![PARA_C]); + res.expect("`activate_leaf` timed out").unwrap(); }); let overseer_fut = async { + assert_block_header_requests(&mut ctx_handle, CHAIN_A, &blocks[leaf_idx..]).await; assert_min_relay_parents_request(&mut ctx_handle, leaf, prospective_response).await; - assert_block_header_requests(&mut ctx_handle, CHAIN_A, &blocks).await; + assert_block_header_requests(&mut ctx_handle, CHAIN_A, &blocks[..leaf_idx]).await; + }; + futures::executor::block_on(join(fut, overseer_fut)); + + assert_eq!(view.leaves.len(), 2); + + let leaf_info = + view.block_info_storage.get(leaf).expect("block must be present in storage"); + assert_matches!( + leaf_info.maybe_allowed_relay_parents, + Some(ref allowed_relay_parents) => { + assert_eq!(allowed_relay_parents.minimum_relay_parents[&PARA_C], GENESIS_NUMBER); + let expected_ancestry: Vec = + blocks[..].iter().rev().copied().collect(); + assert_eq!( + allowed_relay_parents.allowed_relay_parents_contiguous, + 
expected_ancestry + ); + + assert_eq!(view.known_allowed_relay_parents_under(&leaf, None), Some(&expected_ancestry[..])); + assert_eq!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_C)), Some(&expected_ancestry[..])); + + assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_A)).unwrap().is_empty()); + assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_B)).unwrap().is_empty()); + } + ); + } + + #[test] + fn construct_fresh_view_single_para() { + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool); + + let mut view = View::new(Some(PARA_A)); + + assert_eq!(view.collating_for, Some(PARA_A)); + + // Chain B. + const PARA_A_MIN_PARENT: u32 = 4; + + let current_session = 2; + + let leaf = CHAIN_B.last().unwrap(); + let leaf_idx = CHAIN_B.len() - 1; + let min_min_idx = (PARA_A_MIN_PARENT - GENESIS_NUMBER - 1) as usize; + + let fut = view.activate_leaf(ctx.sender(), *leaf).timeout(TIMEOUT).map(|res| { + res.expect("`activate_leaf` timed out").unwrap(); + }); + let overseer_fut = async { + assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[leaf_idx..]).await; + + assert_async_backing_params_request( + &mut ctx_handle, + *leaf, + AsyncBackingParams { + max_candidate_depth: 0, + allowed_ancestry_len: PARA_A_MIN_PARENT, + }, + ) + .await; + + assert_session_index_request(&mut ctx_handle, *leaf, current_session).await; + + assert_ancestors_request( + &mut ctx_handle, + *leaf, + PARA_A_MIN_PARENT, + CHAIN_B[min_min_idx..leaf_idx].iter().copied().rev().collect(), + ) + .await; + + for hash in CHAIN_B[min_min_idx..leaf_idx].into_iter().rev() { + assert_session_index_request(&mut ctx_handle, *hash, current_session).await; + } + + assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[min_min_idx..leaf_idx]) + .await; }; futures::executor::block_on(join(fut, overseer_fut)); + for i in min_min_idx..(CHAIN_B.len() - 1) { + // No allowed relay parents constructed for ancestry. + assert!(view.known_allowed_relay_parents_under(&CHAIN_B[i], None).is_none()); + } + + let leaf_info = + view.block_info_storage.get(leaf).expect("block must be present in storage"); + assert_matches!( + leaf_info.maybe_allowed_relay_parents, + Some(ref allowed_relay_parents) => { + assert_eq!(allowed_relay_parents.minimum_relay_parents[&PARA_A], PARA_A_MIN_PARENT); + let expected_ancestry: Vec = + CHAIN_B[min_min_idx..].iter().rev().copied().collect(); + assert_eq!( + allowed_relay_parents.allowed_relay_parents_contiguous, + expected_ancestry + ); + + assert_eq!(view.known_allowed_relay_parents_under(&leaf, None), Some(&expected_ancestry[..])); + assert_eq!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_A)), Some(&expected_ancestry[..])); + + assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_B)).unwrap().is_empty()); + assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_C)).unwrap().is_empty()); + } + ); + + // Suppose the whole test chain A is allowed up to genesis for para A, but the genesis block + // is in a different session. 
+ let leaf = CHAIN_A.last().unwrap(); + let blocks = [&[GENESIS_HASH], CHAIN_A].concat(); + let leaf_idx = blocks.len() - 1; + + let fut = view.activate_leaf(ctx.sender(), *leaf).timeout(TIMEOUT).map(|res| { + res.expect("`activate_leaf` timed out").unwrap(); + }); + + let overseer_fut = async { + assert_block_header_requests(&mut ctx_handle, CHAIN_A, &blocks[leaf_idx..]).await; + + assert_async_backing_params_request( + &mut ctx_handle, + *leaf, + AsyncBackingParams { + max_candidate_depth: 0, + allowed_ancestry_len: blocks.len() as u32, + }, + ) + .await; + + assert_session_index_request(&mut ctx_handle, *leaf, current_session).await; + + assert_ancestors_request( + &mut ctx_handle, + *leaf, + blocks.len() as u32, + blocks[..leaf_idx].iter().rev().copied().collect(), + ) + .await; + + for hash in blocks[1..leaf_idx].into_iter().rev() { + assert_session_index_request(&mut ctx_handle, *hash, current_session).await; + } + + assert_session_index_request(&mut ctx_handle, GENESIS_HASH, 0).await; + + // We won't request for the genesis block + assert_block_header_requests(&mut ctx_handle, CHAIN_A, &blocks[1..leaf_idx]).await; + }; + + futures::executor::block_on(join(fut, overseer_fut)); + assert_eq!(view.leaves.len(), 2); + + let leaf_info = + view.block_info_storage.get(leaf).expect("block must be present in storage"); + assert_matches!( + leaf_info.maybe_allowed_relay_parents, + Some(ref allowed_relay_parents) => { + assert_eq!(allowed_relay_parents.minimum_relay_parents[&PARA_A], 1); + let expected_ancestry: Vec = + CHAIN_A[..].iter().rev().copied().collect(); + assert_eq!( + allowed_relay_parents.allowed_relay_parents_contiguous, + expected_ancestry + ); + + assert_eq!(view.known_allowed_relay_parents_under(&leaf, None), Some(&expected_ancestry[..])); + assert_eq!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_A)), Some(&expected_ancestry[..])); + + assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_B)).unwrap().is_empty()); + assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_C)).unwrap().is_empty()); + } + ); } #[test] @@ -595,15 +934,20 @@ mod tests { let prospective_response = vec![(PARA_A, PARA_A_MIN_PARENT)]; let fut = view.activate_leaf(ctx.sender(), leaf_a).timeout(TIMEOUT).map(|res| { - let paras = res.expect("`activate_leaf` timed out").unwrap(); - assert_eq!(paras, vec![PARA_A]); + res.expect("`activate_leaf` timed out").unwrap(); }); let overseer_fut = async { + assert_block_header_requests( + &mut ctx_handle, + CHAIN_B, + &CHAIN_B[(leaf_a_number - 1)..leaf_a_number], + ) + .await; assert_min_relay_parents_request(&mut ctx_handle, &leaf_a, prospective_response).await; assert_block_header_requests( &mut ctx_handle, CHAIN_B, - &CHAIN_B[min_min_idx..leaf_a_number], + &CHAIN_B[min_min_idx..(leaf_a_number - 1)], ) .await; }; @@ -617,15 +961,20 @@ mod tests { let prospective_response = vec![(PARA_B, PARA_B_MIN_PARENT)]; let fut = view.activate_leaf(ctx.sender(), leaf_b).timeout(TIMEOUT).map(|res| { - let paras = res.expect("`activate_leaf` timed out").unwrap(); - assert_eq!(paras, vec![PARA_B]); + res.expect("`activate_leaf` timed out").unwrap(); }); let overseer_fut = async { + assert_block_header_requests( + &mut ctx_handle, + CHAIN_B, + &CHAIN_B[(leaf_b_number - 1)..leaf_b_number], + ) + .await; assert_min_relay_parents_request(&mut ctx_handle, &leaf_b, prospective_response).await; assert_block_header_requests( &mut ctx_handle, CHAIN_B, - &CHAIN_B[leaf_a_number..leaf_b_number], // Note the expected range. 
+				&CHAIN_B[leaf_a_number..(leaf_b_number - 1)], // Note the expected range.
 			)
 			.await;
 		};
@@ -665,13 +1014,15 @@ mod tests {
 			.timeout(TIMEOUT)
 			.map(|res| res.unwrap().unwrap());
 		let overseer_fut = async {
-			assert_min_relay_parents_request(&mut ctx_handle, &leaf_a, prospective_response).await;
 			assert_block_header_requests(
 				&mut ctx_handle,
 				CHAIN_B,
-				&CHAIN_B[min_a_idx..=leaf_a_idx],
+				&CHAIN_B[leaf_a_idx..(leaf_a_idx + 1)],
 			)
 			.await;
+			assert_min_relay_parents_request(&mut ctx_handle, &leaf_a, prospective_response).await;
+			assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[min_a_idx..leaf_a_idx])
+				.await;
 		};
 		futures::executor::block_on(join(fut, overseer_fut));
@@ -689,8 +1040,11 @@ mod tests {
 			.timeout(TIMEOUT)
 			.map(|res| res.expect("`activate_leaf` timed out").unwrap());
 		let overseer_fut = async {
+			assert_block_header_requests(&mut ctx_handle, CHAIN_B, &blocks[(blocks.len() - 1)..])
+				.await;
 			assert_min_relay_parents_request(&mut ctx_handle, &leaf_b, prospective_response).await;
-			assert_block_header_requests(&mut ctx_handle, CHAIN_B, blocks).await;
+			assert_block_header_requests(&mut ctx_handle, CHAIN_B, &blocks[..(blocks.len() - 1)])
+				.await;
 		};
 		futures::executor::block_on(join(fut, overseer_fut));
@@ -721,19 +1075,18 @@ mod tests {
 		let prospective_response = vec![(PARA_A, PARA_A_MIN_PARENT)];
 		let fut = view.activate_leaf(ctx.sender(), GENESIS_HASH).timeout(TIMEOUT).map(|res| {
-			let paras = res.expect("`activate_leaf` timed out").unwrap();
-			assert_eq!(paras, vec![PARA_A]);
+			res.expect("`activate_leaf` timed out").unwrap();
 		});
 		let overseer_fut = async {
+			assert_block_header_requests(&mut ctx_handle, &[GENESIS_HASH], &[GENESIS_HASH]).await;
 			assert_min_relay_parents_request(&mut ctx_handle, &GENESIS_HASH, prospective_response)
 				.await;
-			assert_block_header_requests(&mut ctx_handle, &[GENESIS_HASH], &[GENESIS_HASH]).await;
 		};
 		futures::executor::block_on(join(fut, overseer_fut));
 		assert_matches!(
 			view.known_allowed_relay_parents_under(&GENESIS_HASH, None),
-			Some(hashes) if !hashes.is_empty()
+			Some(hashes) if hashes == &[GENESIS_HASH]
 		);
 	}
 }
diff --git a/prdoc/pr_4471.prdoc b/prdoc/pr_4471.prdoc
new file mode 100644
index 00000000000..6d589be81fd
--- /dev/null
+++ b/prdoc/pr_4471.prdoc
@@ -0,0 +1,16 @@
+title: "Remove prospective-parachains subsystem from collator nodes"
+
+doc:
+  - audience: Node Dev
+    description: |
+      Removes the prospective-parachains subsystem from collators. The GetMinimumRelayParents request of the implicit view
+      is replaced by direct ChainAPI and runtime calls. The subsystem was causing performance problems when a collator was
+      connected to an RPC node, due to the high number of runtime API calls, which were unnecessary for a collator.
+ +crates: + - name: polkadot-collator-protocol + bump: minor + - name: polkadot-service + bump: minor + - name: polkadot-node-subsystem-util + bump: minor -- GitLab From b00e168129cc38d68fb170dc3dc581cf5f17f5b1 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Tue, 21 May 2024 11:24:26 +0200 Subject: [PATCH 038/106] [xcm]: Use latest `Versioned*` instead of `V4` + bridges doc nits (#4528) Co-authored-by: Svyatoslav Nikolsky --- .../src/messages_benchmarking.rs | 19 +++--- bridges/modules/beefy/src/lib.rs | 2 +- bridges/modules/grandpa/src/lib.rs | 2 +- bridges/modules/messages/README.md | 12 ++-- bridges/modules/messages/src/lib.rs | 2 +- bridges/modules/parachains/src/lib.rs | 2 +- .../assets/asset-hub-rococo/src/lib.rs | 2 +- .../assets/asset-hub-westend/src/lib.rs | 2 +- .../runtimes/testing/penpal/src/lib.rs | 34 +++++++---- polkadot/runtime/rococo/src/lib.rs | 2 +- polkadot/runtime/westend/src/lib.rs | 2 +- .../tests/fee_estimation.rs | 61 +++++++++++-------- .../xcm-fee-payment-runtime-api/tests/mock.rs | 44 ++++++++----- 13 files changed, 107 insertions(+), 79 deletions(-) diff --git a/bridges/bin/runtime-common/src/messages_benchmarking.rs b/bridges/bin/runtime-common/src/messages_benchmarking.rs index 0c7a9ad1a83..74494f79080 100644 --- a/bridges/bin/runtime-common/src/messages_benchmarking.rs +++ b/bridges/bin/runtime-common/src/messages_benchmarking.rs @@ -271,7 +271,7 @@ pub fn generate_xcm_builder_bridge_message_sample( move |expected_message_size| -> MessagePayload { // For XCM bridge hubs, it is the message that // will be pushed further to some XCM queue (XCMP/UMP) - let location = xcm::VersionedInteriorLocation::V4(destination.clone()); + let location = xcm::VersionedInteriorLocation::from(destination.clone()); let location_encoded_size = location.encoded_size(); // we don't need to be super-precise with `expected_size` here @@ -294,16 +294,13 @@ pub fn generate_xcm_builder_bridge_message_sample( expected_message_size, location_encoded_size, xcm_size, xcm_data_size, ); - let xcm = xcm::VersionedXcm::<()>::V4( - vec![Instruction::<()>::ExpectPallet { - index: 0, - name: vec![42; xcm_data_size], - module_name: vec![], - crate_major: 0, - min_crate_minor: 0, - }] - .into(), - ); + let xcm = xcm::VersionedXcm::<()>::from(Xcm(vec![Instruction::<()>::ExpectPallet { + index: 0, + name: vec![42; xcm_data_size], + module_name: vec![], + crate_major: 0, + min_crate_minor: 0, + }])); // this is the `BridgeMessage` from polkadot xcm builder, but it has no constructor // or public fields, so just tuple diff --git a/bridges/modules/beefy/src/lib.rs b/bridges/modules/beefy/src/lib.rs index 27c83921021..ccddcde920f 100644 --- a/bridges/modules/beefy/src/lib.rs +++ b/bridges/modules/beefy/src/lib.rs @@ -316,7 +316,7 @@ pub mod pallet { /// Pallet owner has the right to halt all pallet operations and then resume it. If it is /// `None`, then there are no direct ways to halt/resume pallet operations, but other /// runtime methods may still be used to do that (i.e. `democracy::referendum` to update halt - /// flag directly or calling `halt_operations`). + /// flag directly or calling `set_operating_mode`). 
 	#[pallet::storage]
 	pub type PalletOwner<T: Config<I>, I: 'static = ()> =
 		StorageValue<_, T::AccountId, OptionQuery>;
 
diff --git a/bridges/modules/grandpa/src/lib.rs b/bridges/modules/grandpa/src/lib.rs
index a927882aaaa..4569fc2b19f 100644
--- a/bridges/modules/grandpa/src/lib.rs
+++ b/bridges/modules/grandpa/src/lib.rs
@@ -423,7 +423,7 @@ pub mod pallet {
 	/// Pallet owner has a right to halt all pallet operations and then resume it. If it is
 	/// `None`, then there are no direct ways to halt/resume pallet operations, but other
 	/// runtime methods may still be used to do that (i.e. democracy::referendum to update halt
-	/// flag directly or call the `halt_operations`).
+	/// flag directly or call the `set_operating_mode`).
 	#[pallet::storage]
 	pub type PalletOwner<T: Config<I>, I: 'static = ()> =
 		StorageValue<_, T::AccountId, OptionQuery>;
 
diff --git a/bridges/modules/messages/README.md b/bridges/modules/messages/README.md
index fe62305748c..c06b96b857d 100644
--- a/bridges/modules/messages/README.md
+++ b/bridges/modules/messages/README.md
@@ -187,11 +187,13 @@ There may be a special account in every runtime where the messages module is dep
 owner', is like a module-level sudo account - he's able to halt and resume all module operations
 without requiring runtime upgrade. Calls that are related to this account are:
 - `fn set_owner()`: current module owner may call it to transfer "ownership" to another account;
-- `fn halt_operations()`: the module owner (or sudo account) may call this function to stop all module operations. After
-  this call, all message-related transactions will be rejected until further `resume_operations` call'. This call may be
-  used when something extraordinary happens with the bridge;
-- `fn resume_operations()`: module owner may call this function to resume bridge operations. The module will resume its
-  regular operations after this call.
+- `fn set_operating_mode()`: the module owner (or sudo account) may call this function to pause/resume
+  pallet operations. Owner may halt the pallet by calling this method with
+  `MessagesOperatingMode::Basic(BasicOperatingMode::Halted)` argument - all message-related
+  transactions will be rejected. Owner may then resume pallet operations by passing the
+  `MessagesOperatingMode::Basic(BasicOperatingMode::Normal)` argument. There's also
+  `MessagesOperatingMode::RejectingOutboundMessages` pallet mode, where it still accepts all incoming
+  messages, but all outbound messages are rejected.
 
 If pallet owner is not defined, the governance may be used to make those calls.
 
diff --git a/bridges/modules/messages/src/lib.rs b/bridges/modules/messages/src/lib.rs
index bc00db9eba5..e31a4542056 100644
--- a/bridges/modules/messages/src/lib.rs
+++ b/bridges/modules/messages/src/lib.rs
@@ -573,7 +573,7 @@ pub mod pallet {
 	/// Pallet owner has a right to halt all pallet operations and then resume it. If it is
 	/// `None`, then there are no direct ways to halt/resume pallet operations, but other
 	/// runtime methods may still be used to do that (i.e. democracy::referendum to update halt
-	/// flag directly or call the `halt_operations`).
+	/// flag directly or call the `set_operating_mode`).
#[pallet::storage] #[pallet::getter(fn module_owner)] pub type PalletOwner, I: 'static = ()> = StorageValue<_, T::AccountId>; diff --git a/bridges/modules/parachains/src/lib.rs b/bridges/modules/parachains/src/lib.rs index 61e04aed377..d323aef3b22 100644 --- a/bridges/modules/parachains/src/lib.rs +++ b/bridges/modules/parachains/src/lib.rs @@ -260,7 +260,7 @@ pub mod pallet { /// Pallet owner has a right to halt all pallet operations and then resume them. If it is /// `None`, then there are no direct ways to halt/resume pallet operations, but other /// runtime methods may still be used to do that (i.e. democracy::referendum to update halt - /// flag directly or call the `halt_operations`). + /// flag directly or call the `set_operating_mode`). #[pallet::storage] pub type PalletOwner, I: 'static = ()> = StorageValue<_, T::AccountId, OptionQuery>; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index b0df11e1046..536736c994e 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -1350,7 +1350,7 @@ impl_runtime_apis! { let forwarded_xcms = xcm_config::XcmRouter::get_messages(); let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); Ok(ExtrinsicDryRunEffects { - local_xcm: local_xcm.map(VersionedXcm::<()>::V4), + local_xcm: local_xcm.map(VersionedXcm::<()>::from), forwarded_xcms, emitted_events: events, execution_result: result, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 062babef18d..bc99e54e707 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1386,7 +1386,7 @@ impl_runtime_apis! { let forwarded_xcms = xcm_config::XcmRouter::get_messages(); let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); Ok(ExtrinsicDryRunEffects { - local_xcm: local_xcm.map(VersionedXcm::<()>::V4), + local_xcm: local_xcm.map(VersionedXcm::<()>::from), forwarded_xcms, emitted_events: events, execution_result: result, diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 86a8b0f1d9e..8afe56cddef 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -849,24 +849,32 @@ impl_runtime_apis! 
{ impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { - if !matches!(xcm_version, 3 | 4) { - return Err(XcmPaymentApiError::UnhandledXcmVersion); - } - Ok([VersionedAssetId::V4(xcm_config::RelayLocation::get().into())] + let acceptable = vec![ + // native token + VersionedAssetId::from(AssetLocationId(xcm_config::RelayLocation::get())) + ]; + + Ok(acceptable .into_iter() .filter_map(|asset| asset.into_version(xcm_version).ok()) .collect()) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - let local_asset = VersionedAssetId::V4(xcm_config::RelayLocation::get().into()); - let asset = asset - .into_version(4) - .map_err(|_| XcmPaymentApiError::VersionedConversionFailed)?; - - if asset != local_asset { return Err(XcmPaymentApiError::AssetNotFound); } - - Ok(WeightToFee::weight_to_fee(&weight)) + match asset.try_as::() { + Ok(asset_id) if asset_id.0 == xcm_config::RelayLocation::get() => { + // for native token + Ok(WeightToFee::weight_to_fee(&weight)) + }, + Ok(asset_id) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + }, + Err(_) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } } fn query_xcm_weight(message: VersionedXcm<()>) -> Result { @@ -897,7 +905,7 @@ impl_runtime_apis! { let forwarded_xcms = xcm_config::XcmRouter::get_messages(); let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); Ok(ExtrinsicDryRunEffects { - local_xcm: local_xcm.map(VersionedXcm::<()>::V4), + local_xcm: local_xcm.map(VersionedXcm::<()>::from), forwarded_xcms, emitted_events: events, execution_result: result, diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index c22d5c39b23..b411cd55149 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1823,7 +1823,7 @@ sp_api::impl_runtime_apis! { let forwarded_xcms = xcm_config::XcmRouter::get_messages(); let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); Ok(ExtrinsicDryRunEffects { - local_xcm: local_xcm.map(VersionedXcm::<()>::V4), + local_xcm: local_xcm.map(VersionedXcm::<()>::from), forwarded_xcms, emitted_events: events, execution_result: result, diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index b62c6d08201..d8a444c41ac 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -2257,7 +2257,7 @@ sp_api::impl_runtime_apis! 
{ let forwarded_xcms = xcm_config::XcmRouter::get_messages(); let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); Ok(ExtrinsicDryRunEffects { - local_xcm: local_xcm.map(VersionedXcm::<()>::V4), + local_xcm: local_xcm.map(VersionedXcm::<()>::from), forwarded_xcms, emitted_events: events, execution_result: result, diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/tests/fee_estimation.rs b/polkadot/xcm/xcm-fee-payment-runtime-api/tests/fee_estimation.rs index 7a9bfa4a796..25a68090c22 100644 --- a/polkadot/xcm/xcm-fee-payment-runtime-api/tests/fee_estimation.rs +++ b/polkadot/xcm/xcm-fee-payment-runtime-api/tests/fee_estimation.rs @@ -52,13 +52,15 @@ fn fee_estimation_for_teleport() { let runtime_api = client.runtime_api(); let extrinsic = TestXt::new( RuntimeCall::XcmPallet(pallet_xcm::Call::transfer_assets { - dest: Box::new(VersionedLocation::V4((Parent, Parachain(1000)).into())), - beneficiary: Box::new(VersionedLocation::V4( - AccountId32 { id: [0u8; 32], network: None }.into(), - )), - assets: Box::new(VersionedAssets::V4( - vec![(Here, 100u128).into(), (Parent, 20u128).into()].into(), - )), + dest: Box::new(VersionedLocation::from((Parent, Parachain(1000)))), + beneficiary: Box::new(VersionedLocation::from(AccountId32 { + id: [0u8; 32], + network: None, + })), + assets: Box::new(VersionedAssets::from(vec![ + (Here, 100u128).into(), + (Parent, 20u128).into(), + ])), fee_asset_item: 1, // Fees are paid with the RelayToken weight_limit: Unlimited, }), @@ -69,7 +71,7 @@ fn fee_estimation_for_teleport() { assert_eq!( dry_run_effects.local_xcm, - Some(VersionedXcm::V4( + Some(VersionedXcm::from( Xcm::builder_unsafe() .withdraw_asset((Parent, 20u128)) .burn_asset((Parent, 20u128)) @@ -89,8 +91,8 @@ fn fee_estimation_for_teleport() { assert_eq!( dry_run_effects.forwarded_xcms, vec![( - VersionedLocation::V4(send_destination.clone()), - vec![VersionedXcm::V4(send_message.clone())], + VersionedLocation::from(send_destination.clone()), + vec![VersionedXcm::from(send_message.clone())], ),], ); @@ -153,7 +155,7 @@ fn fee_estimation_for_teleport() { .query_weight_to_asset_fee( H256::zero(), weight, - VersionedAssetId::V4(HereLocation::get().into()), + VersionedAssetId::from(AssetId(HereLocation::get())), ) .unwrap() .unwrap(); @@ -168,7 +170,7 @@ fn fee_estimation_for_teleport() { .query_delivery_fees(H256::zero(), destination.clone(), remote_message.clone()) .unwrap() .unwrap(); - assert_eq!(delivery_fees, VersionedAssets::V4((Here, 20u128).into())); + assert_eq!(delivery_fees, VersionedAssets::from((Here, 20u128))); // This would have to be the runtime API of the destination, // which we have the location for. 
@@ -182,7 +184,7 @@ fn fee_estimation_for_teleport() { .query_weight_to_asset_fee( H256::zero(), remote_execution_weight, - VersionedAssetId::V4(HereLocation::get().into()), + VersionedAssetId::from(AssetId(HereLocation::get())), ) .unwrap() .unwrap(); @@ -216,11 +218,12 @@ fn dry_run_reserve_asset_transfer() { let runtime_api = client.runtime_api(); let extrinsic = TestXt::new( RuntimeCall::XcmPallet(pallet_xcm::Call::transfer_assets { - dest: Box::new(VersionedLocation::V4((Parent, Parachain(1000)).into())), - beneficiary: Box::new(VersionedLocation::V4( - AccountId32 { id: [0u8; 32], network: None }.into(), - )), - assets: Box::new(VersionedAssets::V4((Parent, 100u128).into())), + dest: Box::new(VersionedLocation::from((Parent, Parachain(1000)))), + beneficiary: Box::new(VersionedLocation::from(AccountId32 { + id: [0u8; 32], + network: None, + })), + assets: Box::new(VersionedAssets::from((Parent, 100u128))), fee_asset_item: 0, weight_limit: Unlimited, }), @@ -231,7 +234,7 @@ fn dry_run_reserve_asset_transfer() { assert_eq!( dry_run_effects.local_xcm, - Some(VersionedXcm::V4( + Some(VersionedXcm::from( Xcm::builder_unsafe() .withdraw_asset((Parent, 100u128)) .burn_asset((Parent, 100u128)) @@ -251,8 +254,8 @@ fn dry_run_reserve_asset_transfer() { assert_eq!( dry_run_effects.forwarded_xcms, vec![( - VersionedLocation::V4(send_destination.clone()), - vec![VersionedXcm::V4(send_message.clone())], + VersionedLocation::from(send_destination.clone()), + vec![VersionedXcm::from(send_message.clone())], ),], ); @@ -310,11 +313,15 @@ fn dry_run_xcm() { let client = TestClient; let runtime_api = client.runtime_api(); let xcm_weight = runtime_api - .query_xcm_weight(H256::zero(), VersionedXcm::V4(xcm_to_weigh.clone().into())) + .query_xcm_weight(H256::zero(), VersionedXcm::from(xcm_to_weigh.clone().into())) .unwrap() .unwrap(); let execution_fees = runtime_api - .query_weight_to_asset_fee(H256::zero(), xcm_weight, VersionedAssetId::V4(Here.into())) + .query_weight_to_asset_fee( + H256::zero(), + xcm_weight, + VersionedAssetId::from(AssetId(Here.into())), + ) .unwrap() .unwrap(); let xcm = Xcm::::builder_unsafe() @@ -331,16 +338,16 @@ fn dry_run_xcm() { let dry_run_effects = runtime_api .dry_run_xcm( H256::zero(), - VersionedLocation::V4(AccountIndex64 { index: 1, network: None }.into()), - VersionedXcm::V4(xcm), + VersionedLocation::from([AccountIndex64 { index: 1, network: None }]), + VersionedXcm::from(xcm), ) .unwrap() .unwrap(); assert_eq!( dry_run_effects.forwarded_xcms, vec![( - VersionedLocation::V4((Parent, Parachain(2100)).into()), - vec![VersionedXcm::V4( + VersionedLocation::from((Parent, Parachain(2100))), + vec![VersionedXcm::from( Xcm::<()>::builder_unsafe() .reserve_asset_deposited(( (Parent, Parachain(2000)), diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/tests/mock.rs b/polkadot/xcm/xcm-fee-payment-runtime-api/tests/mock.rs index d7b18d90a50..a1794ab99de 100644 --- a/polkadot/xcm/xcm-fee-payment-runtime-api/tests/mock.rs +++ b/polkadot/xcm/xcm-fee-payment-runtime-api/tests/mock.rs @@ -429,8 +429,11 @@ impl sp_api::ProvideRuntimeApi for TestClient { sp_api::mock_impl_runtime_apis! 
{ impl XcmPaymentApi for RuntimeApi { fn query_acceptable_payment_assets(xcm_version: XcmVersion) -> Result, XcmPaymentApiError> { - if xcm_version != 4 { return Err(XcmPaymentApiError::UnhandledXcmVersion) }; - Ok(vec![VersionedAssetId::V4(HereLocation::get().into())]) + Ok(vec![ + VersionedAssetId::from(AssetId(HereLocation::get())) + .into_version(xcm_version) + .map_err(|_| XcmPaymentApiError::VersionedConversionFailed)? + ]) } fn query_xcm_weight(message: VersionedXcm<()>) -> Result { @@ -438,14 +441,25 @@ sp_api::mock_impl_runtime_apis! { } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - let local_asset = VersionedAssetId::V4(HereLocation::get().into()); - let asset = asset - .into_version(4) - .map_err(|_| XcmPaymentApiError::VersionedConversionFailed)?; - - if asset != local_asset { return Err(XcmPaymentApiError::AssetNotFound); } - - Ok(WeightToFee::weight_to_fee(&weight)) + match asset.try_as::() { + Ok(asset_id) if asset_id.0 == HereLocation::get() => { + Ok(WeightToFee::weight_to_fee(&weight)) + }, + Ok(asset_id) => { + log::trace!( + target: "xcm::XcmPaymentApi::query_weight_to_asset_fee", + "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!" + ); + Err(XcmPaymentApiError::AssetNotFound) + }, + Err(_) => { + log::trace!( + target: "xcm::XcmPaymentApi::query_weight_to_asset_fee", + "query_weight_to_asset_fee - failed to convert asset: {asset:?}!" + ); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } } fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result { @@ -471,12 +485,12 @@ sp_api::mock_impl_runtime_apis! { let forwarded_xcms = sent_xcm() .into_iter() .map(|(location, message)| ( - VersionedLocation::V4(location), - vec![VersionedXcm::V4(message)], + VersionedLocation::from(location), + vec![VersionedXcm::from(message)], )).collect(); let events: Vec = System::events().iter().map(|record| record.event.clone()).collect(); Ok(ExtrinsicDryRunEffects { - local_xcm: local_xcm.map(VersionedXcm::<()>::V4), + local_xcm: local_xcm.map(VersionedXcm::<()>::from), forwarded_xcms, emitted_events: events, execution_result: result, @@ -511,8 +525,8 @@ sp_api::mock_impl_runtime_apis! { let forwarded_xcms = sent_xcm() .into_iter() .map(|(location, message)| ( - VersionedLocation::V4(location), - vec![VersionedXcm::V4(message)], + VersionedLocation::from(location), + vec![VersionedXcm::from(message)], )).collect(); let events: Vec = System::events().iter().map(|record| record.event.clone()).collect(); Ok(XcmDryRunEffects { -- GitLab From d54feeb101b3779422323224c8e1ac43d3a1fafa Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 21 May 2024 13:41:49 +0300 Subject: [PATCH 039/106] Fixed RPC subscriptions leak when subscription stream is finished (#4533) closes https://github.com/paritytech/parity-bridges-common/issues/3000 Recently we've changed our bridge configuration for Rococo <> Westend and our new relayer has started to submit transactions every ~ `30` seconds. Eventually, it switches itself into a limbo state, where it can't submit more transactions - all `author_submitAndWatchExtrinsic` calls are failing with the following error: `ERROR bridge Failed to send transaction to BridgeHubRococo node: Call(ErrorObject { code: ServerError(-32006), message: "Too many subscriptions on the connection", data: Some(RawValue("Exceeded max limit of 1024")) })`. Some links for those who want to explore: - the server side (node) has a strict limit on the number of active subscriptions.
It fails to open a new subscription if this limit is hit: https://github.com/paritytech/jsonrpsee/blob/a4533966b997e83632509ad97eea010fc7c3efc0/server/src/middleware/rpc/layer/rpc_service.rs#L122-L132. The limit is set to `1024` by default; - internally, this limit is a semaphore with `limit` permits: https://github.com/paritytech/jsonrpsee/blob/a4533966b997e83632509ad97eea010fc7c3efc0/core/src/server/subscription.rs#L461-L485; - a semaphore permit is acquired in the first link; - the permit is "returned" when the `SubscriptionSink` is dropped: https://github.com/paritytech/jsonrpsee/blob/a4533966b997e83632509ad97eea010fc7c3efc0/core/src/server/subscription.rs#L310-L325; - the `SubscriptionSink` is dropped when [this `polkadot-sdk` function](https://github.com/paritytech/polkadot-sdk/blob/278486f9bf7db06c174203f098eec2f91839757a/substrate/client/rpc/src/utils.rs#L58-L94) returns. In other words, when the connection is closed, the stream is finished, or the internal subscription buffer limit is hit; - the subscription has an internal buffer, so sending an item consists of two steps: [reading an item from the underlying stream](https://github.com/paritytech/polkadot-sdk/blob/278486f9bf7db06c174203f098eec2f91839757a/substrate/client/rpc/src/utils.rs#L125-L141) and [sending it over the connection](https://github.com/paritytech/polkadot-sdk/blob/278486f9bf7db06c174203f098eec2f91839757a/substrate/client/rpc/src/utils.rs#L111-L116); - when the underlying stream is finished, the `inner_pipe_from_stream` wants to ensure that all items are sent to the subscriber. So it: [waits until the current send operation completes](https://github.com/paritytech/polkadot-sdk/blob/278486f9bf7db06c174203f098eec2f91839757a/substrate/client/rpc/src/utils.rs#L146-L148) and then [sends all remaining items from the internal buffer](https://github.com/paritytech/polkadot-sdk/blob/278486f9bf7db06c174203f098eec2f91839757a/substrate/client/rpc/src/utils.rs#L150-L155). Once it is done, the function returns, the `SubscriptionSink` is dropped, the semaphore permit is dropped, and we are ready to accept new subscriptions; - unfortunately, the code just calls `pending_fut.await.is_err()` to ensure that [the current send operation completes](https://github.com/paritytech/polkadot-sdk/blob/278486f9bf7db06c174203f098eec2f91839757a/substrate/client/rpc/src/utils.rs#L146-L148). But if there is no current send operation (which is normal), then the `pending_fut` is set to a terminated future and the `await` never completes. Hence, no return from the function, no drop of `SubscriptionSink`, no drop of the semaphore permit, and no new subscriptions allowed (once the number of subscriptions hits the limit). I've illustrated the issue with a small test - you may ensure that if e.g. the stream is initially empty, the `subscription_is_dropped_when_stream_is_empty` test will hang because `pipe_from_stream` never exits. --- prdoc/pr_4533.prdoc | 10 ++++++++++ substrate/client/rpc/src/utils.rs | 26 +++++++++++++++++++++++++- 2 files changed, 35 insertions(+), 1 deletion(-) create mode 100644 prdoc/pr_4533.prdoc diff --git a/prdoc/pr_4533.prdoc b/prdoc/pr_4533.prdoc new file mode 100644 index 00000000000..a0835285fc0 --- /dev/null +++ b/prdoc/pr_4533.prdoc @@ -0,0 +1,10 @@ +title: "Fixed RPC subscriptions leak when subscription stream is finished" + +doc: + - audience: Node Operator + description: | + The node may leak RPC subscriptions in some cases, e.g. during + `author_submitAndWatchExtrinsic` calls. This PR fixes the issue.
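The terminated-future hang described above is easy to reproduce in isolation: in the `futures` crate, a `Fuse` created with `Fuse::terminated()` reports `is_terminated() == true` and is polled as `Pending` forever, so an unconditional `.await` on it never resolves. A minimal, self-contained sketch (assuming `futures` and `tokio` with the `macros`, `rt` and `time` features as dependencies; an illustration, not the actual `sc-rpc` code):

```rust
use futures::future::{Fuse, FusedFuture};
use std::time::Duration;

#[tokio::main]
async fn main() {
    // No send operation in flight: `inner_pipe_from_stream` models this by
    // keeping `pending_fut` as an already-terminated `Fuse`.
    let pending_fut = Fuse::<futures::future::Ready<Result<(), ()>>>::terminated();
    assert!(pending_fut.is_terminated());

    // A terminated `Fuse` never completes, so an unconditional `.await`
    // would hang here; the timeout fires instead of the future resolving.
    let hung = tokio::time::timeout(Duration::from_millis(100), pending_fut).await;
    assert!(hung.is_err(), "a terminated Fuse is polled as Pending forever");
}
```

This is exactly why the fix in the hunk below only awaits `pending_fut` behind an `is_terminated()` check.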
+ +crates: + - name: sc-rpc diff --git a/substrate/client/rpc/src/utils.rs b/substrate/client/rpc/src/utils.rs index 6ec48efef84..3b5372615e7 100644 --- a/substrate/client/rpc/src/utils.rs +++ b/substrate/client/rpc/src/utils.rs @@ -143,7 +143,7 @@ async fn inner_pipe_from_stream( // // Process remaining items and terminate. Either::Right((Either::Right((None, pending_fut)), _)) => { - if pending_fut.await.is_err() { + if !pending_fut.is_terminated() && pending_fut.await.is_err() { return; } @@ -231,4 +231,28 @@ mod tests { _ = rx.next().await.unwrap(); assert!(sub.next::().await.is_none()); } + + #[tokio::test] + async fn subscription_is_dropped_when_stream_is_empty() { + let notify_rx = std::sync::Arc::new(tokio::sync::Notify::new()); + let notify_tx = notify_rx.clone(); + + let mut module = RpcModule::new(notify_tx); + module + .register_subscription("sub", "my_sub", "unsub", |_, pending, notify_tx| async move { + // emulate empty stream for simplicity: otherwise we need some mechanism + // to sync buffer and channel send operations + let stream = futures::stream::empty::<()>(); + // this should exit immediately + pipe_from_stream(pending, stream).await; + // notify that the `pipe_from_stream` has returned + notify_tx.notify_one(); + Ok(()) + }) + .unwrap(); + module.subscribe("sub", EmptyServerParams::new(), 1).await.unwrap(); + + // it should fire once `pipe_from_stream` returns + notify_rx.notified().await; + } } -- GitLab From e0e1f2d6278885d1ffebe3263315089e48572a26 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 21 May 2024 16:46:06 +0300 Subject: [PATCH 040/106] Bridge: added force_set_pallet_state call to pallet-bridge-grandpa (#4465) closes https://github.com/paritytech/parity-bridges-common/issues/2963 See the issue above for the rationale. I've been thinking about adding similar calls to other pallets, but: - for the parachains pallet I haven't been able to think of a case when we will need that, given how long a referendum takes. I.e. if the storage proof format changes and we want to unstuck the bridge, it'll take a few weeks to sync a single parachain header, then more weeks for the next one, and so on. - for the messages pallet I initially made a similar call, but it just changes a storage key (`OutboundLanes` and/or `InboundLanes`), so there's no logic there and it can simply be done using `system.set_storage`. --------- Co-authored-by: command-bot <> --- bridges/modules/grandpa/src/benchmarking.rs | 14 ++ bridges/modules/grandpa/src/lib.rs | 178 ++++++++++++++++-- bridges/modules/grandpa/src/weights.rs | 49 +++++ .../src/weights/pallet_bridge_grandpa.rs | 48 +++-- .../src/weights/pallet_bridge_grandpa.rs | 48 +++-- prdoc/pr_4465.prdoc | 19 ++ 6 files changed, 311 insertions(+), 45 deletions(-) create mode 100644 prdoc/pr_4465.prdoc diff --git a/bridges/modules/grandpa/src/benchmarking.rs b/bridges/modules/grandpa/src/benchmarking.rs index 11033373ce4..a458abf524d 100644 --- a/bridges/modules/grandpa/src/benchmarking.rs +++ b/bridges/modules/grandpa/src/benchmarking.rs @@ -138,5 +138,19 @@ benchmarks_instance_pallet!
{ assert!(!>::contains_key(genesis_header.hash())); } + force_set_pallet_state { + let set_id = 100; + let authorities = accounts(T::BridgedChain::MAX_AUTHORITIES_COUNT as u16) + .iter() + .map(|id| (AuthorityId::from(*id), 1)) + .collect::>(); + let (header, _) = prepare_benchmark_data::(1, 1); + let expected_hash = header.hash(); + }: force_set_pallet_state(RawOrigin::Root, set_id, authorities, Box::new(header)) + verify { + assert_eq!(>::get().unwrap().1, expected_hash); + assert_eq!(>::get().set_id, set_id); + } + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::TestRuntime) } diff --git a/bridges/modules/grandpa/src/lib.rs b/bridges/modules/grandpa/src/lib.rs index 4569fc2b19f..3b77f676870 100644 --- a/bridges/modules/grandpa/src/lib.rs +++ b/bridges/modules/grandpa/src/lib.rs @@ -44,7 +44,7 @@ use bp_header_chain::{ }; use bp_runtime::{BlockNumberOf, HashOf, HasherOf, HeaderId, HeaderOf, OwnedBridgeModule}; use frame_support::{dispatch::PostDispatchInfo, ensure, DefaultNoBound}; -use sp_consensus_grandpa::SetId; +use sp_consensus_grandpa::{AuthorityList, SetId}; use sp_runtime::{ traits::{Header as HeaderT, Zero}, SaturatedConversion, @@ -360,6 +360,42 @@ pub mod pallet { Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee }) } + + /// Set current authorities set and best finalized bridged header to given values + /// (almost) without any checks. This call can fail only if: + /// + /// - the call origin is not a root or a pallet owner; + /// + /// - there are too many authorities in the new set. + /// + /// No other checks are made. Previously imported headers stay in the storage and + /// are still accessible after the call. + #[pallet::call_index(5)] + #[pallet::weight(T::WeightInfo::force_set_pallet_state())] + pub fn force_set_pallet_state( + origin: OriginFor, + new_current_set_id: SetId, + new_authorities: AuthorityList, + new_best_header: Box>, + ) -> DispatchResult { + Self::ensure_owner_or_root(origin)?; + + // save new authorities set. It only fails if there are too many authorities + // in the new set + save_authorities_set::( + CurrentAuthoritySet::::get().set_id, + new_current_set_id, + new_authorities, + )?; + + // save new best header. It may be older than the best header that is already + // known to the pallet - it changes nothing (except for the fact that previously + // imported headers may still be used to prove something) + let new_best_header_hash = new_best_header.hash(); + insert_header::(*new_best_header, new_best_header_hash); + + Ok(()) + } } /// Number of free header submissions that we may yet accept in the current block. @@ -592,33 +628,45 @@ pub mod pallet { // GRANDPA only includes a `delay` for forced changes, so this isn't valid. ensure!(change.delay == Zero::zero(), >::UnsupportedScheduledChange); - // TODO [#788]: Stop manually increasing the `set_id` here. - let next_authorities = StoredAuthoritySet:: { - authorities: change - .next_authorities - .try_into() - .map_err(|_| Error::::TooManyAuthoritiesInSet)?, - set_id: current_set_id + 1, - }; - // Since our header schedules a change and we know the delay is 0, it must also enact // the change. - >::put(&next_authorities); - - log::info!( - target: LOG_TARGET, - "Transitioned from authority set {} to {}! New authorities are: {:?}", + // TODO [#788]: Stop manually increasing the `set_id` here. 
+ return save_authorities_set::( current_set_id, current_set_id + 1, - next_authorities, + change.next_authorities, ); - - return Ok(Some(next_authorities.into())) }; Ok(None) } + /// Save new authorities set. + pub(crate) fn save_authorities_set, I: 'static>( + old_current_set_id: SetId, + new_current_set_id: SetId, + new_authorities: AuthorityList, + ) -> Result, DispatchError> { + let next_authorities = StoredAuthoritySet:: { + authorities: new_authorities + .try_into() + .map_err(|_| Error::::TooManyAuthoritiesInSet)?, + set_id: new_current_set_id, + }; + + >::put(&next_authorities); + + log::info!( + target: LOG_TARGET, + "Transitioned from authority set {} to {}! New authorities are: {:?}", + old_current_set_id, + new_current_set_id, + next_authorities, + ); + + Ok(Some(next_authorities.into())) + } + /// Verify a GRANDPA justification (finality proof) for a given header. /// /// Will use the GRANDPA current authorities known to the pallet. @@ -1700,4 +1748,98 @@ mod tests { assert_eq!(FreeHeadersRemaining::::get(), Some(0)); }) } + + #[test] + fn force_set_pallet_state_works() { + run_test(|| { + let header25 = test_header(25); + let header50 = test_header(50); + let ok_new_set_id = 100; + let ok_new_authorities = authority_list(); + let bad_new_set_id = 100; + let bad_new_authorities: Vec<_> = std::iter::repeat((ALICE.into(), 1)) + .take(MAX_BRIDGED_AUTHORITIES as usize + 1) + .collect(); + + // initialize and import several headers + initialize_substrate_bridge(); + assert_ok!(submit_finality_proof(30)); + + // wrong origin => error + assert_noop!( + Pallet::::force_set_pallet_state( + RuntimeOrigin::signed(1), + ok_new_set_id, + ok_new_authorities.clone(), + Box::new(header50.clone()), + ), + DispatchError::BadOrigin, + ); + + // too many authorities in the set => error + assert_noop!( + Pallet::::force_set_pallet_state( + RuntimeOrigin::root(), + bad_new_set_id, + bad_new_authorities.clone(), + Box::new(header50.clone()), + ), + Error::::TooManyAuthoritiesInSet, + ); + + // force import header 50 => ok + assert_ok!(Pallet::::force_set_pallet_state( + RuntimeOrigin::root(), + ok_new_set_id, + ok_new_authorities.clone(), + Box::new(header50.clone()), + ),); + + // force import header 25 after 50 => ok + assert_ok!(Pallet::::force_set_pallet_state( + RuntimeOrigin::root(), + ok_new_set_id, + ok_new_authorities.clone(), + Box::new(header25.clone()), + ),); + + // we may import better headers + assert_noop!(submit_finality_proof(20), Error::::OldHeader); + assert_ok!(submit_finality_proof_with_set_id(26, ok_new_set_id)); + + // we can even reimport header #50. It **will cause** some issues during pruning + // (see below) + assert_ok!(submit_finality_proof_with_set_id(50, ok_new_set_id)); + + // and all headers are available. 
Even though there are 4 headers, the ring + // buffer thinks that there are 5, because we've imported header $50 twice + assert!(GrandpaChainHeaders::::finalized_header_state_root( + test_header(30).hash() + ) + .is_some()); + assert!(GrandpaChainHeaders::::finalized_header_state_root( + test_header(50).hash() + ) + .is_some()); + assert!(GrandpaChainHeaders::::finalized_header_state_root( + test_header(25).hash() + ) + .is_some()); + assert!(GrandpaChainHeaders::::finalized_header_state_root( + test_header(26).hash() + ) + .is_some()); + + // next header import will prune header 30 + assert_ok!(submit_finality_proof_with_set_id(70, ok_new_set_id)); + // next header import will prune header 50 + assert_ok!(submit_finality_proof_with_set_id(80, ok_new_set_id)); + // next header import will prune header 25 + assert_ok!(submit_finality_proof_with_set_id(90, ok_new_set_id)); + // next header import will prune header 26 + assert_ok!(submit_finality_proof_with_set_id(100, ok_new_set_id)); + // next header import will prune header 50 again. But it is fine + assert_ok!(submit_finality_proof_with_set_id(110, ok_new_set_id)); + }); + } } diff --git a/bridges/modules/grandpa/src/weights.rs b/bridges/modules/grandpa/src/weights.rs index a75e7b5a8e4..9719bc9c022 100644 --- a/bridges/modules/grandpa/src/weights.rs +++ b/bridges/modules/grandpa/src/weights.rs @@ -51,6 +51,7 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_bridge_grandpa. pub trait WeightInfo { fn submit_finality_proof(p: u32, v: u32) -> Weight; + fn force_set_pallet_state() -> Weight; } /// Weights for `pallet_bridge_grandpa` that are generated using one of the Bridge testnets. @@ -109,6 +110,30 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } + + /// Storage: `BridgeWestendGrandpa::CurrentAuthoritySet` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: + /// Some(50250), added: 50745, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHashesPointer` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: + /// Some(4), added: 499, mode: `MaxEncodedLen`) Storage: `BridgeWestendGrandpa::ImportedHashes` + /// (r:1 w:1) Proof: `BridgeWestendGrandpa::ImportedHashes` (`max_values`: Some(1024), + /// `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::BestFinalized` (r:0 w:1) + /// Proof: `BridgeWestendGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), + /// added: 531, mode: `MaxEncodedLen`) Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:0 + /// w:2) Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: + /// Some(68), added: 1553, mode: `MaxEncodedLen`) + fn force_set_pallet_state() -> Weight { + // Proof Size summary in bytes: + // Measured: `452` + // Estimated: `51735` + // Minimum execution time: 62_232_000 picoseconds. 
+ Weight::from_parts(78_755_000, 0) + .saturating_add(Weight::from_parts(0, 51735)) + .saturating_add(RocksDbWeight::get().reads(3)) + .saturating_add(RocksDbWeight::get().writes(6)) + } } // For backwards compatibility and tests @@ -164,4 +189,28 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } + + /// Storage: `BridgeWestendGrandpa::CurrentAuthoritySet` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: + /// Some(50250), added: 50745, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHashesPointer` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: + /// Some(4), added: 499, mode: `MaxEncodedLen`) Storage: `BridgeWestendGrandpa::ImportedHashes` + /// (r:1 w:1) Proof: `BridgeWestendGrandpa::ImportedHashes` (`max_values`: Some(1024), + /// `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::BestFinalized` (r:0 w:1) + /// Proof: `BridgeWestendGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), + /// added: 531, mode: `MaxEncodedLen`) Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:0 + /// w:2) Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: + /// Some(68), added: 1553, mode: `MaxEncodedLen`) + fn force_set_pallet_state() -> Weight { + // Proof Size summary in bytes: + // Measured: `452` + // Estimated: `51735` + // Minimum execution time: 62_232_000 picoseconds. + Weight::from_parts(78_755_000, 0) + .saturating_add(Weight::from_parts(0, 51735)) + .saturating_add(RocksDbWeight::get().reads(3)) + .saturating_add(RocksDbWeight::get().writes(6)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs index 8c2435599f5..257e2dcac2f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_bridge_grandpa` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-05-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -48,12 +48,14 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bridge_grandpa`. 
pub struct WeightInfo(PhantomData); impl pallet_bridge_grandpa::WeightInfo for WeightInfo { + /// Storage: `BridgeWestendGrandpa::CurrentAuthoritySet` (r:1 w:0) + /// Proof: `BridgeWestendGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendGrandpa::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeWestendGrandpa::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendGrandpa::BestFinalized` (r:1 w:1) /// Proof: `BridgeWestendGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::CurrentAuthoritySet` (r:1 w:0) - /// Proof: `BridgeWestendGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::FreeHeadersRemaining` (r:1 w:0) + /// Proof: `BridgeWestendGrandpa::FreeHeadersRemaining` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendGrandpa::ImportedHashesPointer` (r:1 w:1) /// Proof: `BridgeWestendGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendGrandpa::ImportedHashes` (r:1 w:1) @@ -62,18 +64,36 @@ impl pallet_bridge_grandpa::WeightInfo for WeightInfo Weight { + fn submit_finality_proof(p: u32, _v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `335 + p * (60 ยฑ0)` + // Measured: `440 + p * (60 ยฑ0)` // Estimated: `51735` - // Minimum execution time: 310_124_000 picoseconds. - Weight::from_parts(18_294_977, 0) + // Minimum execution time: 306_046_000 picoseconds. + Weight::from_parts(384_361_000, 0) .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 5_665 - .saturating_add(Weight::from_parts(55_380_719, 0).saturating_mul(p.into())) - // Standard Error: 94_494 - .saturating_add(Weight::from_parts(2_765_959, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(5)) + // Standard Error: 14_298 + .saturating_add(Weight::from_parts(49_045_748, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) } + /// Storage: `BridgeWestendGrandpa::CurrentAuthoritySet` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHashesPointer` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHashes` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::ImportedHashes` (`max_values`: Some(1024), `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::BestFinalized` (r:0 w:1) + /// Proof: `BridgeWestendGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:0 w:2) + /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + fn force_set_pallet_state() -> Weight { + // Proof Size summary in bytes: + // Measured: `452` + // Estimated: `51735` + // Minimum execution time: 94_965_000 picoseconds. 
+ Weight::from_parts(113_633_000, 0) + .saturating_add(Weight::from_parts(0, 51735)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(6)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs index e87ed668dfc..348d651396c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_bridge_grandpa` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-05-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -48,12 +48,14 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bridge_grandpa`. pub struct WeightInfo(PhantomData); impl pallet_bridge_grandpa::WeightInfo for WeightInfo { + /// Storage: `BridgeRococoGrandpa::CurrentAuthoritySet` (r:1 w:0) + /// Proof: `BridgeRococoGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoGrandpa::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeRococoGrandpa::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoGrandpa::BestFinalized` (r:1 w:1) /// Proof: `BridgeRococoGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::CurrentAuthoritySet` (r:1 w:0) - /// Proof: `BridgeRococoGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoGrandpa::FreeHeadersRemaining` (r:1 w:0) + /// Proof: `BridgeRococoGrandpa::FreeHeadersRemaining` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoGrandpa::ImportedHashesPointer` (r:1 w:1) /// Proof: `BridgeRococoGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoGrandpa::ImportedHashes` (r:1 w:1) @@ -64,16 +66,36 @@ impl pallet_bridge_grandpa::WeightInfo for WeightInfo Weight { // Proof Size summary in bytes: - // Measured: `231 + p * (60 ยฑ0)` + // Measured: `270 + p * (60 ยฑ0)` // Estimated: `51735` - // Minimum execution time: 303_549_000 picoseconds. - Weight::from_parts(306_232_000, 0) + // Minimum execution time: 294_098_000 picoseconds. 
+ Weight::from_parts(31_208_540, 0) .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 4_641 - .saturating_add(Weight::from_parts(55_196_301, 0).saturating_mul(p.into())) - // Standard Error: 35_813 - .saturating_add(Weight::from_parts(70_584, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(5)) + // Standard Error: 8_832 + .saturating_add(Weight::from_parts(40_930_987, 0).saturating_mul(p.into())) + // Standard Error: 147_319 + .saturating_add(Weight::from_parts(2_663_839, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) } + /// Storage: `BridgeRococoGrandpa::CurrentAuthoritySet` (r:1 w:1) + /// Proof: `BridgeRococoGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoGrandpa::ImportedHashesPointer` (r:1 w:1) + /// Proof: `BridgeRococoGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoGrandpa::ImportedHashes` (r:1 w:1) + /// Proof: `BridgeRococoGrandpa::ImportedHashes` (`max_values`: Some(1024), `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoGrandpa::BestFinalized` (r:0 w:1) + /// Proof: `BridgeRococoGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoGrandpa::ImportedHeaders` (r:0 w:2) + /// Proof: `BridgeRococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + fn force_set_pallet_state() -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `51735` + // Minimum execution time: 112_875_000 picoseconds. + Weight::from_parts(120_861_000, 0) + .saturating_add(Weight::from_parts(0, 51735)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(6)) + } } diff --git a/prdoc/pr_4465.prdoc b/prdoc/pr_4465.prdoc new file mode 100644 index 00000000000..cbeff09f871 --- /dev/null +++ b/prdoc/pr_4465.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Bridge: added force_set_pallet_state call to pallet-bridge-grandpa" + +doc: + - audience: Runtime Dev + description: | + Added `force_set_pallet_state` to the `pallet-bridge-grandpa`. It is only callable by the + root (governance or sudo) and may be used to update current authorities set and the best + finalized header without any additional checks. + +crates: + - name: pallet-bridge-grandpa + bump: major + - name: bridge-hub-rococo-runtime + bump: minor + - name: bridge-hub-westend-runtime + bump: minor -- GitLab From d05786ffb5523c334f10d16870c2e73674661a52 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Tue, 21 May 2024 19:10:10 +0300 Subject: [PATCH 041/106] Replace `Multiaddr` & related types with substrate-specific types (#4198) This PR introduces custom types / substrate wrappers for `Multiaddr`, `multiaddr::Protocol`, `Multihash`, `ed25519::*` and supplementary types like errors and iterators. This is needed to unblock `libp2p` upgrade PR https://github.com/paritytech/polkadot-sdk/pull/1631 after https://github.com/paritytech/polkadot-sdk/pull/2944 was merged. 
`libp2p` and `litep2p` currently depend on different versions of the `multiaddr` crate, and the introduction of these "common ground" types is needed to support independent version upgrades of `multiaddr` and dependent crates in `libp2p` & `litep2p`. Beyond just being convenient for not tying the versions of `libp2p` & `litep2p` dependencies together, it's currently not even possible to keep the `libp2p` & `litep2p` dependencies updated to the same versions: `multiaddr` in `libp2p` depends on `libp2p-identity`, which we can't include as a dependency of `litep2p`, which has its own `PeerId` type. In the future, to keep things updated on the `litep2p` side, we will likely need to fork `multiaddr` and make it use the `litep2p` `PeerId` as the payload of the `/p2p/...` protocol. With these changes, common code in substrate uses these custom types, and the `litep2p` & `libp2p` backends use the corresponding libraries' types. --- Cargo.lock | 15 +- prdoc/pr_4198.prdoc | 31 + .../client/authority-discovery/src/error.rs | 4 +- .../client/authority-discovery/src/worker.rs | 6 +- .../src/worker/addr_cache.rs | 2 +- .../authority-discovery/src/worker/tests.rs | 10 +- .../client/cli/src/params/node_key_params.rs | 10 +- .../client/mixnet/src/sync_with_runtime.rs | 7 +- substrate/client/network/src/config.rs | 38 +- substrate/client/network/src/error.rs | 2 +- substrate/client/network/src/lib.rs | 6 +- .../client/network/src/litep2p/discovery.rs | 7 +- substrate/client/network/src/litep2p/mod.rs | 49 +- .../client/network/src/litep2p/service.rs | 24 +- substrate/client/network/src/peer_store.rs | 3 +- .../src/protocol/notifications/service/mod.rs | 2 +- .../protocol/notifications/service/tests.rs | 2 +- .../client/network/src/protocol_controller.rs | 9 +- substrate/client/network/src/service.rs | 83 +-- .../client/network/src/service/traits.rs | 4 +- .../client/network/sync/src/service/mock.rs | 4 +- substrate/client/network/test/src/fuzz.rs | 3 +- substrate/client/network/test/src/lib.rs | 9 +- substrate/client/network/types/Cargo.toml | 5 + substrate/client/network/types/src/ed25519.rs | 551 ++++++++++++++++++ substrate/client/network/types/src/lib.rs | 8 +- .../client/network/types/src/multiaddr.rs | 251 ++++++++ .../network/types/src/multiaddr/protocol.rs | 138 +++++ .../client/network/types/src/multihash.rs | 192 ++++++ substrate/client/network/types/src/peer_id.rs | 8 +- substrate/client/telemetry/src/endpoints.rs | 2 +- substrate/client/telemetry/src/lib.rs | 2 +- substrate/client/telemetry/src/node.rs | 3 +- 33 files changed, 1343 insertions(+), 147 deletions(-) create mode 100644 prdoc/pr_4198.prdoc create mode 100644 substrate/client/network/types/src/ed25519.rs create mode 100644 substrate/client/network/types/src/multiaddr.rs create mode 100644 substrate/client/network/types/src/multiaddr/protocol.rs create mode 100644 substrate/client/network/types/src/multihash.rs diff --git a/Cargo.lock b/Cargo.lock index 2a4b9b138bf..1f908bb49ed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1896,7 +1896,7 @@ dependencies = [ "bp-parachains", "bp-polkadot-core", "bp-runtime", - "ed25519-dalek 2.1.0", + "ed25519-dalek 2.1.1", "finality-grandpa", "parity-scale-codec", "sp-application-crypto", @@ -4961,9 +4961,9 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f628eaec48bfd21b865dc2950cfa014450c01d2fa2b69a86c2fd5844ec523c0" +checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [
"curve25519-dalek 4.1.2", "ed25519 2.2.2", @@ -7626,7 +7626,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "276bb57e7af15d8f100d3c11cbdd32c6752b7eef4ba7a18ecf464972c07abcce" dependencies = [ "bs58 0.4.0", - "ed25519-dalek 2.1.0", + "ed25519-dalek 2.1.1", "log", "multiaddr", "multihash 0.17.0", @@ -17213,12 +17213,15 @@ name = "sc-network-types" version = "0.10.0" dependencies = [ "bs58 0.5.0", + "ed25519-dalek 2.1.1", "libp2p-identity", "litep2p", "multiaddr", "multihash 0.17.0", + "quickcheck", "rand 0.8.5", "thiserror", + "zeroize", ] [[package]] @@ -19497,7 +19500,7 @@ name = "sp-io" version = "30.0.0" dependencies = [ "bytes", - "ed25519-dalek 2.1.0", + "ed25519-dalek 2.1.1", "libsecp256k1", "log", "parity-scale-codec", @@ -19823,7 +19826,7 @@ version = "10.0.0" dependencies = [ "aes-gcm", "curve25519-dalek 4.1.2", - "ed25519-dalek 2.1.0", + "ed25519-dalek 2.1.1", "hkdf", "parity-scale-codec", "rand 0.8.5", diff --git a/prdoc/pr_4198.prdoc b/prdoc/pr_4198.prdoc new file mode 100644 index 00000000000..cff95681260 --- /dev/null +++ b/prdoc/pr_4198.prdoc @@ -0,0 +1,31 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Replace `Multiaddr` & related types with substrate-specific types + +doc: + - audience: Node Dev + description: | + Introduce custom types / substrate wrappers for `Multiaddr`, `multiaddr::Protocol`, + `Multihash`, `ed25519::*` and supplementary types like errors and iterators. + + Common code in substrate uses these custom types, while `libp2p` & `litep2p` network + backends use their corresponding libraries types. + + This is needed to independently upgrade `libp2p` & `litep2p` dependencies. 
+ +crates: + - name: sc-network-types + bump: minor + - name: sc-network + bump: minor + - name: sc-network-sync + bump: minor + - name: sc-authority-discovery + bump: minor + - name: sc-cli + bump: patch + - name: sc-mixnet + bump: patch + - name: sc-telemetry + bump: patch diff --git a/substrate/client/authority-discovery/src/error.rs b/substrate/client/authority-discovery/src/error.rs index 6f791237c2f..d2c567d77af 100644 --- a/substrate/client/authority-discovery/src/error.rs +++ b/substrate/client/authority-discovery/src/error.rs @@ -35,7 +35,7 @@ pub enum Error { VerifyingDhtPayload, #[error("Failed to hash the authority id to be used as a dht key.")] - HashingAuthorityId(#[from] sc_network::multiaddr::multihash::Error), + HashingAuthorityId(#[from] sc_network_types::multihash::Error), #[error("Failed calling into the Substrate runtime: {0}")] CallingRuntime(#[from] sp_blockchain::Error), @@ -53,7 +53,7 @@ pub enum Error { EncodingDecodingScale(#[from] codec::Error), #[error("Failed to parse a libp2p multi address.")] - ParsingMultiaddress(#[from] sc_network::multiaddr::Error), + ParsingMultiaddress(#[from] sc_network::multiaddr::ParseError), #[error("Failed to parse a libp2p key: {0}")] ParsingLibp2pIdentity(String), diff --git a/substrate/client/authority-discovery/src/worker.rs b/substrate/client/authority-discovery/src/worker.rs index 53418d2d38c..d89083100aa 100644 --- a/substrate/client/authority-discovery/src/worker.rs +++ b/substrate/client/authority-discovery/src/worker.rs @@ -35,7 +35,6 @@ use addr_cache::AddrCache; use codec::{Decode, Encode}; use ip_network::IpNetwork; use linked_hash_set::LinkedHashSet; -use multihash::{Code, Multihash, MultihashDigest}; use log::{debug, error, log_enabled}; use prometheus_endpoint::{register, Counter, CounterVec, Gauge, Opts, U64}; @@ -46,7 +45,10 @@ use sc_network::{ event::DhtEvent, multiaddr, KademliaKey, Multiaddr, NetworkDHTProvider, NetworkSigner, NetworkStateInfo, }; -use sc_network_types::PeerId; +use sc_network_types::{ + multihash::{Code, Multihash}, + PeerId, +}; use sp_api::{ApiError, ProvideRuntimeApi}; use sp_authority_discovery::{ AuthorityDiscoveryApi, AuthorityId, AuthorityPair, AuthoritySignature, diff --git a/substrate/client/authority-discovery/src/worker/addr_cache.rs b/substrate/client/authority-discovery/src/worker/addr_cache.rs index 6e3b3c8af20..77cdfbd4f15 100644 --- a/substrate/client/authority-discovery/src/worker/addr_cache.rs +++ b/substrate/client/authority-discovery/src/worker/addr_cache.rs @@ -176,8 +176,8 @@ fn addresses_to_peer_ids(addresses: &HashSet) -> HashSet { mod tests { use super::*; - use multihash::{self, Multihash}; use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; + use sc_network_types::multihash::Multihash; use sp_authority_discovery::{AuthorityId, AuthorityPair}; use sp_core::crypto::Pair; diff --git a/substrate/client/authority-discovery/src/worker/tests.rs b/substrate/client/authority-discovery/src/worker/tests.rs index caeac56c540..70107c89a85 100644 --- a/substrate/client/authority-discovery/src/worker/tests.rs +++ b/substrate/client/authority-discovery/src/worker/tests.rs @@ -29,11 +29,15 @@ use futures::{ sink::SinkExt, task::LocalSpawn, }; -use libp2p::{core::multiaddr, identity::SigningError, kad::record::Key as KademliaKey, PeerId}; +use libp2p::{identity::SigningError, kad::record::Key as KademliaKey}; use prometheus_endpoint::prometheus::default_registry; use sc_client_api::HeaderBackend; use sc_network::{service::signature::Keypair, Signature}; +use sc_network_types::{ 
+ multiaddr::{Multiaddr, Protocol}, + PeerId, +}; use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_keystore::{testing::MemoryKeystore, Keystore}; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; @@ -168,7 +172,7 @@ impl NetworkSigner for TestNetwork { let public_key = libp2p::identity::PublicKey::try_decode_protobuf(&public_key) .map_err(|error| error.to_string())?; let peer_id: PeerId = peer_id.into(); - let remote: libp2p::PeerId = public_key.to_peer_id(); + let remote: PeerId = public_key.to_peer_id().into(); Ok(peer_id == remote && public_key.verify(message, signature)) } @@ -435,7 +439,7 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { let peer_id = PeerId::random(); let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); - address.with(multiaddr::Protocol::P2p(peer_id.into())) + address.with(Protocol::P2p(peer_id.into())) }; let remote_key_store = MemoryKeystore::new(); let remote_public_key: AuthorityId = remote_key_store diff --git a/substrate/client/cli/src/params/node_key_params.rs b/substrate/client/cli/src/params/node_key_params.rs index 7058af19f1d..0e12c7a2a2d 100644 --- a/substrate/client/cli/src/params/node_key_params.rs +++ b/substrate/client/cli/src/params/node_key_params.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use clap::Args; -use sc_network::config::{identity::ed25519, NodeKeyConfig}; +use sc_network::config::{ed25519, NodeKeyConfig}; use sc_service::Role; use sp_core::H256; use std::{path::PathBuf, str::FromStr}; @@ -148,7 +148,7 @@ fn parse_ed25519_secret(hex: &str) -> error::Result Result<(PeerId, Multiaddr), ParseErr> /// # Example /// /// ``` -/// # use libp2p::{Multiaddr, PeerId}; +/// # use sc_network_types::{multiaddr::Multiaddr, PeerId}; /// use sc_network::config::MultiaddrWithPeerId; /// let addr: MultiaddrWithPeerId = /// "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse().unwrap(); @@ -187,7 +186,7 @@ impl TryFrom for MultiaddrWithPeerId { #[derive(Debug)] pub enum ParseErr { /// Error while parsing the multiaddress. - MultiaddrParse(multiaddr::Error), + MultiaddrParse(multiaddr::ParseError), /// Multihash of the peer ID is invalid. InvalidPeerId, /// The peer ID is missing from the address. @@ -214,8 +213,8 @@ impl std::error::Error for ParseErr { } } -impl From for ParseErr { - fn from(err: multiaddr::Error) -> ParseErr { +impl From for ParseErr { + fn from(err: multiaddr::ParseError) -> ParseErr { Self::MultiaddrParse(err) } } @@ -343,10 +342,10 @@ impl NodeKeyConfig { /// /// * If the secret is configured to be new, it is generated and the corresponding keypair is /// returned. 
- pub fn into_keypair(self) -> io::Result { + pub fn into_keypair(self) -> io::Result { use NodeKeyConfig::*; match self { - Ed25519(Secret::New) => Ok(Keypair::generate_ed25519()), + Ed25519(Secret::New) => Ok(ed25519::Keypair::generate()), Ed25519(Secret::Input(k)) => Ok(ed25519::Keypair::from(k).into()), @@ -365,8 +364,7 @@ impl NodeKeyConfig { ed25519::SecretKey::generate, |b| b.as_ref().to_vec(), ) - .map(ed25519::Keypair::from) - .map(Keypair::from), + .map(ed25519::Keypair::from), } } } @@ -887,7 +885,7 @@ impl> FullNetworkConfig .find(|o| o.peer_id != bootnode.peer_id) { Err(crate::error::Error::DuplicateBootnode { - address: bootnode.multiaddr.clone(), + address: bootnode.multiaddr.clone().into(), first_id: bootnode.peer_id.into(), second_id: other.peer_id.into(), }) @@ -947,14 +945,8 @@ mod tests { tempfile::Builder::new().prefix(prefix).tempdir().unwrap() } - fn secret_bytes(kp: Keypair) -> Vec { - kp.try_into_ed25519() - .expect("ed25519 keypair") - .secret() - .as_ref() - .iter() - .cloned() - .collect() + fn secret_bytes(kp: ed25519::Keypair) -> Vec { + kp.secret().to_bytes().into() } #[test] diff --git a/substrate/client/network/src/error.rs b/substrate/client/network/src/error.rs index b776e3e1ad9..376b8461be4 100644 --- a/substrate/client/network/src/error.rs +++ b/substrate/client/network/src/error.rs @@ -20,7 +20,7 @@ use crate::{config::TransportConfig, types::ProtocolName}; -use libp2p::{Multiaddr, PeerId}; +use sc_network_types::{multiaddr::Multiaddr, PeerId}; use std::fmt; diff --git a/substrate/client/network/src/lib.rs b/substrate/client/network/src/lib.rs index 8f479825c8d..99a972f914e 100644 --- a/substrate/client/network/src/lib.rs +++ b/substrate/client/network/src/lib.rs @@ -272,6 +272,10 @@ pub use sc_network_common::{ role::{ObservedRole, Roles}, types::ReputationChange, }; +pub use sc_network_types::{ + multiaddr::{self, Multiaddr}, + PeerId, +}; pub use service::{ metrics::NotificationMetrics, signature::Signature, @@ -285,7 +289,7 @@ pub use service::{ DecodingError, Keypair, NetworkService, NetworkWorker, NotificationSender, OutboundFailure, PublicKey, }; -pub use types::{multiaddr, Multiaddr, PeerId, ProtocolName}; +pub use types::ProtocolName; /// The maximum allowed number of established connections per peer. 
/// diff --git a/substrate/client/network/src/litep2p/discovery.rs b/substrate/client/network/src/litep2p/discovery.rs index 2120ea7c615..351380755db 100644 --- a/substrate/client/network/src/litep2p/discovery.rs +++ b/substrate/client/network/src/litep2p/discovery.rs @@ -20,9 +20,7 @@ use crate::{ config::{NetworkConfiguration, ProtocolId}, - multiaddr::Protocol, peer_store::PeerStoreProvider, - Multiaddr, }; use array_bytes::bytes2hex; @@ -42,6 +40,7 @@ use litep2p::{ }, mdns::{Config as MdnsConfig, MdnsEvent}, }, + types::multiaddr::{Multiaddr, Protocol}, PeerId, ProtocolName, }; use parking_lot::RwLock; @@ -227,7 +226,7 @@ impl Discovery { let (identify_config, identify_event_stream) = IdentifyConfig::new( "/substrate/1.0".to_string(), Some(user_agent), - config.public_addresses.clone(), + config.public_addresses.clone().into_iter().map(Into::into).collect(), ); let (mdns_config, mdns_event_stream) = match config.transport { @@ -266,7 +265,7 @@ impl Discovery { duration_to_next_find_query: Duration::from_secs(1), address_confirmations: LruMap::new(ByLength::new(8)), allow_non_global_addresses: config.allow_non_globals_in_dht, - public_addresses: config.public_addresses.iter().cloned().collect(), + public_addresses: config.public_addresses.iter().cloned().map(Into::into).collect(), next_kad_query: Some(Delay::new(KADEMLIA_QUERY_INTERVAL)), local_protocols: HashSet::from_iter([kademlia_protocol_name( genesis_hash, diff --git a/substrate/client/network/src/litep2p/mod.rs b/substrate/client/network/src/litep2p/mod.rs index 1137c73b56d..67085a81a5c 100644 --- a/substrate/client/network/src/litep2p/mod.rs +++ b/substrate/client/network/src/litep2p/mod.rs @@ -38,7 +38,6 @@ use crate::{ request_response::{RequestResponseConfig, RequestResponseProtocol}, }, }, - multiaddr::{Multiaddr, Protocol}, peer_store::PeerStoreProvider, protocol, service::{ @@ -54,7 +53,7 @@ use futures::StreamExt; use libp2p::kad::RecordKey; use litep2p::{ config::ConfigBuilder, - crypto::ed25519::{Keypair, SecretKey}, + crypto::ed25519::Keypair, executor::Executor, protocol::{ libp2p::{bitswap::Config as BitswapConfig, kademlia::QueryId}, @@ -64,7 +63,10 @@ use litep2p::{ tcp::config::Config as TcpTransportConfig, websocket::config::Config as WebSocketTransportConfig, Endpoint, }, - types::ConnectionId, + types::{ + multiaddr::{Multiaddr, Protocol}, + ConnectionId, + }, Error as Litep2pError, Litep2p, Litep2pEvent, ProtocolName as Litep2pProtocolName, }; use parking_lot::RwLock; @@ -81,7 +83,7 @@ use std::{ collections::{hash_map::Entry, HashMap, HashSet}, fs, future::Future, - io, iter, + iter, pin::Pin, sync::{ atomic::{AtomicUsize, Ordering}, @@ -200,12 +202,12 @@ impl Litep2pNetworkBackend { Protocol::Ip4(_), ) => match address.iter().find(|protocol| std::matches!(protocol, Protocol::P2p(_))) { - Some(Protocol::P2p(multihash)) => PeerId::from_multihash(multihash) + Some(Protocol::P2p(multihash)) => PeerId::from_multihash(multihash.into()) .map_or(None, |peer| Some((peer, Some(address)))), _ => None, }, Some(Protocol::P2p(multihash)) => - PeerId::from_multihash(multihash).map_or(None, |peer| Some((peer, None))), + PeerId::from_multihash(multihash.into()).map_or(None, |peer| Some((peer, None))), _ => None, }) .fold(HashMap::new(), |mut acc, (peer, maybe_address)| { @@ -244,16 +246,9 @@ impl Litep2pNetworkBackend { impl Litep2pNetworkBackend { /// Get `litep2p` keypair from `NodeKeyConfig`. 
fn get_keypair(node_key: &NodeKeyConfig) -> Result<(Keypair, litep2p::PeerId), Error> { - let secret = libp2p::identity::Keypair::try_into_ed25519(node_key.clone().into_keypair()?) - .map_err(|error| { - log::error!(target: LOG_TARGET, "failed to convert to ed25519: {error:?}"); - Error::Io(io::ErrorKind::InvalidInput.into()) - })? - .secret(); - - let mut secret = secret.as_ref().iter().cloned().collect::>(); - let secret = SecretKey::from_bytes(&mut secret) - .map_err(|_| Error::Io(io::ErrorKind::InvalidInput.into()))?; + let secret: litep2p::crypto::ed25519::SecretKey = + node_key.clone().into_keypair()?.secret().into(); + let local_identity = Keypair::from(secret); let local_public = local_identity.public(); let local_peer_id = local_public.to_peer_id(); @@ -327,6 +322,8 @@ impl Litep2pNetworkBackend { .listen_addresses .iter() .filter_map(|address| { + use sc_network_types::multiaddr::Protocol; + let mut iter = address.iter(); match iter.next() { @@ -367,12 +364,12 @@ impl Litep2pNetworkBackend { config_builder .with_websocket(WebSocketTransportConfig { - listen_addresses: websocket.into_iter().flatten().collect(), + listen_addresses: websocket.into_iter().flatten().map(Into::into).collect(), yamux_config: yamux_config.clone(), ..Default::default() }) .with_tcp(TcpTransportConfig { - listen_addresses: tcp.into_iter().flatten().collect(), + listen_addresses: tcp.into_iter().flatten().map(Into::into).collect(), yamux_config, ..Default::default() }) @@ -522,6 +519,8 @@ impl NetworkBackend for Litep2pNetworkBac // collect known addresses let known_addresses: HashMap> = known_addresses.into_iter().fold(HashMap::new(), |mut acc, (peer, address)| { + use sc_network_types::multiaddr::Protocol; + let address = match address.iter().last() { Some(Protocol::Ws(_) | Protocol::Wss(_) | Protocol::Tcp(_)) => address.with(Protocol::P2p(peer.into())), @@ -529,7 +528,7 @@ impl NetworkBackend for Litep2pNetworkBac _ => return acc, }; - acc.entry(peer.into()).or_default().push(address); + acc.entry(peer.into()).or_default().push(address.into()); peer_store_handle.add_known_peer(peer); acc @@ -567,7 +566,7 @@ impl NetworkBackend for Litep2pNetworkBac Litep2p::new(config_builder.build()).map_err(|error| Error::Litep2p(error))?; let external_addresses: Arc>> = Arc::new(RwLock::new( - HashSet::from_iter(network_config.public_addresses.iter().cloned()), + HashSet::from_iter(network_config.public_addresses.iter().cloned().map(Into::into)), )); litep2p.listen_addresses().for_each(|address| { log::debug!(target: LOG_TARGET, "listening on: {address}"); @@ -713,7 +712,7 @@ impl NetworkBackend for Litep2pNetworkBac protocol, peers, } => { - let peers = self.add_addresses(peers.into_iter()); + let peers = self.add_addresses(peers.into_iter().map(Into::into)); match self.peerset_handles.get(&protocol) { Some(handle) => { @@ -722,9 +721,11 @@ impl NetworkBackend for Litep2pNetworkBac None => log::warn!(target: LOG_TARGET, "protocol {protocol} doens't exist"), }; } - NetworkServiceCommand::AddKnownAddress { peer, mut address } => { + NetworkServiceCommand::AddKnownAddress { peer, address } => { + let mut address: Multiaddr = address.into(); + if !address.iter().any(|protocol| std::matches!(protocol, Protocol::P2p(_))) { - address.push(Protocol::P2p(peer.into())); + address.push(Protocol::P2p(litep2p::PeerId::from(peer).into())); } if self.litep2p.add_known_address(peer.into(), iter::once(address.clone())) == 0usize { @@ -735,7 +736,7 @@ impl NetworkBackend for Litep2pNetworkBac } }, 
NetworkServiceCommand::SetReservedPeers { protocol, peers } => { - let peers = self.add_addresses(peers.into_iter()); + let peers = self.add_addresses(peers.into_iter().map(Into::into)); match self.peerset_handles.get(&protocol) { Some(handle) => { diff --git a/substrate/client/network/src/litep2p/service.rs b/substrate/client/network/src/litep2p/service.rs index 86f11aa6e14..09b869abdf5 100644 --- a/substrate/client/network/src/litep2p/service.rs +++ b/substrate/client/network/src/litep2p/service.rs @@ -24,7 +24,6 @@ use crate::{ notification::{config::ProtocolControlHandle, peerset::PeersetCommand}, request_response::OutboundRequest, }, - multiaddr::Protocol, network_state::NetworkState, peer_store::PeerStoreProvider, service::out_events, @@ -35,15 +34,18 @@ use crate::{ use codec::DecodeAll; use futures::{channel::oneshot, stream::BoxStream}; -use libp2p::{identity::SigningError, kad::record::Key as KademliaKey, Multiaddr}; -use litep2p::crypto::ed25519::Keypair; +use libp2p::{identity::SigningError, kad::record::Key as KademliaKey}; +use litep2p::{crypto::ed25519::Keypair, types::multiaddr::Multiaddr as LiteP2pMultiaddr}; use parking_lot::RwLock; use sc_network_common::{ role::{ObservedRole, Roles}, types::ReputationChange, }; -use sc_network_types::PeerId; +use sc_network_types::{ + multiaddr::{Multiaddr, Protocol}, + PeerId, +}; use sc_utils::mpsc::TracingUnboundedSender; use std::{ @@ -165,10 +167,10 @@ pub struct Litep2pNetworkService { request_response_protocols: HashMap>, /// Listen addresses. - listen_addresses: Arc>>, + listen_addresses: Arc>>, /// External addresses. - external_addresses: Arc>>, + external_addresses: Arc>>, } impl Litep2pNetworkService { @@ -181,8 +183,8 @@ impl Litep2pNetworkService { peerset_handles: HashMap, block_announce_protocol: ProtocolName, request_response_protocols: HashMap>, - listen_addresses: Arc>>, - external_addresses: Arc>>, + listen_addresses: Arc>>, + external_addresses: Arc>>, ) -> Self { Self { local_peer_id, @@ -322,7 +324,7 @@ impl NetworkPeers for Litep2pNetworkService { fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String> { let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::AddPeersToReservedSet { protocol: self.block_announce_protocol.clone(), - peers: HashSet::from_iter([peer.concat()]), + peers: HashSet::from_iter([peer.concat().into()]), }); Ok(()) @@ -415,11 +417,11 @@ impl NetworkEventStream for Litep2pNetworkService { impl NetworkStateInfo for Litep2pNetworkService { fn external_addresses(&self) -> Vec { - self.external_addresses.read().iter().cloned().collect() + self.external_addresses.read().iter().cloned().map(Into::into).collect() } fn listen_addresses(&self) -> Vec { - self.listen_addresses.read().iter().cloned().collect() + self.listen_addresses.read().iter().cloned().map(Into::into).collect() } fn local_peer_id(&self) -> PeerId { diff --git a/substrate/client/network/src/peer_store.rs b/substrate/client/network/src/peer_store.rs index a4c739f1448..987405500dc 100644 --- a/substrate/client/network/src/peer_store.rs +++ b/substrate/client/network/src/peer_store.rs @@ -19,8 +19,9 @@ //! [`PeerStore`] manages peer reputations and provides connection candidates to //! [`crate::protocol_controller::ProtocolController`]. 
-use crate::{service::traits::PeerStore as PeerStoreT, PeerId}; +use crate::service::traits::PeerStore as PeerStoreT; +use libp2p::PeerId; use log::trace; use parking_lot::Mutex; use partial_sort::PartialSort; diff --git a/substrate/client/network/src/protocol/notifications/service/mod.rs b/substrate/client/network/src/protocol/notifications/service/mod.rs index 15d289d170e..4f6d32ae3b3 100644 --- a/substrate/client/network/src/protocol/notifications/service/mod.rs +++ b/substrate/client/network/src/protocol/notifications/service/mod.rs @@ -28,13 +28,13 @@ use crate::{ }, }, types::ProtocolName, - PeerId, }; use futures::{ stream::{FuturesUnordered, Stream}, StreamExt, }; +use libp2p::PeerId; use parking_lot::Mutex; use tokio::sync::{mpsc, oneshot}; use tokio_stream::wrappers::ReceiverStream; diff --git a/substrate/client/network/src/protocol/notifications/service/tests.rs b/substrate/client/network/src/protocol/notifications/service/tests.rs index f0157f6d28d..32ccb3348ad 100644 --- a/substrate/client/network/src/protocol/notifications/service/tests.rs +++ b/substrate/client/network/src/protocol/notifications/service/tests.rs @@ -200,7 +200,7 @@ async fn send_async_notification_to_non_existent_peer() { if let Err(error::Error::PeerDoesntExist(peer_id)) = notif.send_async_notification(&peer.into(), vec![1, 3, 3, 7]).await { - assert_eq!(peer, peer_id); + assert_eq!(peer, peer_id.into()); } else { panic!("invalid error received from `send_async_notification()`"); } diff --git a/substrate/client/network/src/protocol_controller.rs b/substrate/client/network/src/protocol_controller.rs index 2c3e6744e32..da51a7a4f9f 100644 --- a/substrate/client/network/src/protocol_controller.rs +++ b/substrate/client/network/src/protocol_controller.rs @@ -41,12 +41,10 @@ //! Even though this does not guarantee that `ProtocolController` and `Notifications` have the same //! view of the peers' states at any given moment, the eventual consistency is maintained. 
-use crate::{ - peer_store::{PeerStoreProvider, ProtocolHandle as ProtocolHandleT}, - PeerId, -}; +use crate::peer_store::{PeerStoreProvider, ProtocolHandle as ProtocolHandleT}; use futures::{channel::oneshot, future::Either, FutureExt, StreamExt}; +use libp2p::PeerId; use log::{debug, error, trace, warn}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_arithmetic::traits::SaturatedConversion; @@ -860,8 +858,9 @@ mod tests { use super::*; use crate::{ peer_store::{PeerStoreProvider, ProtocolHandle as ProtocolHandleT}, - PeerId, ReputationChange, + ReputationChange, }; + use libp2p::PeerId; use sc_network_common::role::ObservedRole; use sc_utils::mpsc::{tracing_unbounded, TryRecvError}; use std::collections::HashSet; diff --git a/substrate/client/network/src/service.rs b/substrate/client/network/src/service.rs index 807c5b5a80a..1aaa63191a8 100644 --- a/substrate/client/network/src/service.rs +++ b/substrate/client/network/src/service.rs @@ -55,24 +55,26 @@ use crate::{ }, transport, types::ProtocolName, - Multiaddr, NotificationService, PeerId, ReputationChange, + NotificationService, ReputationChange, }; use codec::DecodeAll; use either::Either; use futures::{channel::oneshot, prelude::*}; +use libp2p::identity::ed25519; #[allow(deprecated)] use libp2p::{ connection_limits::Exceeded, core::{upgrade, ConnectedPoint, Endpoint}, identify::Info as IdentifyInfo, kad::record::Key as KademliaKey, - multiaddr, + multiaddr::{self, Multiaddr}, ping::Failure as PingFailure, swarm::{ AddressScore, ConnectionError, ConnectionId, ConnectionLimits, DialError, Executor, ListenError, NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent, THandlerErr, }, + PeerId, }; use log::{debug, error, info, trace, warn}; use metrics::{Histogram, MetricSources, Metrics}; @@ -269,6 +271,15 @@ where let local_public = local_identity.public(); let local_peer_id = local_public.to_peer_id(); + // Convert to libp2p types. 
+ let local_identity: ed25519::Keypair = local_identity.into(); + let local_public: ed25519::PublicKey = local_public.into(); + let local_peer_id: PeerId = local_peer_id.into(); + let listen_addresses: Vec = + network_config.listen_addresses.iter().cloned().map(Into::into).collect(); + let public_addresses: Vec = + network_config.public_addresses.iter().cloned().map(Into::into).collect(); + network_config.boot_nodes = network_config .boot_nodes .into_iter() @@ -370,7 +381,7 @@ where }; transport::build_transport( - local_identity.clone(), + local_identity.clone().into(), config_mem, network_config.yamux_window_size, yamux_maximum_buffer_size, @@ -462,7 +473,7 @@ where .find(|o| o.peer_id != bootnode.peer_id) { Err(Error::DuplicateBootnode { - address: bootnode.multiaddr.clone(), + address: bootnode.multiaddr.clone().into(), first_id: bootnode.peer_id.into(), second_id: other.peer_id.into(), }) @@ -478,7 +489,7 @@ where boot_node_ids .entry(bootnode.peer_id.into()) .or_default() - .push(bootnode.multiaddr.clone()); + .push(bootnode.multiaddr.clone().into()); } let boot_node_ids = Arc::new(boot_node_ids); @@ -502,11 +513,11 @@ where format!("{} ({})", network_config.client_version, network_config.node_name); let discovery_config = { - let mut config = DiscoveryConfig::new(local_public.to_peer_id()); + let mut config = DiscoveryConfig::new(local_peer_id); config.with_permanent_addresses( known_addresses .iter() - .map(|(peer, address)| (peer.into(), address.clone())) + .map(|(peer, address)| (peer.into(), address.clone().into())) .collect::>(), ); config.discovery_limit(u64::from(network_config.default_peers_set.out_peers) + 15); @@ -544,7 +555,7 @@ where let result = Behaviour::new( protocol, user_agent, - local_public, + local_public.into(), discovery_config, request_response_protocols, Arc::clone(&peer_store_handle), @@ -604,14 +615,14 @@ where }; // Listen on multiaddresses. - for addr in &network_config.listen_addresses { + for addr in &listen_addresses { if let Err(err) = Swarm::>::listen_on(&mut swarm, addr.clone()) { warn!(target: "sub-libp2p", "Can't listen on {} because: {:?}", addr, err) } } // Add external addresses. - for addr in &network_config.public_addresses { + for addr in &public_addresses { Swarm::>::add_external_address( &mut swarm, addr.clone(), @@ -619,15 +630,15 @@ where ); } - let listen_addresses = Arc::new(Mutex::new(HashSet::new())); + let listen_addresses_set = Arc::new(Mutex::new(HashSet::new())); let service = Arc::new(NetworkService { bandwidth, external_addresses, - listen_addresses: listen_addresses.clone(), + listen_addresses: listen_addresses_set.clone(), num_connected: num_connected.clone(), local_peer_id, - local_identity, + local_identity: local_identity.into(), to_worker, notification_protocol_ids, protocol_handles, @@ -638,7 +649,7 @@ where }); Ok(NetworkWorker { - listen_addresses, + listen_addresses: listen_addresses_set, num_connected, network_service: swarm, service, @@ -880,13 +891,13 @@ where H: ExHashT, { /// Returns the local external addresses. - fn external_addresses(&self) -> Vec { - self.external_addresses.lock().iter().cloned().collect() + fn external_addresses(&self) -> Vec { + self.external_addresses.lock().iter().cloned().map(Into::into).collect() } /// Returns the listener addresses (without trailing `/p2p/` with our `PeerId`). 
- fn listen_addresses(&self) -> Vec { - self.listen_addresses.lock().iter().cloned().collect() + fn listen_addresses(&self) -> Vec { + self.listen_addresses.lock().iter().cloned().map(Into::into).collect() } /// Returns the local Peer ID. @@ -998,10 +1009,14 @@ where self.sync_protocol_handle.set_reserved_only(reserved_only); } - fn add_known_address(&self, peer_id: sc_network_types::PeerId, addr: Multiaddr) { + fn add_known_address( + &self, + peer_id: sc_network_types::PeerId, + addr: sc_network_types::multiaddr::Multiaddr, + ) { let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id.into(), addr)); + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id.into(), addr.into())); } fn report_peer(&self, peer_id: sc_network_types::PeerId, cost_benefit: ReputationChange) { @@ -1034,7 +1049,7 @@ where let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AddKnownAddress( peer.peer_id.into(), - peer.multiaddr, + peer.multiaddr.into(), )); self.sync_protocol_handle.add_reserved_peer(peer.peer_id.into()); @@ -1048,16 +1063,16 @@ where fn set_reserved_peers( &self, protocol: ProtocolName, - peers: HashSet, + peers: HashSet, ) -> Result<(), String> { let Some(set_id) = self.notification_protocol_ids.get(&protocol) else { return Err(format!("Cannot set reserved peers for unknown protocol: {}", protocol)) }; + let peers: HashSet = peers.into_iter().map(Into::into).collect(); let peers_addrs = self.split_multiaddr_and_peer_id(peers)?; - let mut peers: HashSet = - HashSet::with_capacity(peers_addrs.len()); + let mut peers: HashSet = HashSet::with_capacity(peers_addrs.len()); for (peer_id, addr) in peers_addrs.into_iter() { // Make sure the local peer ID is never added to the PSM. @@ -1074,8 +1089,7 @@ where } } - self.protocol_handles[usize::from(*set_id)] - .set_reserved_peers(peers.iter().map(|peer| (*peer).into()).collect()); + self.protocol_handles[usize::from(*set_id)].set_reserved_peers(peers); Ok(()) } @@ -1083,7 +1097,7 @@ where fn add_peers_to_reserved_set( &self, protocol: ProtocolName, - peers: HashSet, + peers: HashSet, ) -> Result<(), String> { let Some(set_id) = self.notification_protocol_ids.get(&protocol) else { return Err(format!( @@ -1092,6 +1106,7 @@ where )) }; + let peers: HashSet = peers.into_iter().map(Into::into).collect(); let peers = self.split_multiaddr_and_peer_id(peers)?; for (peer_id, addr) in peers.into_iter() { @@ -1723,8 +1738,8 @@ where { if let DialError::WrongPeerId { obtained, endpoint } = &error { if let ConnectedPoint::Dialer { address, role_override: _ } = endpoint { - let address_without_peer_id = parse_addr(address.clone()) - .map_or_else(|_| address.clone(), |r| r.1); + let address_without_peer_id = parse_addr(address.clone().into()) + .map_or_else(|_| address.clone(), |r| r.1.into()); // Only report for address of boot node that was added at startup of // the node and not for any address that the node learned of the @@ -1860,14 +1875,14 @@ where } pub(crate) fn ensure_addresses_consistent_with_transport<'a>( - addresses: impl Iterator, + addresses: impl Iterator, transport: &TransportConfig, ) -> Result<(), Error> { + use sc_network_types::multiaddr::Protocol; + if matches!(transport, TransportConfig::MemoryOnly) { let addresses: Vec<_> = addresses - .filter(|x| { - x.iter().any(|y| !matches!(y, libp2p::core::multiaddr::Protocol::Memory(_))) - }) + .filter(|x| x.iter().any(|y| !matches!(y, Protocol::Memory(_)))) .cloned() .collect(); @@ -1879,7 +1894,7 @@ pub(crate) fn ensure_addresses_consistent_with_transport<'a>( } 
} else { let addresses: Vec<_> = addresses - .filter(|x| x.iter().any(|y| matches!(y, libp2p::core::multiaddr::Protocol::Memory(_)))) + .filter(|x| x.iter().any(|y| matches!(y, Protocol::Memory(_)))) .cloned() .collect(); diff --git a/substrate/client/network/src/service/traits.rs b/substrate/client/network/src/service/traits.rs index 9bbaeb1026f..d1ea9a2ed56 100644 --- a/substrate/client/network/src/service/traits.rs +++ b/substrate/client/network/src/service/traits.rs @@ -28,7 +28,7 @@ use crate::{ request_responses::{IfDisconnected, RequestFailure}, service::{metrics::NotificationMetrics, signature::Signature, PeerStoreProvider}, types::ProtocolName, - Multiaddr, ReputationChange, + ReputationChange, }; use futures::{channel::oneshot, Stream}; @@ -36,7 +36,7 @@ use prometheus_endpoint::Registry; use sc_client_api::BlockBackend; use sc_network_common::{role::ObservedRole, ExHashT}; -use sc_network_types::PeerId; +use sc_network_types::{multiaddr::Multiaddr, PeerId}; use sp_runtime::traits::Block as BlockT; use std::{collections::HashSet, fmt::Debug, future::Future, pin::Pin, sync::Arc, time::Duration}; diff --git a/substrate/client/network/sync/src/service/mock.rs b/substrate/client/network/sync/src/service/mock.rs index 2e7e12af53d..141edc7c884 100644 --- a/substrate/client/network/sync/src/service/mock.rs +++ b/substrate/client/network/sync/src/service/mock.rs @@ -23,10 +23,10 @@ use sc_network::{ config::MultiaddrWithPeerId, request_responses::{IfDisconnected, RequestFailure}, types::ProtocolName, - Multiaddr, NetworkPeers, NetworkRequest, NetworkSyncForkRequest, ReputationChange, + NetworkPeers, NetworkRequest, NetworkSyncForkRequest, ReputationChange, }; use sc_network_common::role::ObservedRole; -use sc_network_types::PeerId; +use sc_network_types::{multiaddr::Multiaddr, PeerId}; use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::collections::HashSet; diff --git a/substrate/client/network/test/src/fuzz.rs b/substrate/client/network/test/src/fuzz.rs index 69d08d47d26..b0cd6dcf999 100644 --- a/substrate/client/network/test/src/fuzz.rs +++ b/substrate/client/network/test/src/fuzz.rs @@ -20,6 +20,7 @@ //! and `PeerStore` to discover possible inconsistencies in peer management. 
use futures::prelude::*; +use libp2p::PeerId; use rand::{ distributions::{Distribution, Uniform, WeightedIndex}, seq::IteratorRandom, @@ -27,7 +28,7 @@ use rand::{ use sc_network::{ peer_store::{PeerStore, PeerStoreProvider}, protocol_controller::{IncomingIndex, Message, ProtoSetConfig, ProtocolController, SetId}, - PeerId, ReputationChange, + ReputationChange, }; use sc_utils::mpsc::tracing_unbounded; use std::{ diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index 48a4b3d6e6e..8a8f9608051 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -35,7 +35,7 @@ use std::{ }; use futures::{channel::oneshot, future::BoxFuture, pin_mut, prelude::*}; -use libp2p::{build_multiaddr, PeerId}; +use libp2p::PeerId; use log::trace; use parking_lot::Mutex; use sc_block_builder::{BlockBuilder, BlockBuilderBuilder}; @@ -57,8 +57,8 @@ use sc_network::{ peer_store::PeerStore, request_responses::ProtocolConfig as RequestResponseConfig, types::ProtocolName, - Multiaddr, NetworkBlock, NetworkService, NetworkStateInfo, NetworkSyncForkRequest, - NetworkWorker, NotificationMetrics, NotificationService, + NetworkBlock, NetworkService, NetworkStateInfo, NetworkSyncForkRequest, NetworkWorker, + NotificationMetrics, NotificationService, }; use sc_network_common::role::Roles; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; @@ -71,6 +71,7 @@ use sc_network_sync::{ }, warp_request_handler, }; +use sc_network_types::{build_multiaddr, multiaddr::Multiaddr}; use sc_service::client::Client; use sp_blockchain::{ Backend as BlockchainBackend, HeaderBackend, Info as BlockchainInfo, Result as ClientResult, @@ -985,7 +986,7 @@ pub trait TestNetFactory: Default + Sized + Send { for peer in peers.iter_mut() { peer.network.add_known_address( network.service().local_peer_id().into(), - listen_addr.clone(), + listen_addr.clone().into(), ); } diff --git a/substrate/client/network/types/Cargo.toml b/substrate/client/network/types/Cargo.toml index 8815ccdca3c..f9d9330a439 100644 --- a/substrate/client/network/types/Cargo.toml +++ b/substrate/client/network/types/Cargo.toml @@ -11,9 +11,14 @@ documentation = "https://docs.rs/sc-network-types" [dependencies] bs58 = "0.5.0" +ed25519-dalek = "2.1" libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } litep2p = { git = "https://github.com/paritytech/litep2p", rev = "e03a6023882db111beeb24d8c0ceaac0721d3f0f" } multiaddr = "0.17.0" multihash = { version = "0.17.0", default-features = false, features = ["identity", "multihash-impl", "sha2", "std"] } rand = "0.8.5" thiserror = "1.0.48" +zeroize = { version = "1.7.0", default-features = false } + +[dev-dependencies] +quickcheck = "1.0.3" diff --git a/substrate/client/network/types/src/ed25519.rs b/substrate/client/network/types/src/ed25519.rs new file mode 100644 index 00000000000..e85f405b130 --- /dev/null +++ b/substrate/client/network/types/src/ed25519.rs @@ -0,0 +1,551 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Ed25519 keys. + +use crate::PeerId; +use core::{cmp, fmt, hash}; +use ed25519_dalek::{self as ed25519, Signer as _, Verifier as _}; +use libp2p_identity::ed25519 as libp2p_ed25519; +use litep2p::crypto::ed25519 as litep2p_ed25519; +use zeroize::Zeroize; + +/// An Ed25519 keypair. +#[derive(Clone)] +pub struct Keypair(ed25519::SigningKey); + +impl Keypair { + /// Generate a new random Ed25519 keypair. + pub fn generate() -> Keypair { + Keypair::from(SecretKey::generate()) + } + + /// Convert the keypair into a byte array by concatenating the bytes + /// of the secret scalar and the compressed public point, + /// an informal standard for encoding Ed25519 keypairs. + pub fn to_bytes(&self) -> [u8; 64] { + self.0.to_keypair_bytes() + } + + /// Try to parse a keypair from the [binary format](https://datatracker.ietf.org/doc/html/rfc8032#section-5.1.5) + /// produced by [`Keypair::to_bytes`], zeroing the input on success. + /// + /// Note that this binary format is the same as `ed25519_dalek`'s and `ed25519_zebra`'s. + pub fn try_from_bytes(kp: &mut [u8]) -> Result { + let bytes = <[u8; 64]>::try_from(&*kp) + .map_err(|e| DecodingError::KeypairParseError(Box::new(e)))?; + + ed25519::SigningKey::from_keypair_bytes(&bytes) + .map(|k| { + kp.zeroize(); + Keypair(k) + }) + .map_err(|e| DecodingError::KeypairParseError(Box::new(e))) + } + + /// Sign a message using the private key of this keypair. + pub fn sign(&self, msg: &[u8]) -> Vec { + self.0.sign(msg).to_bytes().to_vec() + } + + /// Get the public key of this keypair. + pub fn public(&self) -> PublicKey { + PublicKey(self.0.verifying_key()) + } + + /// Get the secret key of this keypair. + pub fn secret(&self) -> SecretKey { + SecretKey(self.0.to_bytes()) + } +} + +impl fmt::Debug for Keypair { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Keypair").field("public", &self.0.verifying_key()).finish() + } +} + +impl From for Keypair { + fn from(kp: litep2p_ed25519::Keypair) -> Self { + Self::try_from_bytes(&mut kp.encode()) + .expect("ed25519_dalek in substrate & litep2p to use the same format") + } +} + +impl From for litep2p_ed25519::Keypair { + fn from(kp: Keypair) -> Self { + Self::decode(&mut kp.to_bytes()) + .expect("ed25519_dalek in substrate & litep2p to use the same format") + } +} + +impl From for Keypair { + fn from(kp: libp2p_ed25519::Keypair) -> Self { + Self::try_from_bytes(&mut kp.to_bytes()) + .expect("ed25519_dalek in substrate & libp2p to use the same format") + } +} + +impl From for libp2p_ed25519::Keypair { + fn from(kp: Keypair) -> Self { + Self::try_from_bytes(&mut kp.to_bytes()) + .expect("ed25519_dalek in substrate & libp2p to use the same format") + } +} + +/// Demote an Ed25519 keypair to a secret key. +impl From for SecretKey { + fn from(kp: Keypair) -> SecretKey { + SecretKey(kp.0.to_bytes()) + } +} + +/// Promote an Ed25519 secret key into a keypair. +impl From for Keypair { + fn from(sk: SecretKey) -> Keypair { + let signing = ed25519::SigningKey::from_bytes(&sk.0); + Keypair(signing) + } +} + +/// An Ed25519 public key. 
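+///
+/// Illustrative sign/verify round-trip (a sketch relying only on the
+/// `Keypair` and `PublicKey` APIs defined in this module):
+///
+/// ```
+/// # use sc_network_types::ed25519::Keypair;
+/// let keypair = Keypair::generate();
+/// let signature = keypair.sign(b"hello");
+/// // The signature verifies under the corresponding public key.
+/// assert!(keypair.public().verify(b"hello", &signature));
+/// ```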
+#[derive(Eq, Clone)] +pub struct PublicKey(ed25519::VerifyingKey); + +impl fmt::Debug for PublicKey { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("PublicKey(compressed): ")?; + for byte in self.0.as_bytes() { + write!(f, "{byte:x}")?; + } + Ok(()) + } +} + +impl cmp::PartialEq for PublicKey { + fn eq(&self, other: &Self) -> bool { + self.0.as_bytes().eq(other.0.as_bytes()) + } +} + +impl hash::Hash for PublicKey { + fn hash(&self, state: &mut H) { + self.0.as_bytes().hash(state); + } +} + +impl cmp::PartialOrd for PublicKey { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl cmp::Ord for PublicKey { + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.0.as_bytes().cmp(other.0.as_bytes()) + } +} + +impl PublicKey { + /// Verify the Ed25519 signature on a message using the public key. + pub fn verify(&self, msg: &[u8], sig: &[u8]) -> bool { + ed25519::Signature::try_from(sig).and_then(|s| self.0.verify(msg, &s)).is_ok() + } + + /// Convert the public key to a byte array in compressed form, i.e. + /// where one coordinate is represented by a single bit. + pub fn to_bytes(&self) -> [u8; 32] { + self.0.to_bytes() + } + + /// Try to parse a public key from a byte array containing the actual key as produced by + /// `to_bytes`. + pub fn try_from_bytes(k: &[u8]) -> Result { + let k = + <[u8; 32]>::try_from(k).map_err(|e| DecodingError::PublicKeyParseError(Box::new(e)))?; + ed25519::VerifyingKey::from_bytes(&k) + .map_err(|e| DecodingError::PublicKeyParseError(Box::new(e))) + .map(PublicKey) + } + + /// Convert public key to `PeerId`. + pub fn to_peer_id(&self) -> PeerId { + litep2p::PeerId::from(litep2p::crypto::PublicKey::Ed25519(self.clone().into())).into() + } +} + +impl From for PublicKey { + fn from(k: litep2p_ed25519::PublicKey) -> Self { + Self::try_from_bytes(&k.encode()) + .expect("ed25519_dalek in substrate & litep2p to use the same format") + } +} + +impl From for litep2p_ed25519::PublicKey { + fn from(k: PublicKey) -> Self { + Self::decode(&k.to_bytes()) + .expect("ed25519_dalek in substrate & litep2p to use the same format") + } +} + +impl From for PublicKey { + fn from(k: libp2p_ed25519::PublicKey) -> Self { + Self::try_from_bytes(&k.to_bytes()) + .expect("ed25519_dalek in substrate & libp2p to use the same format") + } +} + +impl From for libp2p_ed25519::PublicKey { + fn from(k: PublicKey) -> Self { + Self::try_from_bytes(&k.to_bytes()) + .expect("ed25519_dalek in substrate & libp2p to use the same format") + } +} + +/// An Ed25519 secret key. +#[derive(Clone)] +pub struct SecretKey(ed25519::SecretKey); + +/// View the bytes of the secret key. +impl AsRef<[u8]> for SecretKey { + fn as_ref(&self) -> &[u8] { + &self.0[..] + } +} + +impl fmt::Debug for SecretKey { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "SecretKey") + } +} + +impl SecretKey { + /// Generate a new Ed25519 secret key. + pub fn generate() -> SecretKey { + let signing = ed25519::SigningKey::generate(&mut rand::rngs::OsRng); + SecretKey(signing.to_bytes()) + } + + /// Try to parse an Ed25519 secret key from a byte slice + /// containing the actual key, zeroing the input on success. + /// If the bytes do not constitute a valid Ed25519 secret key, an error is + /// returned. 
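+ ///
+ /// A sketch of the zeroing behaviour (uses only APIs defined in this module):
+ ///
+ /// ```
+ /// # use sc_network_types::ed25519::SecretKey;
+ /// let mut bytes = SecretKey::generate().to_bytes();
+ /// let _sk = SecretKey::try_from_bytes(&mut bytes).expect("valid 32-byte key");
+ /// // On success the input buffer has been zeroed.
+ /// assert_eq!(bytes, [0u8; 32]);
+ /// ```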
+ pub fn try_from_bytes(mut sk_bytes: impl AsMut<[u8]>) -> Result { + let sk_bytes = sk_bytes.as_mut(); + let secret = <[u8; 32]>::try_from(&*sk_bytes) + .map_err(|e| DecodingError::SecretKeyParseError(Box::new(e)))?; + sk_bytes.zeroize(); + Ok(SecretKey(secret)) + } + + pub fn to_bytes(&self) -> [u8; 32] { + self.0 + } +} + +impl Drop for SecretKey { + fn drop(&mut self) { + self.0.zeroize(); + } +} + +impl From for SecretKey { + fn from(sk: litep2p_ed25519::SecretKey) -> Self { + Self::try_from_bytes(&mut sk.to_bytes()).expect("Ed25519 key to be 32 bytes length") + } +} + +impl From for litep2p_ed25519::SecretKey { + fn from(sk: SecretKey) -> Self { + Self::from_bytes(&mut sk.to_bytes()) + .expect("litep2p `SecretKey` to accept 32 bytes as Ed25519 key") + } +} + +impl From for SecretKey { + fn from(sk: libp2p_ed25519::SecretKey) -> Self { + Self::try_from_bytes(&mut sk.as_ref().to_owned()) + .expect("Ed25519 key to be 32 bytes length") + } +} + +impl From for libp2p_ed25519::SecretKey { + fn from(sk: SecretKey) -> Self { + Self::try_from_bytes(&mut sk.to_bytes()) + .expect("libp2p `SecretKey` to accept 32 bytes as Ed25519 key") + } +} + +/// Error when decoding `ed25519`-related types. +#[derive(Debug, thiserror::Error)] +pub enum DecodingError { + #[error("failed to parse Ed25519 keypair: {0}")] + KeypairParseError(Box), + #[error("failed to parse Ed25519 secret key: {0}")] + SecretKeyParseError(Box), + #[error("failed to parse Ed25519 public key: {0}")] + PublicKeyParseError(Box), +} + +#[cfg(test)] +mod tests { + use super::*; + use quickcheck::*; + + fn eq_keypairs(kp1: &Keypair, kp2: &Keypair) -> bool { + kp1.public() == kp2.public() && kp1.0.to_bytes() == kp2.0.to_bytes() + } + + #[test] + fn ed25519_keypair_encode_decode() { + fn prop() -> bool { + let kp1 = Keypair::generate(); + let mut kp1_enc = kp1.to_bytes(); + let kp2 = Keypair::try_from_bytes(&mut kp1_enc).unwrap(); + eq_keypairs(&kp1, &kp2) && kp1_enc.iter().all(|b| *b == 0) + } + QuickCheck::new().tests(10).quickcheck(prop as fn() -> _); + } + + #[test] + fn ed25519_keypair_from_secret() { + fn prop() -> bool { + let kp1 = Keypair::generate(); + let mut sk = kp1.0.to_bytes(); + let kp2 = Keypair::from(SecretKey::try_from_bytes(&mut sk).unwrap()); + eq_keypairs(&kp1, &kp2) && sk == [0u8; 32] + } + QuickCheck::new().tests(10).quickcheck(prop as fn() -> _); + } + + #[test] + fn ed25519_signature() { + let kp = Keypair::generate(); + let pk = kp.public(); + + let msg = "hello world".as_bytes(); + let sig = kp.sign(msg); + assert!(pk.verify(msg, &sig)); + + let mut invalid_sig = sig.clone(); + invalid_sig[3..6].copy_from_slice(&[10, 23, 42]); + assert!(!pk.verify(msg, &invalid_sig)); + + let invalid_msg = "h3ll0 w0rld".as_bytes(); + assert!(!pk.verify(invalid_msg, &sig)); + } + + #[test] + fn substrate_kp_to_libs() { + let kp = Keypair::generate(); + let kp_bytes = kp.to_bytes(); + let kp1: libp2p_ed25519::Keypair = kp.clone().into(); + let kp2: litep2p_ed25519::Keypair = kp.clone().into(); + let kp3 = libp2p_ed25519::Keypair::try_from_bytes(&mut kp_bytes.clone()).unwrap(); + let kp4 = litep2p_ed25519::Keypair::decode(&mut kp_bytes.clone()).unwrap(); + + assert_eq!(kp_bytes, kp1.to_bytes()); + assert_eq!(kp_bytes, kp2.encode()); + + let msg = "hello world".as_bytes(); + let sig = kp.sign(msg); + let sig1 = kp1.sign(msg); + let sig2 = kp2.sign(msg); + let sig3 = kp3.sign(msg); + let sig4 = kp4.sign(msg); + + assert_eq!(sig, sig1); + assert_eq!(sig, sig2); + assert_eq!(sig, sig3); + assert_eq!(sig, sig4); + + let pk1 = 
kp1.public(); + let pk2 = kp2.public(); + let pk3 = kp3.public(); + let pk4 = kp4.public(); + + assert!(pk1.verify(msg, &sig)); + assert!(pk2.verify(msg, &sig)); + assert!(pk3.verify(msg, &sig)); + assert!(pk4.verify(msg, &sig)); + } + + #[test] + fn litep2p_kp_to_substrate_kp() { + let kp = litep2p_ed25519::Keypair::generate(); + let kp1: Keypair = kp.clone().into(); + let kp2 = Keypair::try_from_bytes(&mut kp.encode()).unwrap(); + + assert_eq!(kp.encode(), kp1.to_bytes()); + + let msg = "hello world".as_bytes(); + let sig = kp.sign(msg); + let sig1 = kp1.sign(msg); + let sig2 = kp2.sign(msg); + + assert_eq!(sig, sig1); + assert_eq!(sig, sig2); + + let pk1 = kp1.public(); + let pk2 = kp2.public(); + + assert!(pk1.verify(msg, &sig)); + assert!(pk2.verify(msg, &sig)); + } + + #[test] + fn libp2p_kp_to_substrate_kp() { + let kp = libp2p_ed25519::Keypair::generate(); + let kp1: Keypair = kp.clone().into(); + let kp2 = Keypair::try_from_bytes(&mut kp.to_bytes()).unwrap(); + + assert_eq!(kp.to_bytes(), kp1.to_bytes()); + + let msg = "hello world".as_bytes(); + let sig = kp.sign(msg); + let sig1 = kp1.sign(msg); + let sig2 = kp2.sign(msg); + + assert_eq!(sig, sig1); + assert_eq!(sig, sig2); + + let pk1 = kp1.public(); + let pk2 = kp2.public(); + + assert!(pk1.verify(msg, &sig)); + assert!(pk2.verify(msg, &sig)); + } + + #[test] + fn substrate_pk_to_libs() { + let kp = Keypair::generate(); + let pk = kp.public(); + let pk_bytes = pk.to_bytes(); + let pk1: libp2p_ed25519::PublicKey = pk.clone().into(); + let pk2: litep2p_ed25519::PublicKey = pk.clone().into(); + let pk3 = libp2p_ed25519::PublicKey::try_from_bytes(&pk_bytes).unwrap(); + let pk4 = litep2p_ed25519::PublicKey::decode(&pk_bytes).unwrap(); + + assert_eq!(pk_bytes, pk1.to_bytes()); + assert_eq!(pk_bytes, pk2.encode()); + + let msg = "hello world".as_bytes(); + let sig = kp.sign(msg); + + assert!(pk.verify(msg, &sig)); + assert!(pk1.verify(msg, &sig)); + assert!(pk2.verify(msg, &sig)); + assert!(pk3.verify(msg, &sig)); + assert!(pk4.verify(msg, &sig)); + } + + #[test] + fn litep2p_pk_to_substrate_pk() { + let kp = litep2p_ed25519::Keypair::generate(); + let pk = kp.public(); + let pk_bytes = pk.clone().encode(); + let pk1: PublicKey = pk.clone().into(); + let pk2 = PublicKey::try_from_bytes(&pk_bytes).unwrap(); + + assert_eq!(pk_bytes, pk1.to_bytes()); + + let msg = "hello world".as_bytes(); + let sig = kp.sign(msg); + + assert!(pk.verify(msg, &sig)); + assert!(pk1.verify(msg, &sig)); + assert!(pk2.verify(msg, &sig)); + } + + #[test] + fn libp2p_pk_to_substrate_pk() { + let kp = libp2p_ed25519::Keypair::generate(); + let pk = kp.public(); + let pk_bytes = pk.clone().to_bytes(); + let pk1: PublicKey = pk.clone().into(); + let pk2 = PublicKey::try_from_bytes(&pk_bytes).unwrap(); + + assert_eq!(pk_bytes, pk1.to_bytes()); + + let msg = "hello world".as_bytes(); + let sig = kp.sign(msg); + + assert!(pk.verify(msg, &sig)); + assert!(pk1.verify(msg, &sig)); + assert!(pk2.verify(msg, &sig)); + } + + #[test] + fn substrate_sk_to_libs() { + let sk = SecretKey::generate(); + let sk_bytes = sk.to_bytes(); + let sk1: libp2p_ed25519::SecretKey = sk.clone().into(); + let sk2: litep2p_ed25519::SecretKey = sk.clone().into(); + let sk3 = libp2p_ed25519::SecretKey::try_from_bytes(&mut sk_bytes.clone()).unwrap(); + let sk4 = litep2p_ed25519::SecretKey::from_bytes(&mut sk_bytes.clone()).unwrap(); + + let kp: Keypair = sk.into(); + let kp1: libp2p_ed25519::Keypair = sk1.into(); + let kp2: litep2p_ed25519::Keypair = sk2.into(); + let kp3: 
libp2p_ed25519::Keypair = sk3.into(); + let kp4: litep2p_ed25519::Keypair = sk4.into(); + + let msg = "hello world".as_bytes(); + let sig = kp.sign(msg); + + assert_eq!(sig, kp1.sign(msg)); + assert_eq!(sig, kp2.sign(msg)); + assert_eq!(sig, kp3.sign(msg)); + assert_eq!(sig, kp4.sign(msg)); + } + + #[test] + fn litep2p_sk_to_substrate_sk() { + let sk = litep2p_ed25519::SecretKey::generate(); + let sk1: SecretKey = sk.clone().into(); + let sk2 = SecretKey::try_from_bytes(&mut sk.to_bytes()).unwrap(); + + let kp: litep2p_ed25519::Keypair = sk.into(); + let kp1: Keypair = sk1.into(); + let kp2: Keypair = sk2.into(); + + let msg = "hello world".as_bytes(); + let sig = kp.sign(msg); + + assert_eq!(sig, kp1.sign(msg)); + assert_eq!(sig, kp2.sign(msg)); + } + + #[test] + fn libp2p_sk_to_substrate_sk() { + let sk = libp2p_ed25519::SecretKey::generate(); + let sk_bytes = sk.as_ref().to_owned(); + let sk1: SecretKey = sk.clone().into(); + let sk2 = SecretKey::try_from_bytes(sk_bytes).unwrap(); + + let kp: libp2p_ed25519::Keypair = sk.into(); + let kp1: Keypair = sk1.into(); + let kp2: Keypair = sk2.into(); + + let msg = "hello world".as_bytes(); + let sig = kp.sign(msg); + + assert_eq!(sig, kp1.sign(msg)); + assert_eq!(sig, kp2.sign(msg)); + } +} diff --git a/substrate/client/network/types/src/lib.rs b/substrate/client/network/types/src/lib.rs index 9a126c48c7e..5684e38ab2e 100644 --- a/substrate/client/network/types/src/lib.rs +++ b/substrate/client/network/types/src/lib.rs @@ -13,6 +13,12 @@ // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. -mod peer_id; +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +pub mod ed25519; +pub mod multiaddr; +pub mod multihash; +mod peer_id; pub use peer_id::PeerId; diff --git a/substrate/client/network/types/src/multiaddr.rs b/substrate/client/network/types/src/multiaddr.rs new file mode 100644 index 00000000000..312bef9baab --- /dev/null +++ b/substrate/client/network/types/src/multiaddr.rs @@ -0,0 +1,251 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use litep2p::types::multiaddr::{ + Error as LiteP2pError, Iter as LiteP2pIter, Multiaddr as LiteP2pMultiaddr, + Protocol as LiteP2pProtocol, +}; +use std::{ + fmt::{self, Debug, Display}, + str::FromStr, +}; + +mod protocol; +pub use protocol::Protocol; + +// Re-export the macro under shorter name under `multiaddr`. +pub use crate::build_multiaddr as multiaddr; + +/// [`Multiaddr`] type used in Substrate. Converted to libp2p's `Multiaddr` +/// or litep2p's `Multiaddr` when passed to the corresponding network backend. 
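+///
+/// Illustrative usage (a sketch relying only on the `FromStr`, `iter` and
+/// `Protocol` APIs defined in this module):
+///
+/// ```
+/// # use sc_network_types::multiaddr::{Multiaddr, Protocol};
+/// let addr: Multiaddr = "/ip4/127.0.0.1/tcp/30333".parse().expect("valid multiaddr");
+/// // Components can be inspected without committing to a networking backend.
+/// assert!(addr.iter().any(|p| matches!(p, Protocol::Tcp(30333))));
+/// ```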
+ +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash)] +pub struct Multiaddr { + multiaddr: LiteP2pMultiaddr, +} + +impl Multiaddr { + /// Create a new, empty multiaddress. + pub fn empty() -> Self { + Self { multiaddr: LiteP2pMultiaddr::empty() } + } + + /// Adds an address component to the end of this multiaddr. + pub fn push(&mut self, p: Protocol<'_>) { + self.multiaddr.push(p.into()) + } + + /// Pops the last `Protocol` of this multiaddr, or `None` if the multiaddr is empty. + pub fn pop<'a>(&mut self) -> Option> { + self.multiaddr.pop().map(Into::into) + } + + /// Like [`Multiaddr::push`] but consumes `self`. + pub fn with(self, p: Protocol<'_>) -> Self { + self.multiaddr.with(p.into()).into() + } + + /// Returns the components of this multiaddress. + pub fn iter(&self) -> Iter<'_> { + self.multiaddr.iter().into() + } + + /// Return a copy of this [`Multiaddr`]'s byte representation. + pub fn to_vec(&self) -> Vec { + self.multiaddr.to_vec() + } +} + +impl Display for Multiaddr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + Display::fmt(&self.multiaddr, f) + } +} + +/// Remove an extra layer of nestedness by deferring to the wrapped value's [`Debug`]. +impl Debug for Multiaddr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + Debug::fmt(&self.multiaddr, f) + } +} + +impl AsRef<[u8]> for Multiaddr { + fn as_ref(&self) -> &[u8] { + self.multiaddr.as_ref() + } +} + +impl From for Multiaddr { + fn from(multiaddr: LiteP2pMultiaddr) -> Self { + Self { multiaddr } + } +} + +impl From for LiteP2pMultiaddr { + fn from(multiaddr: Multiaddr) -> Self { + multiaddr.multiaddr + } +} + +impl TryFrom> for Multiaddr { + type Error = ParseError; + + fn try_from(v: Vec) -> Result { + let multiaddr = LiteP2pMultiaddr::try_from(v)?; + Ok(Self { multiaddr }) + } +} + +/// Error when parsing a [`Multiaddr`] from string. +#[derive(Debug, thiserror::Error)] +pub enum ParseError { + /// Less data provided than indicated by length. + #[error("less data than indicated by length")] + DataLessThanLen, + /// Invalid multiaddress. + #[error("invalid multiaddress")] + InvalidMultiaddr, + /// Invalid protocol specification. + #[error("invalid protocol string")] + InvalidProtocolString, + /// Unknown protocol string identifier. + #[error("unknown protocol '{0}'")] + UnknownProtocolString(String), + /// Unknown protocol numeric id. + #[error("unknown protocol id {0}")] + UnknownProtocolId(u32), + /// Failed to decode unsigned varint. + #[error("failed to decode unsigned varint: {0}")] + InvalidUvar(Box), + /// Other error emitted when parsing into the wrapped type. 
+ #[error("multiaddr parsing error: {0}")] + ParsingError(Box), +} + +impl From for ParseError { + fn from(error: LiteP2pError) -> Self { + match error { + LiteP2pError::DataLessThanLen => ParseError::DataLessThanLen, + LiteP2pError::InvalidMultiaddr => ParseError::InvalidMultiaddr, + LiteP2pError::InvalidProtocolString => ParseError::InvalidProtocolString, + LiteP2pError::UnknownProtocolString(s) => ParseError::UnknownProtocolString(s), + LiteP2pError::UnknownProtocolId(n) => ParseError::UnknownProtocolId(n), + LiteP2pError::InvalidUvar(e) => ParseError::InvalidUvar(Box::new(e)), + LiteP2pError::ParsingError(e) => ParseError::ParsingError(e), + error => ParseError::ParsingError(Box::new(error)), + } + } +} + +impl FromStr for Multiaddr { + type Err = ParseError; + + fn from_str(s: &str) -> Result { + let multiaddr = LiteP2pMultiaddr::from_str(s)?; + Ok(Self { multiaddr }) + } +} + +impl TryFrom for Multiaddr { + type Error = ParseError; + + fn try_from(s: String) -> Result { + Self::from_str(&s) + } +} + +impl<'a> TryFrom<&'a str> for Multiaddr { + type Error = ParseError; + + fn try_from(s: &'a str) -> Result { + Self::from_str(s) + } +} + +/// Iterator over `Multiaddr` [`Protocol`]s. +pub struct Iter<'a>(LiteP2pIter<'a>); + +impl<'a> Iterator for Iter<'a> { + type Item = Protocol<'a>; + + fn next(&mut self) -> Option { + self.0.next().map(Into::into) + } +} + +impl<'a> From> for Iter<'a> { + fn from(iter: LiteP2pIter<'a>) -> Self { + Self(iter) + } +} + +impl<'a> IntoIterator for &'a Multiaddr { + type Item = Protocol<'a>; + type IntoIter = Iter<'a>; + + fn into_iter(self) -> Iter<'a> { + self.multiaddr.into_iter().into() + } +} + +impl<'a> FromIterator> for Multiaddr { + fn from_iter(iter: T) -> Self + where + T: IntoIterator>, + { + LiteP2pMultiaddr::from_iter(iter.into_iter().map(Into::into)).into() + } +} + +impl<'a> From> for Multiaddr { + fn from(p: Protocol<'a>) -> Multiaddr { + let protocol: LiteP2pProtocol = p.into(); + let multiaddr: LiteP2pMultiaddr = protocol.into(); + multiaddr.into() + } +} + +/// Easy way for a user to create a `Multiaddr`. +/// +/// Example: +/// +/// ```rust +/// use sc_network_types::build_multiaddr; +/// let addr = build_multiaddr!(Ip4([127, 0, 0, 1]), Tcp(10500u16)); +/// ``` +/// +/// Each element passed to `multiaddr!` should be a variant of the `Protocol` enum. The +/// optional parameter is turned into the proper type with the `Into` trait. +/// +/// For example, `Ip4([127, 0, 0, 1])` works because `Ipv4Addr` implements `From<[u8; 4]>`. +#[macro_export] +macro_rules! build_multiaddr { + ($($comp:ident $(($param:expr))*),+) => { + { + use std::iter; + let elem = iter::empty::<$crate::multiaddr::Protocol>(); + $( + let elem = { + let cmp = $crate::multiaddr::Protocol::$comp $(( $param.into() ))*; + elem.chain(iter::once(cmp)) + }; + )+ + elem.collect::<$crate::multiaddr::Multiaddr>() + } + } +} diff --git a/substrate/client/network/types/src/multiaddr/protocol.rs b/substrate/client/network/types/src/multiaddr/protocol.rs new file mode 100644 index 00000000000..800d08fe36b --- /dev/null +++ b/substrate/client/network/types/src/multiaddr/protocol.rs @@ -0,0 +1,138 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::multihash::Multihash; +use litep2p::types::multiaddr::Protocol as LiteP2pProtocol; +use std::{ + borrow::Cow, + net::{Ipv4Addr, Ipv6Addr}, +}; + +/// [`Protocol`] describes all possible multiaddress protocols. +#[derive(PartialEq, Eq, Clone, Debug)] +pub enum Protocol<'a> { + Dccp(u16), + Dns(Cow<'a, str>), + Dns4(Cow<'a, str>), + Dns6(Cow<'a, str>), + Dnsaddr(Cow<'a, str>), + Http, + Https, + Ip4(Ipv4Addr), + Ip6(Ipv6Addr), + P2pWebRtcDirect, + P2pWebRtcStar, + WebRTC, + Certhash(Multihash), + P2pWebSocketStar, + /// Contains the "port" to contact. Similar to TCP or UDP, 0 means "assign me a port". + Memory(u64), + Onion(Cow<'a, [u8; 10]>, u16), + Onion3(Cow<'a, [u8; 35]>, u16), + P2p(Multihash), + P2pCircuit, + Quic, + QuicV1, + Sctp(u16), + Tcp(u16), + Tls, + Noise, + Udp(u16), + Udt, + Unix(Cow<'a, str>), + Utp, + Ws(Cow<'a, str>), + Wss(Cow<'a, str>), +} + +impl<'a> From> for Protocol<'a> { + fn from(protocol: LiteP2pProtocol<'a>) -> Self { + match protocol { + LiteP2pProtocol::Dccp(port) => Protocol::Dccp(port), + LiteP2pProtocol::Dns(str) => Protocol::Dns(str), + LiteP2pProtocol::Dns4(str) => Protocol::Dns4(str), + LiteP2pProtocol::Dns6(str) => Protocol::Dns6(str), + LiteP2pProtocol::Dnsaddr(str) => Protocol::Dnsaddr(str), + LiteP2pProtocol::Http => Protocol::Http, + LiteP2pProtocol::Https => Protocol::Https, + LiteP2pProtocol::Ip4(ipv4_addr) => Protocol::Ip4(ipv4_addr), + LiteP2pProtocol::Ip6(ipv6_addr) => Protocol::Ip6(ipv6_addr), + LiteP2pProtocol::P2pWebRtcDirect => Protocol::P2pWebRtcDirect, + LiteP2pProtocol::P2pWebRtcStar => Protocol::P2pWebRtcStar, + LiteP2pProtocol::WebRTC => Protocol::WebRTC, + LiteP2pProtocol::Certhash(multihash) => Protocol::Certhash(multihash.into()), + LiteP2pProtocol::P2pWebSocketStar => Protocol::P2pWebSocketStar, + LiteP2pProtocol::Memory(port) => Protocol::Memory(port), + LiteP2pProtocol::Onion(str, port) => Protocol::Onion(str, port), + LiteP2pProtocol::Onion3(addr) => + Protocol::Onion3(Cow::Owned(*addr.hash()), addr.port()), + LiteP2pProtocol::P2p(multihash) => Protocol::P2p(multihash.into()), + LiteP2pProtocol::P2pCircuit => Protocol::P2pCircuit, + LiteP2pProtocol::Quic => Protocol::Quic, + LiteP2pProtocol::QuicV1 => Protocol::QuicV1, + LiteP2pProtocol::Sctp(port) => Protocol::Sctp(port), + LiteP2pProtocol::Tcp(port) => Protocol::Tcp(port), + LiteP2pProtocol::Tls => Protocol::Tls, + LiteP2pProtocol::Noise => Protocol::Noise, + LiteP2pProtocol::Udp(port) => Protocol::Udp(port), + LiteP2pProtocol::Udt => Protocol::Udt, + LiteP2pProtocol::Unix(str) => Protocol::Unix(str), + LiteP2pProtocol::Utp => Protocol::Utp, + LiteP2pProtocol::Ws(str) => Protocol::Ws(str), + LiteP2pProtocol::Wss(str) => Protocol::Wss(str), + } + } +} + +impl<'a> From> for LiteP2pProtocol<'a> { + fn from(protocol: Protocol<'a>) -> Self { + match protocol { + Protocol::Dccp(port) 
=> LiteP2pProtocol::Dccp(port),
+ Protocol::Dns(str) => LiteP2pProtocol::Dns(str),
+ Protocol::Dns4(str) => LiteP2pProtocol::Dns4(str),
+ Protocol::Dns6(str) => LiteP2pProtocol::Dns6(str),
+ Protocol::Dnsaddr(str) => LiteP2pProtocol::Dnsaddr(str),
+ Protocol::Http => LiteP2pProtocol::Http,
+ Protocol::Https => LiteP2pProtocol::Https,
+ Protocol::Ip4(ipv4_addr) => LiteP2pProtocol::Ip4(ipv4_addr),
+ Protocol::Ip6(ipv6_addr) => LiteP2pProtocol::Ip6(ipv6_addr),
+ Protocol::P2pWebRtcDirect => LiteP2pProtocol::P2pWebRtcDirect,
+ Protocol::P2pWebRtcStar => LiteP2pProtocol::P2pWebRtcStar,
+ Protocol::WebRTC => LiteP2pProtocol::WebRTC,
+ Protocol::Certhash(multihash) => LiteP2pProtocol::Certhash(multihash.into()),
+ Protocol::P2pWebSocketStar => LiteP2pProtocol::P2pWebSocketStar,
+ Protocol::Memory(port) => LiteP2pProtocol::Memory(port),
+ Protocol::Onion(str, port) => LiteP2pProtocol::Onion(str, port),
+ Protocol::Onion3(str, port) => LiteP2pProtocol::Onion3((str.into_owned(), port).into()),
+ Protocol::P2p(multihash) => LiteP2pProtocol::P2p(multihash.into()),
+ Protocol::P2pCircuit => LiteP2pProtocol::P2pCircuit,
+ Protocol::Quic => LiteP2pProtocol::Quic,
+ Protocol::QuicV1 => LiteP2pProtocol::QuicV1,
+ Protocol::Sctp(port) => LiteP2pProtocol::Sctp(port),
+ Protocol::Tcp(port) => LiteP2pProtocol::Tcp(port),
+ Protocol::Tls => LiteP2pProtocol::Tls,
+ Protocol::Noise => LiteP2pProtocol::Noise,
+ Protocol::Udp(port) => LiteP2pProtocol::Udp(port),
+ Protocol::Udt => LiteP2pProtocol::Udt,
+ Protocol::Unix(str) => LiteP2pProtocol::Unix(str),
+ Protocol::Utp => LiteP2pProtocol::Utp,
+ Protocol::Ws(str) => LiteP2pProtocol::Ws(str),
+ Protocol::Wss(str) => LiteP2pProtocol::Wss(str),
+ }
+ }
+}
diff --git a/substrate/client/network/types/src/multihash.rs b/substrate/client/network/types/src/multihash.rs
new file mode 100644
index 00000000000..91f5b6353a7
--- /dev/null
+++ b/substrate/client/network/types/src/multihash.rs
@@ -0,0 +1,192 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! [`Multihash`] implementation used by Substrate. Currently it's a wrapper over the
+//! multihash used by litep2p, but it can be switched to another implementation if needed.
+
+use litep2p::types::multihash::{
+ Code as LiteP2pCode, Error as LiteP2pError, Multihash as LiteP2pMultihash, MultihashDigest as _,
+};
+use std::fmt::{self, Debug};
+
+/// Default [`Multihash`] implementations. Only hashes used by Substrate are defined.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum Code {
+ /// Identity hasher.
+ Identity,
+ /// SHA-256 (32-byte hash size).
+ Sha2_256,
+}
+
+impl Code {
+ /// Calculate digest using this [`Code`]'s hashing algorithm.
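+ ///
+ /// Illustrative usage (a sketch using only the `Code` and `Multihash` APIs
+ /// defined in this module):
+ ///
+ /// ```
+ /// # use sc_network_types::multihash::Code;
+ /// let hash = Code::Sha2_256.digest(b"hello");
+ /// // The resulting multihash carries the code of the hashing algorithm used.
+ /// assert_eq!(hash.code(), u64::from(Code::Sha2_256));
+ /// ```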
+ pub fn digest(&self, input: &[u8]) -> Multihash { + LiteP2pCode::from(*self).digest(input).into() + } +} + +/// Error generated when converting to [`Code`]. +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// Invalid multihash size. + #[error("invalid multihash size '{0}'")] + InvalidSize(u64), + /// The multihash code is not supported. + #[error("unsupported multihash code '{0:x}'")] + UnsupportedCode(u64), + /// Catch-all for other errors emitted when converting `u64` code to enum or parsing multihash + /// from bytes. Never generated as of multihash-0.17.0. + #[error("other error: {0}")] + Other(Box), +} + +impl From for Error { + fn from(error: LiteP2pError) -> Self { + match error { + LiteP2pError::InvalidSize(s) => Self::InvalidSize(s), + LiteP2pError::UnsupportedCode(c) => Self::UnsupportedCode(c), + e => Self::Other(Box::new(e)), + } + } +} + +impl From for LiteP2pCode { + fn from(code: Code) -> Self { + match code { + Code::Identity => LiteP2pCode::Identity, + Code::Sha2_256 => LiteP2pCode::Sha2_256, + } + } +} + +impl TryFrom for Code { + type Error = Error; + + fn try_from(code: LiteP2pCode) -> Result { + match code { + LiteP2pCode::Identity => Ok(Code::Identity), + LiteP2pCode::Sha2_256 => Ok(Code::Sha2_256), + _ => Err(Error::UnsupportedCode(code.into())), + } + } +} + +impl TryFrom for Code { + type Error = Error; + + fn try_from(code: u64) -> Result { + match LiteP2pCode::try_from(code) { + Ok(code) => code.try_into(), + Err(e) => Err(e.into()), + } + } +} + +impl From for u64 { + fn from(code: Code) -> Self { + LiteP2pCode::from(code).into() + } +} + +#[derive(Clone, Copy, Hash, PartialEq, Eq, Ord, PartialOrd)] +pub struct Multihash { + multihash: LiteP2pMultihash, +} + +impl Multihash { + /// Multihash code. + pub fn code(&self) -> u64 { + self.multihash.code() + } + + /// Multihash digest. + pub fn digest(&self) -> &[u8] { + self.multihash.digest() + } + + /// Wraps the digest in a multihash. + pub fn wrap(code: u64, input_digest: &[u8]) -> Result { + LiteP2pMultihash::wrap(code, input_digest).map(Into::into).map_err(Into::into) + } + + /// Parses a multihash from bytes. + /// + /// You need to make sure the passed in bytes have the length of 64. + pub fn from_bytes(bytes: &[u8]) -> Result { + LiteP2pMultihash::from_bytes(bytes).map(Into::into).map_err(Into::into) + } + + /// Returns the bytes of a multihash. + pub fn to_bytes(&self) -> Vec { + self.multihash.to_bytes() + } +} + +/// Remove extra layer of nestedness by deferring to the wrapped value's [`Debug`]. +impl Debug for Multihash { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + Debug::fmt(&self.multihash, f) + } +} + +impl From for Multihash { + fn from(multihash: LiteP2pMultihash) -> Self { + Multihash { multihash } + } +} + +impl From for LiteP2pMultihash { + fn from(multihash: Multihash) -> Self { + multihash.multihash + } +} + +// TODO: uncomment this after upgrading `multihash` crate to v0.19.1. 
+// +// impl From> for Multihash { +// fn from(generic: multihash::MultihashGeneric<64>) -> Self { +// LiteP2pMultihash::wrap(generic.code(), generic.digest()) +// .expect("both have size 64; qed") +// .into() +// } +// } +// +// impl From for multihash::Multihash<64> { +// fn from(multihash: Multihash) -> Self { +// multihash::Multihash::<64>::wrap(multihash.code(), multihash.digest()) +// .expect("both have size 64; qed") +// } +// } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn code_from_u64() { + assert_eq!(Code::try_from(0x00).unwrap(), Code::Identity); + assert_eq!(Code::try_from(0x12).unwrap(), Code::Sha2_256); + assert!(matches!(Code::try_from(0x01).unwrap_err(), Error::UnsupportedCode(0x01))); + } + + #[test] + fn code_into_u64() { + assert_eq!(u64::from(Code::Identity), 0x00); + assert_eq!(u64::from(Code::Sha2_256), 0x12); + } +} diff --git a/substrate/client/network/types/src/peer_id.rs b/substrate/client/network/types/src/peer_id.rs index 14ac4a1e9aa..076be0a66c7 100644 --- a/substrate/client/network/types/src/peer_id.rs +++ b/substrate/client/network/types/src/peer_id.rs @@ -16,8 +16,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use multiaddr::{Multiaddr, Protocol}; -use multihash::{Code, Error, Multihash}; +use crate::{ + multiaddr::{Multiaddr, Protocol}, + multihash::{Code, Error, Multihash}, +}; use rand::Rng; use std::{fmt, hash::Hash, str::FromStr}; @@ -185,7 +187,7 @@ pub enum ParseError { #[error("unsupported multihash code '{0}'")] UnsupportedCode(u64), #[error("invalid multihash")] - InvalidMultihash(#[from] multihash::Error), + InvalidMultihash(#[from] crate::multihash::Error), } impl FromStr for PeerId { diff --git a/substrate/client/telemetry/src/endpoints.rs b/substrate/client/telemetry/src/endpoints.rs index c7a60726a56..c49b114152a 100644 --- a/substrate/client/telemetry/src/endpoints.rs +++ b/substrate/client/telemetry/src/endpoints.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use sc_network::{multiaddr, Multiaddr}; +use libp2p::multiaddr::{self, Multiaddr}; use serde::{Deserialize, Deserializer, Serialize}; /// List of telemetry servers we want to talk to. 
Contains the URL of the server, and the diff --git a/substrate/client/telemetry/src/lib.rs b/substrate/client/telemetry/src/lib.rs index f8a201e7611..7e3a4ee8639 100644 --- a/substrate/client/telemetry/src/lib.rs +++ b/substrate/client/telemetry/src/lib.rs @@ -37,9 +37,9 @@ #![warn(missing_docs)] use futures::{channel::mpsc, prelude::*}; +use libp2p::Multiaddr; use log::{error, warn}; use parking_lot::Mutex; -use sc_network::Multiaddr; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use serde::Serialize; use std::{ diff --git a/substrate/client/telemetry/src/node.rs b/substrate/client/telemetry/src/node.rs index 9b2443799d3..0bbdbfb622e 100644 --- a/substrate/client/telemetry/src/node.rs +++ b/substrate/client/telemetry/src/node.rs @@ -18,9 +18,8 @@ use crate::TelemetryPayload; use futures::{channel::mpsc, prelude::*}; -use libp2p::core::transport::Transport; +use libp2p::{core::transport::Transport, Multiaddr}; use rand::Rng as _; -use sc_network::Multiaddr; use std::{ fmt, mem, pin::Pin, -- GitLab From ec46106c33f2220d16a9dc7ad604d564d42ee009 Mon Sep 17 00:00:00 2001 From: Javier Viola <363911+pepoviola@users.noreply.github.com> Date: Tue, 21 May 2024 23:33:18 +0200 Subject: [PATCH 042/106] chore: bump zombienet version (#4535) This version includes the latest release of pjs/api (https://github.com/polkadot-js/api/releases/tag/v11.1.1). Thx! --- .gitlab/pipeline/zombienet.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml index 7be4ba1663e..404b57b07c5 100644 --- a/.gitlab/pipeline/zombienet.yml +++ b/.gitlab/pipeline/zombienet.yml @@ -1,7 +1,7 @@ .zombienet-refs: extends: .build-refs variables: - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.103" + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.104" PUSHGATEWAY_URL: "http://zombienet-prometheus-pushgateway.managed-monitoring:9091/metrics/job/zombie-metrics" DEBUG: "zombie,zombie::network-node,zombie::kube::client::logs" -- GitLab From e86bb913905790983bfdb24a0654b008666eeaad Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Wed, 22 May 2024 09:19:05 +0200 Subject: [PATCH 043/106] Update subsystem benchmark baselines (#4532) --- .../benches/approval-voting-regression-bench.rs | 4 ++-- .../benches/availability-distribution-regression-bench.rs | 6 +++--- .../benches/availability-recovery-regression-bench.rs | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs index 9a5f0d29dbd..280b8c53f7d 100644 --- a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs +++ b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs @@ -81,8 +81,8 @@ fn main() -> Result<(), String> { ("Sent to peers", 63547.0330, 0.001), ])); messages.extend(average_usage.check_cpu_usage(&[ - ("approval-distribution", 7.0317, 0.1), - ("approval-voting", 9.5751, 0.1), + ("approval-distribution", 7.4075, 0.1), + ("approval-voting", 9.9873, 0.1), ])); if messages.is_empty() { diff --git a/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs index 5e3072be3a8..72278b5770b 100644 --- 
a/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs +++ b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs @@ -77,9 +77,9 @@ fn main() -> Result<(), String> { ("Sent to peers", 18479.9000, 0.001), ])); messages.extend(average_usage.check_cpu_usage(&[ - ("availability-distribution", 0.0123, 0.1), - ("availability-store", 0.1597, 0.1), - ("bitfield-distribution", 0.0223, 0.1), + ("availability-distribution", 0.0127, 0.1), + ("availability-store", 0.1626, 0.1), + ("bitfield-distribution", 0.0224, 0.1), ])); if messages.is_empty() { diff --git a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs index d9bdc1a2d94..d36b898ea15 100644 --- a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs +++ b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs @@ -74,7 +74,7 @@ fn main() -> Result<(), String> { ("Received from peers", 307203.0000, 0.001), ("Sent to peers", 1.6667, 0.001), ])); - messages.extend(average_usage.check_cpu_usage(&[("availability-recovery", 12.8338, 0.1)])); + messages.extend(average_usage.check_cpu_usage(&[("availability-recovery", 12.8412, 0.1)])); if messages.is_empty() { Ok(()) -- GitLab From c7cb1f25d1f8bb1a922d466e39ee935f5f027266 Mon Sep 17 00:00:00 2001 From: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Date: Wed, 22 May 2024 09:21:12 +0200 Subject: [PATCH 044/106] Add Extra Check in Primary Username Setter (#4534) --- prdoc/pr_4534.prdoc | 13 +++++ substrate/frame/identity/src/lib.rs | 4 +- substrate/frame/identity/src/tests.rs | 72 ++++++++++++++++++++++++++- 3 files changed, 87 insertions(+), 2 deletions(-) create mode 100644 prdoc/pr_4534.prdoc diff --git a/prdoc/pr_4534.prdoc b/prdoc/pr_4534.prdoc new file mode 100644 index 00000000000..417e4d3dace --- /dev/null +++ b/prdoc/pr_4534.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add Extra Check in Primary Username Setter + +doc: + - audience: Runtime User + description: | + Setting primary usernames requires an additional verification. + +crates: + - name: pallet-identity + bump: patch diff --git a/substrate/frame/identity/src/lib.rs b/substrate/frame/identity/src/lib.rs index 4a977880b31..5a36101cc2f 100644 --- a/substrate/frame/identity/src/lib.rs +++ b/substrate/frame/identity/src/lib.rs @@ -1169,7 +1169,9 @@ pub mod pallet { pub fn set_primary_username(origin: OriginFor, username: Username) -> DispatchResult { // ensure `username` maps to `origin` (i.e. has already been set by an authority). 
let who = ensure_signed(origin)?; - ensure!(AccountOfUsername::::contains_key(&username), Error::::NoUsername); + let account_of_username = + AccountOfUsername::::get(&username).ok_or(Error::::NoUsername)?; + ensure!(who == account_of_username, Error::::InvalidUsername); let (registration, _maybe_username) = IdentityOf::::get(&who).ok_or(Error::::NoIdentity)?; IdentityOf::::insert(&who, (registration, Some(username.clone()))); diff --git a/substrate/frame/identity/src/tests.rs b/substrate/frame/identity/src/tests.rs index 0a9464256ce..60579a23b91 100644 --- a/substrate/frame/identity/src/tests.rs +++ b/substrate/frame/identity/src/tests.rs @@ -25,7 +25,7 @@ use crate::{ use codec::{Decode, Encode}; use frame_support::{ - assert_noop, assert_ok, derive_impl, parameter_types, + assert_err, assert_noop, assert_ok, derive_impl, parameter_types, traits::{ConstU32, ConstU64, Get, OnFinalize, OnInitialize}, BoundedVec, }; @@ -1491,6 +1491,76 @@ fn setting_primary_should_work() { }); } +#[test] +fn must_own_primary() { + new_test_ext().execute_with(|| { + // set up authority + let [authority, _] = unfunded_accounts(); + let suffix: Vec = b"test".to_vec(); + let allocation: u32 = 10; + assert_ok!(Identity::add_username_authority( + RuntimeOrigin::root(), + authority.clone(), + suffix.clone(), + allocation + )); + + // Set up first user ("pi") and a username. + let pi_public = sr25519_generate(0.into(), None); + let pi_account: AccountIdOf = MultiSigner::Sr25519(pi_public).into_account().into(); + let (pi_username, pi_to_sign) = + test_username_of(b"username314159".to_vec(), suffix.clone()); + let encoded_pi_username = Encode::encode(&pi_to_sign.to_vec()); + let pi_signature = MultiSignature::Sr25519( + sr25519_sign(0.into(), &pi_public, &encoded_pi_username).unwrap(), + ); + assert_ok!(Identity::set_username_for( + RuntimeOrigin::signed(authority.clone()), + pi_account.clone(), + pi_username.clone(), + Some(pi_signature) + )); + + // Set up second user ("e") and a username. + let e_public = sr25519_generate(1.into(), None); + let e_account: AccountIdOf = MultiSigner::Sr25519(e_public).into_account().into(); + let (e_username, e_to_sign) = test_username_of(b"username271828".to_vec(), suffix.clone()); + let encoded_e_username = Encode::encode(&e_to_sign.to_vec()); + let e_signature = MultiSignature::Sr25519( + sr25519_sign(1.into(), &e_public, &encoded_e_username).unwrap(), + ); + assert_ok!(Identity::set_username_for( + RuntimeOrigin::signed(authority.clone()), + e_account.clone(), + e_username.clone(), + Some(e_signature) + )); + + // Ensure that both users have their usernames. + assert_eq!( + AccountOfUsername::::get::<&Username>(&pi_to_sign), + Some(pi_account.clone()) + ); + assert_eq!( + AccountOfUsername::::get::<&Username>(&e_to_sign), + Some(e_account.clone()) + ); + + // Cannot set primary to a username that does not exist. + let (_, c_username) = test_username_of(b"speedoflight".to_vec(), suffix.clone()); + assert_err!( + Identity::set_primary_username(RuntimeOrigin::signed(pi_account.clone()), c_username,), + Error::::NoUsername + ); + + // Cannot take someone else's username as your primary. 
+ assert_err!( + Identity::set_primary_username(RuntimeOrigin::signed(pi_account.clone()), e_to_sign,), + Error::<Test>::InvalidUsername + ); + }); +} + #[test] fn unaccepted_usernames_should_expire() { new_test_ext().execute_with(|| { -- GitLab From 04161b1b75b98b4e97091b28ef73fe3ef5ed0b9d Mon Sep 17 00:00:00 2001 From: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> Date: Wed, 22 May 2024 12:24:34 +0200 Subject: [PATCH 045/106] Remove `parameterized-consensus-hook` feature (#4380) Closes #4366 --- .../snowbridge/runtime/test-common/Cargo.toml | 2 +- cumulus/pallets/parachain-system/Cargo.toml | 2 -- cumulus/pallets/parachain-system/src/lib.rs | 16 ++++------------ cumulus/pallets/xcmp-queue/Cargo.toml | 2 +- .../runtimes/assets/asset-hub-rococo/Cargo.toml | 2 +- .../runtimes/assets/asset-hub-westend/Cargo.toml | 2 +- .../runtimes/assets/test-utils/Cargo.toml | 2 +- .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 4 +--- .../bridge-hubs/bridge-hub-westend/Cargo.toml | 2 +- .../runtimes/bridge-hubs/test-utils/Cargo.toml | 2 +- .../collectives/collectives-westend/Cargo.toml | 2 +- .../contracts/contracts-rococo/Cargo.toml | 2 +- .../runtimes/coretime/coretime-rococo/Cargo.toml | 2 +- .../coretime/coretime-westend/Cargo.toml | 2 +- .../runtimes/glutton/glutton-westend/Cargo.toml | 2 +- .../runtimes/people/people-rococo/Cargo.toml | 2 +- .../runtimes/people/people-westend/Cargo.toml | 2 +- .../runtimes/starters/seedling/Cargo.toml | 2 +- .../runtimes/starters/shell/Cargo.toml | 2 +- .../parachains/runtimes/test-utils/Cargo.toml | 2 +- .../runtimes/testing/penpal/Cargo.toml | 2 +- .../runtimes/testing/rococo-parachain/Cargo.toml | 2 +- cumulus/test/runtime/Cargo.toml | 4 +--- cumulus/test/service/Cargo.toml | 2 +- docs/sdk/Cargo.toml | 4 +--- prdoc/pr_4380.prdoc | 15 +++++++++++++++ templates/parachain/runtime/Cargo.toml | 4 +--- 27 files changed, 43 insertions(+), 46 deletions(-) create mode 100644 prdoc/pr_4380.prdoc diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml index 20c3fc012d0..e19c682de45 100644 --- a/bridges/snowbridge/runtime/test-common/Cargo.toml +++ b/bridges/snowbridge/runtime/test-common/Cargo.toml @@ -32,7 +32,7 @@ xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-feat xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus -cumulus-pallet-parachain-system = { path = "../../../../cumulus/pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../cumulus/pallets/parachain-system", default-features = false } pallet-collator-selection = { path = "../../../../cumulus/pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../../cumulus/parachains/pallets/parachain-info", default-features = false } parachains-runtimes-test-utils = { path = "../../../../cumulus/parachains/runtimes/test-utils", default-features = false } diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 0c94d0d05a6..1a6a19f2ab4 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -122,5 +122,3 @@ try-runtime = [ "polkadot-runtime-parachains/try-runtime", "sp-runtime/try-runtime", ] - -parameterized-consensus-hook = [] diff --git
a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 7657dc4555e..3b609a675db 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -245,10 +245,6 @@ pub mod pallet { /// [`consensus_hook::ExpectParentIncluded`] here. This is only necessary in the case /// that collators aren't expected to have node versions that supply the included block /// in the relay-chain state proof. - /// - /// This config type is only available when the `parameterized-consensus-hook` crate feature - /// is activated. - #[cfg(feature = "parameterized-consensus-hook")] type ConsensusHook: ConsensusHook; } @@ -556,10 +552,8 @@ pub mod pallet { .expect("Invalid relay chain state proof"); // Update the desired maximum capacity according to the consensus hook. - #[cfg(feature = "parameterized-consensus-hook")] - let (consensus_hook_weight, capacity) = T::ConsensusHook::on_state_proof(&relay_state_proof); - #[cfg(not(feature = "parameterized-consensus-hook"))] - let (consensus_hook_weight, capacity) = ExpectParentIncluded::on_state_proof(&relay_state_proof); + let (consensus_hook_weight, capacity) = + T::ConsensusHook::on_state_proof(&relay_state_proof); total_weight += consensus_hook_weight; total_weight += Self::maybe_drop_included_ancestors(&relay_state_proof, capacity); // Deposit a log indicating the relay-parent storage root. @@ -1639,10 +1633,8 @@ impl polkadot_runtime_common::xcm_sender::EnsureForParachain for Pall } /// Something that can check the inherents of a block. -#[cfg_attr( - feature = "parameterized-consensus-hook", - deprecated = "consider switching to `cumulus-pallet-parachain-system::ConsensusHook`" -)] +#[deprecated(note = "This trait is deprecated and will be removed by September 2024. \ + Consider switching to `cumulus-pallet-parachain-system::ConsensusHook`")] pub trait CheckInherents { /// Check all inherents of the block. 
/// diff --git a/cumulus/pallets/xcmp-queue/Cargo.toml b/cumulus/pallets/xcmp-queue/Cargo.toml index 1941214da2e..87602978521 100644 --- a/cumulus/pallets/xcmp-queue/Cargo.toml +++ b/cumulus/pallets/xcmp-queue/Cargo.toml @@ -48,7 +48,7 @@ pallet-balances = { path = "../../../substrate/frame/balances" } frame-support = { path = "../../../substrate/frame/support", features = ["experimental"] } # Cumulus -cumulus-pallet-parachain-system = { path = "../parachain-system", features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../parachain-system" } [features] default = ["std"] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index 95ce7efdf3f..34e724f19e7 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -72,7 +72,7 @@ xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-paym # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index 3d27f52d0d5..d28a2098bf3 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -72,7 +72,7 @@ xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-paym # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } diff --git a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml index 776c7dce0b4..af5b4a64680 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml @@ -24,7 +24,7 @@ sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-fea sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } # Cumulus -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, 
features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachains-common = { path = "../../../common", default-features = false } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 0fb12531b8e..af243998d43 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -69,9 +69,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = [ - "parameterized-consensus-hook", -] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = [ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 3fbb95a17d0..4a58528498d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -65,7 +65,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml index 5a8fa18b929..80f0114cc4c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml @@ -29,7 +29,7 @@ pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default- # Cumulus asset-test-utils = { path = "../../assets/test-utils" } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } 
parachains-common = { path = "../../../common", default-features = false } parachains-runtimes-test-utils = { path = "../../test-utils", default-features = false } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index a7f51722242..58985d71a50 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -71,7 +71,7 @@ westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/co # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index 4040e977faf..c9dd279e9c0 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -67,7 +67,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml index b92dc57989c..ad85aab1f8a 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml @@ -65,7 +65,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = 
"../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml index a377e243af3..4611228da29 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml @@ -64,7 +64,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml index ccd73fb5ee6..92a5bbbd137 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml @@ -47,7 +47,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml index 2b990d9270f..a29d6db58fe 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml @@ -62,7 +62,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml index cc7b6a6e2ff..b72675900fd 100644 
--- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml @@ -62,7 +62,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } diff --git a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml index 469269e37ff..910944f54a5 100644 --- a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml @@ -36,7 +36,7 @@ sp-version = { path = "../../../../../substrate/primitives/version", default-fea # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-solo-to-para = { path = "../../../../pallets/solo-to-para", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-timestamp = { path = "../../../../primitives/timestamp", default-features = false } diff --git a/cumulus/parachains/runtimes/starters/shell/Cargo.toml b/cumulus/parachains/runtimes/starters/shell/Cargo.toml index ff388d2fa2e..7a7fad537ac 100644 --- a/cumulus/parachains/runtimes/starters/shell/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/shell/Cargo.toml @@ -41,7 +41,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } diff --git a/cumulus/parachains/runtimes/test-utils/Cargo.toml b/cumulus/parachains/runtimes/test-utils/Cargo.toml index 475acb13b8b..c081bac4bab 100644 --- a/cumulus/parachains/runtimes/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/test-utils/Cargo.toml @@ -26,7 +26,7 @@ sp-tracing = { path = "../../../../substrate/primitives/tracing" } sp-core = { path = "../../../../substrate/primitives/core", default-features = false } # Cumulus -cumulus-pallet-parachain-system = { path = "../../../pallets/parachain-system", 
default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../pallets/parachain-system", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../pallets/xcmp-queue", default-features = false } pallet-collator-selection = { path = "../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../pallets/parachain-info", default-features = false } diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml index 0ac79a3eab5..3262233053e 100644 --- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml @@ -69,7 +69,7 @@ xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-paym # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index e74caf6b1f4..cf734345a97 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -50,7 +50,7 @@ polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", def # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } cumulus-ping = { path = "../../../pallets/ping", default-features = false } diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index eb160bd3355..014313aa891 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -41,9 +41,7 @@ sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", sp-version = { path = "../../../substrate/primitives/version", default-features = false } # Cumulus -cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = [ - "parameterized-consensus-hook", -] } +cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false } cumulus-primitives-aura = { path = "../../primitives/aura", default-features = false } 
pallet-collator-selection = { path = "../../pallets/collator-selection", default-features = false } cumulus-pallet-aura-ext = { path = "../../pallets/aura-ext", default-features = false } diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index c54e19d0238..732d884528f 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -85,7 +85,7 @@ cumulus-test-runtime = { path = "../runtime" } cumulus-relay-chain-minimal-node = { path = "../../client/relay-chain-minimal-node" } cumulus-client-pov-recovery = { path = "../../client/pov-recovery" } cumulus-test-relay-sproof-builder = { path = "../relay-sproof-builder" } -cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false } cumulus-primitives-storage-weight-reclaim = { path = "../../primitives/storage-weight-reclaim" } pallet-timestamp = { path = "../../../substrate/frame/timestamp" } diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index 269ed4d012c..7df241aa9c8 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -58,9 +58,7 @@ substrate-wasm-builder = { path = "../../substrate/utils/wasm-builder" } # Cumulus cumulus-pallet-aura-ext = { path = "../../cumulus/pallets/aura-ext" } -cumulus-pallet-parachain-system = { path = "../../cumulus/pallets/parachain-system", features = [ - "parameterized-consensus-hook", -] } +cumulus-pallet-parachain-system = { path = "../../cumulus/pallets/parachain-system" } parachain-info = { package = "staging-parachain-info", path = "../../cumulus/parachains/pallets/parachain-info" } cumulus-primitives-proof-size-hostfunction = { path = "../../cumulus/primitives/proof-size-hostfunction" } cumulus-client-service = { path = "../../cumulus/client/service" } diff --git a/prdoc/pr_4380.prdoc b/prdoc/pr_4380.prdoc new file mode 100644 index 00000000000..1420409656b --- /dev/null +++ b/prdoc/pr_4380.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Remove `parameterized-consensus-hook` feature + +doc: + - audience: Runtime Dev + description: | + The `parameterized-consensus-hook` feature is obsolete and is removed by this PR. The + long-deprecated `CheckInherents` trait is set to be removed by September 2024.
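For downstream runtimes the migration is mechanical: drop the feature from the `cumulus-pallet-parachain-system` dependency (as the many Cargo.toml hunks above do) and make sure the pallet's `ConsensusHook` associated type is set, since it is now always required. A minimal sketch for an Aura-based parachain; the hook and the constant values shown are the common choice, not the only one:

```rust
impl cumulus_pallet_parachain_system::Config for Runtime {
	// ... all other associated types stay as they were ...

	// Previously only required with `parameterized-consensus-hook`; now mandatory.
	type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook<
		Runtime,
		RELAY_CHAIN_SLOT_DURATION_MILLIS, // e.g. 6000
		BLOCK_PROCESSING_VELOCITY,        // e.g. 1
		UNINCLUDED_SEGMENT_CAPACITY,      // e.g. 1, or 3 with async backing
	>;
}
```

Runtimes that relied on the implicit `ExpectParentIncluded` fallback can keep that behaviour by naming `cumulus_pallet_parachain_system::consensus_hook::ExpectParentIncluded` explicitly as their hook.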
+ +crates: + - name: cumulus-pallet-parachain-system + bump: major + diff --git a/templates/parachain/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml index 74b82f06e3a..3e1c7e4b325 100644 --- a/templates/parachain/runtime/Cargo.toml +++ b/templates/parachain/runtime/Cargo.toml @@ -77,9 +77,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm # Cumulus cumulus-pallet-aura-ext = { path = "../../../cumulus/pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../cumulus/pallets/parachain-system", default-features = false, features = [ - "parameterized-consensus-hook", -] } +cumulus-pallet-parachain-system = { path = "../../../cumulus/pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../cumulus/pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../cumulus/pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../cumulus/pallets/xcmp-queue", default-features = false } -- GitLab From b06306c42c969eaa0b828413dd03dc3b7a844976 Mon Sep 17 00:00:00 2001 From: Egor_P Date: Wed, 22 May 2024 13:29:44 +0200 Subject: [PATCH 046/106] [Backport] Version bumps and prdocs reordering from 1.12.0 (#4538) This PR backports version bumps and reorganisation of the prdoc files from the `1.12.0` release branch back to `master` --- .../parachains/runtimes/assets/asset-hub-rococo/src/lib.rs | 2 +- .../parachains/runtimes/assets/asset-hub-westend/src/lib.rs | 2 +- .../runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs | 2 +- .../runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs | 2 +- .../runtimes/collectives/collectives-westend/src/lib.rs | 2 +- .../parachains/runtimes/contracts/contracts-rococo/src/lib.rs | 2 +- .../parachains/runtimes/coretime/coretime-rococo/src/lib.rs | 2 +- .../parachains/runtimes/coretime/coretime-westend/src/lib.rs | 2 +- .../parachains/runtimes/glutton/glutton-westend/src/lib.rs | 2 +- cumulus/parachains/runtimes/people/people-rococo/src/lib.rs | 2 +- cumulus/parachains/runtimes/people/people-westend/src/lib.rs | 2 +- .../parachains/runtimes/testing/rococo-parachain/src/lib.rs | 2 +- polkadot/node/primitives/src/lib.rs | 2 +- polkadot/runtime/rococo/src/lib.rs | 4 ++-- polkadot/runtime/westend/src/lib.rs | 4 ++-- prdoc/{ => 1.12.0}/pr_2226.prdoc | 0 prdoc/{ => 1.12.0}/pr_3444.prdoc | 0 prdoc/{ => 1.12.0}/pr_3701.prdoc | 0 prdoc/{ => 1.12.0}/pr_3865.prdoc | 0 prdoc/{ => 1.12.0}/pr_3872.prdoc | 0 prdoc/{ => 1.12.0}/pr_3904.prdoc | 0 prdoc/{ => 1.12.0}/pr_3962.prdoc | 0 prdoc/{ => 1.12.0}/pr_3964.prdoc | 0 prdoc/{ => 1.12.0}/pr_4034.prdoc | 0 prdoc/{ => 1.12.0}/pr_4035.prdoc | 0 prdoc/{ => 1.12.0}/pr_4091.prdoc | 0 prdoc/{ => 1.12.0}/pr_4102.prdoc | 0 prdoc/{ => 1.12.0}/pr_4157.prdoc | 0 prdoc/{ => 1.12.0}/pr_4175.prdoc | 0 prdoc/{ => 1.12.0}/pr_4185.prdoc | 0 prdoc/{ => 1.12.0}/pr_4202.prdoc | 0 prdoc/{ => 1.12.0}/pr_4211.prdoc | 0 prdoc/{ => 1.12.0}/pr_4213.prdoc | 0 prdoc/{ => 1.12.0}/pr_4220.prdoc | 0 prdoc/{ => 1.12.0}/pr_4281.prdoc | 0 prdoc/{ => 1.12.0}/pr_4295.prdoc | 0 prdoc/{ => 1.12.0}/pr_4301.prdoc | 0 prdoc/{ => 1.12.0}/pr_4302.prdoc | 0 prdoc/{ => 1.12.0}/pr_4311.prdoc | 0 prdoc/{ => 1.12.0}/pr_4312.prdoc | 0 prdoc/{ => 1.12.0}/pr_4326.prdoc | 0 prdoc/{ => 1.12.0}/pr_4329.prdoc | 0 prdoc/{ => 1.12.0}/pr_4346.prdoc | 0 prdoc/{ => 1.12.0}/pr_4349.prdoc | 0 prdoc/{ => 1.12.0}/pr_4364.prdoc | 0 prdoc/{ => 1.12.0}/pr_4394.prdoc | 0 prdoc/{ => 1.12.0}/pr_4406.prdoc | 0 
prdoc/{ => 1.12.0}/pr_4414.prdoc | 0 prdoc/{ => 1.12.0}/pr_4417.prdoc | 0 prdoc/{ => 1.12.0}/pr_4426.prdoc | 0 prdoc/{ => 1.12.0}/pr_4442.prdoc | 0 prdoc/{ => 1.12.0}/pr_4457.prdoc | 0 prdoc/{ => 1.12.0}/pr_4461.prdoc | 0 53 files changed, 17 insertions(+), 17 deletions(-) rename prdoc/{ => 1.12.0}/pr_2226.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_3444.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_3701.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_3865.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_3872.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_3904.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_3962.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_3964.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4034.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4035.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4091.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4102.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4157.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4175.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4185.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4202.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4211.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4213.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4220.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4281.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4295.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4301.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4302.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4311.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4312.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4326.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4329.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4346.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4349.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4364.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4394.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4406.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4414.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4417.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4426.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4442.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4457.prdoc (100%) rename prdoc/{ => 1.12.0}/pr_4461.prdoc (100%) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 536736c994e..be9fe82d518 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -118,7 +118,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 15, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index bc99e54e707..f630eef9214 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -120,7 +120,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westmint"), impl_name: create_runtime_str!("westmint"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 15, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 9043175a701..0c72b000c2a 100644 --- 
a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -210,7 +210,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-rococo"), impl_name: create_runtime_str!("bridge-hub-rococo"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 50911b4d780..90190da82dd 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -184,7 +184,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-westend"), impl_name: create_runtime_str!("bridge-hub-westend"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 5cb24c4edb7..29ba88df104 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -118,7 +118,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("collectives-westend"), impl_name: create_runtime_str!("collectives-westend"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 85a85e7086c..1222e11e9a6 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -137,7 +137,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("contracts-rococo"), impl_name: create_runtime_str!("contracts-rococo"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 7, diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 4f4935de133..f43bb1c1e41 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -137,7 +137,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-rococo"), impl_name: create_runtime_str!("coretime-rococo"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index fca1b0e7c6e..ff2456dc177 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -136,7 +136,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: 
create_runtime_str!("coretime-westend"), impl_name: create_runtime_str!("coretime-westend"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index b4ee0f5ae71..4092fb78594 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -100,7 +100,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("glutton-westend"), impl_name: create_runtime_str!("glutton-westend"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 68e34a0e567..5cd8aa357c3 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -128,7 +128,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-rococo"), impl_name: create_runtime_str!("people-rococo"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 4d838fb9961..af6b5be4469 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -128,7 +128,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-westend"), impl_name: create_runtime_str!("people-westend"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index b515e8ec5c9..fd4716ab972 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -107,7 +107,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("test-parachain"), impl_name: create_runtime_str!("test-parachain"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index 0f97250a934..67930f8735c 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -58,7 +58,7 @@ pub use disputes::{ /// relatively rare. /// /// The associated worker binaries should use the same version as the node that spawns them. 
-pub const NODE_VERSION: &'static str = "1.11.0"; +pub const NODE_VERSION: &'static str = "1.12.0"; // For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node // plus some overhead: diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index b411cd55149..92264f205f0 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -165,10 +165,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("rococo"), impl_name: create_runtime_str!("parity-rococo-v2.0"), authoring_version: 0, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 25, + transaction_version: 26, state_version: 1, }; diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index d8a444c41ac..999994d68cc 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -157,10 +157,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westend"), impl_name: create_runtime_str!("parity-westend"), authoring_version: 2, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 25, + transaction_version: 26, state_version: 1, }; diff --git a/prdoc/pr_2226.prdoc b/prdoc/1.12.0/pr_2226.prdoc similarity index 100% rename from prdoc/pr_2226.prdoc rename to prdoc/1.12.0/pr_2226.prdoc diff --git a/prdoc/pr_3444.prdoc b/prdoc/1.12.0/pr_3444.prdoc similarity index 100% rename from prdoc/pr_3444.prdoc rename to prdoc/1.12.0/pr_3444.prdoc diff --git a/prdoc/pr_3701.prdoc b/prdoc/1.12.0/pr_3701.prdoc similarity index 100% rename from prdoc/pr_3701.prdoc rename to prdoc/1.12.0/pr_3701.prdoc diff --git a/prdoc/pr_3865.prdoc b/prdoc/1.12.0/pr_3865.prdoc similarity index 100% rename from prdoc/pr_3865.prdoc rename to prdoc/1.12.0/pr_3865.prdoc diff --git a/prdoc/pr_3872.prdoc b/prdoc/1.12.0/pr_3872.prdoc similarity index 100% rename from prdoc/pr_3872.prdoc rename to prdoc/1.12.0/pr_3872.prdoc diff --git a/prdoc/pr_3904.prdoc b/prdoc/1.12.0/pr_3904.prdoc similarity index 100% rename from prdoc/pr_3904.prdoc rename to prdoc/1.12.0/pr_3904.prdoc diff --git a/prdoc/pr_3962.prdoc b/prdoc/1.12.0/pr_3962.prdoc similarity index 100% rename from prdoc/pr_3962.prdoc rename to prdoc/1.12.0/pr_3962.prdoc diff --git a/prdoc/pr_3964.prdoc b/prdoc/1.12.0/pr_3964.prdoc similarity index 100% rename from prdoc/pr_3964.prdoc rename to prdoc/1.12.0/pr_3964.prdoc diff --git a/prdoc/pr_4034.prdoc b/prdoc/1.12.0/pr_4034.prdoc similarity index 100% rename from prdoc/pr_4034.prdoc rename to prdoc/1.12.0/pr_4034.prdoc diff --git a/prdoc/pr_4035.prdoc b/prdoc/1.12.0/pr_4035.prdoc similarity index 100% rename from prdoc/pr_4035.prdoc rename to prdoc/1.12.0/pr_4035.prdoc diff --git a/prdoc/pr_4091.prdoc b/prdoc/1.12.0/pr_4091.prdoc similarity index 100% rename from prdoc/pr_4091.prdoc rename to prdoc/1.12.0/pr_4091.prdoc diff --git a/prdoc/pr_4102.prdoc b/prdoc/1.12.0/pr_4102.prdoc similarity index 100% rename from prdoc/pr_4102.prdoc rename to prdoc/1.12.0/pr_4102.prdoc diff --git a/prdoc/pr_4157.prdoc b/prdoc/1.12.0/pr_4157.prdoc similarity index 100% rename from prdoc/pr_4157.prdoc rename to prdoc/1.12.0/pr_4157.prdoc diff --git a/prdoc/pr_4175.prdoc b/prdoc/1.12.0/pr_4175.prdoc similarity index 100% rename from prdoc/pr_4175.prdoc rename to prdoc/1.12.0/pr_4175.prdoc diff --git a/prdoc/pr_4185.prdoc 
b/prdoc/1.12.0/pr_4185.prdoc similarity index 100% rename from prdoc/pr_4185.prdoc rename to prdoc/1.12.0/pr_4185.prdoc diff --git a/prdoc/pr_4202.prdoc b/prdoc/1.12.0/pr_4202.prdoc similarity index 100% rename from prdoc/pr_4202.prdoc rename to prdoc/1.12.0/pr_4202.prdoc diff --git a/prdoc/pr_4211.prdoc b/prdoc/1.12.0/pr_4211.prdoc similarity index 100% rename from prdoc/pr_4211.prdoc rename to prdoc/1.12.0/pr_4211.prdoc diff --git a/prdoc/pr_4213.prdoc b/prdoc/1.12.0/pr_4213.prdoc similarity index 100% rename from prdoc/pr_4213.prdoc rename to prdoc/1.12.0/pr_4213.prdoc diff --git a/prdoc/pr_4220.prdoc b/prdoc/1.12.0/pr_4220.prdoc similarity index 100% rename from prdoc/pr_4220.prdoc rename to prdoc/1.12.0/pr_4220.prdoc diff --git a/prdoc/pr_4281.prdoc b/prdoc/1.12.0/pr_4281.prdoc similarity index 100% rename from prdoc/pr_4281.prdoc rename to prdoc/1.12.0/pr_4281.prdoc diff --git a/prdoc/pr_4295.prdoc b/prdoc/1.12.0/pr_4295.prdoc similarity index 100% rename from prdoc/pr_4295.prdoc rename to prdoc/1.12.0/pr_4295.prdoc diff --git a/prdoc/pr_4301.prdoc b/prdoc/1.12.0/pr_4301.prdoc similarity index 100% rename from prdoc/pr_4301.prdoc rename to prdoc/1.12.0/pr_4301.prdoc diff --git a/prdoc/pr_4302.prdoc b/prdoc/1.12.0/pr_4302.prdoc similarity index 100% rename from prdoc/pr_4302.prdoc rename to prdoc/1.12.0/pr_4302.prdoc diff --git a/prdoc/pr_4311.prdoc b/prdoc/1.12.0/pr_4311.prdoc similarity index 100% rename from prdoc/pr_4311.prdoc rename to prdoc/1.12.0/pr_4311.prdoc diff --git a/prdoc/pr_4312.prdoc b/prdoc/1.12.0/pr_4312.prdoc similarity index 100% rename from prdoc/pr_4312.prdoc rename to prdoc/1.12.0/pr_4312.prdoc diff --git a/prdoc/pr_4326.prdoc b/prdoc/1.12.0/pr_4326.prdoc similarity index 100% rename from prdoc/pr_4326.prdoc rename to prdoc/1.12.0/pr_4326.prdoc diff --git a/prdoc/pr_4329.prdoc b/prdoc/1.12.0/pr_4329.prdoc similarity index 100% rename from prdoc/pr_4329.prdoc rename to prdoc/1.12.0/pr_4329.prdoc diff --git a/prdoc/pr_4346.prdoc b/prdoc/1.12.0/pr_4346.prdoc similarity index 100% rename from prdoc/pr_4346.prdoc rename to prdoc/1.12.0/pr_4346.prdoc diff --git a/prdoc/pr_4349.prdoc b/prdoc/1.12.0/pr_4349.prdoc similarity index 100% rename from prdoc/pr_4349.prdoc rename to prdoc/1.12.0/pr_4349.prdoc diff --git a/prdoc/pr_4364.prdoc b/prdoc/1.12.0/pr_4364.prdoc similarity index 100% rename from prdoc/pr_4364.prdoc rename to prdoc/1.12.0/pr_4364.prdoc diff --git a/prdoc/pr_4394.prdoc b/prdoc/1.12.0/pr_4394.prdoc similarity index 100% rename from prdoc/pr_4394.prdoc rename to prdoc/1.12.0/pr_4394.prdoc diff --git a/prdoc/pr_4406.prdoc b/prdoc/1.12.0/pr_4406.prdoc similarity index 100% rename from prdoc/pr_4406.prdoc rename to prdoc/1.12.0/pr_4406.prdoc diff --git a/prdoc/pr_4414.prdoc b/prdoc/1.12.0/pr_4414.prdoc similarity index 100% rename from prdoc/pr_4414.prdoc rename to prdoc/1.12.0/pr_4414.prdoc diff --git a/prdoc/pr_4417.prdoc b/prdoc/1.12.0/pr_4417.prdoc similarity index 100% rename from prdoc/pr_4417.prdoc rename to prdoc/1.12.0/pr_4417.prdoc diff --git a/prdoc/pr_4426.prdoc b/prdoc/1.12.0/pr_4426.prdoc similarity index 100% rename from prdoc/pr_4426.prdoc rename to prdoc/1.12.0/pr_4426.prdoc diff --git a/prdoc/pr_4442.prdoc b/prdoc/1.12.0/pr_4442.prdoc similarity index 100% rename from prdoc/pr_4442.prdoc rename to prdoc/1.12.0/pr_4442.prdoc diff --git a/prdoc/pr_4457.prdoc b/prdoc/1.12.0/pr_4457.prdoc similarity index 100% rename from prdoc/pr_4457.prdoc rename to prdoc/1.12.0/pr_4457.prdoc diff --git a/prdoc/pr_4461.prdoc b/prdoc/1.12.0/pr_4461.prdoc 
similarity index 100% rename from prdoc/pr_4461.prdoc rename to prdoc/1.12.0/pr_4461.prdoc -- GitLab From ad54bc36c1b2ce9517d023f2df9d6bdec9ca64e1 Mon Sep 17 00:00:00 2001 From: Riko <49999458+fasteater@users.noreply.github.com> Date: Wed, 22 May 2024 13:55:14 +0200 Subject: [PATCH 047/106] fixed link (#4539) --- bridges/modules/grandpa/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bridges/modules/grandpa/README.md b/bridges/modules/grandpa/README.md index 4a3099b8afc..df63f4aa639 100644 --- a/bridges/modules/grandpa/README.md +++ b/bridges/modules/grandpa/README.md @@ -87,7 +87,7 @@ It'd be better for anyone (for chain and for submitters) to reject all transacti already known headers to the pallet. This way, we leave block space to other useful transactions and we don't charge concurrent submitters for their honest actions. -To deal with that, we have a [signed extension](./src/call_ext) that may be added to the runtime. +To deal with that, we have a [signed extension](./src/call_ext.rs) that may be added to the runtime. It does exactly what is required - rejects all transactions with already known headers. The submitter pays nothing for such transactions - they're simply removed from the transaction pool, when the block is built. -- GitLab From eda98954a098098ad94d3055738577016853c8af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 22 May 2024 16:03:07 +0200 Subject: [PATCH 048/106] rustls: Disable logging again (#4541) We are actually using an older version, where the log line is in a different file. --- prdoc/pr_4541.prdoc | 16 ++++++++++++++++ substrate/client/tracing/src/logging/mod.rs | 4 ++++ 2 files changed, 20 insertions(+) create mode 100644 prdoc/pr_4541.prdoc diff --git a/prdoc/pr_4541.prdoc b/prdoc/pr_4541.prdoc new file mode 100644 index 00000000000..815ea2c8006 --- /dev/null +++ b/prdoc/pr_4541.prdoc @@ -0,0 +1,16 @@ +title: "Remove warning about `BadCertificate` Version 2" + +doc: + - audience: Node Operator + description: | + The node was printing the following warning from time to time: + ``` + Sending fatal alert BadCertificate + ``` + + This is not a user error and thus the warning will not be printed + anymore. + +crates: + - name: sc-tracing + bump: patch diff --git a/substrate/client/tracing/src/logging/mod.rs b/substrate/client/tracing/src/logging/mod.rs index 46fd4efb339..05ec9fcf6ef 100644 --- a/substrate/client/tracing/src/logging/mod.rs +++ b/substrate/client/tracing/src/logging/mod.rs @@ -142,9 +142,13 @@ where parse_default_directive("libp2p_mdns::behaviour::iface=off") .expect("provided directive is valid"), ) + // Disable annoying log messages from rustls .add_directive( parse_default_directive("rustls::common_state=off") .expect("provided directive is valid"), + ) + .add_directive( + parse_default_directive("rustls::conn=off").expect("provided directive is valid"), ); if let Ok(lvl) = std::env::var("RUST_LOG") { -- GitLab From 8dbe4ee80734bba6644c7e5f879a363ce7c0a19f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 22 May 2024 16:21:32 +0200 Subject: [PATCH 049/106] Implement `CheckMetadataHash` extension (#4274) This implements the `CheckMetadataHash` extension as described in [RFC78](https://polkadot-fellows.github.io/RFCs/approved/0078-merkleized-metadata.html). Besides the signed extension, the `substrate-wasm-builder` is extended to support generating the metadata-hash.
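The extension follows the RFC: every signed extrinsic carries an explicit one-byte mode and, when that mode is enabled, the signed payload additionally commits to the metadata hash that the wasm-builder bakes into the runtime, so a transaction signed against tampered or mismatching metadata fails signature verification. A simplified sketch of the shape — `Mode` and `metadata_hash()` are illustrative names, not the exact implementation:

```rust
/// Simplified sketch of the extension described above.
pub struct CheckMetadataHash<T> {
	/// Encoded into the extrinsic itself: 0 = disabled, 1 = enabled.
	mode: Mode,
	_phantom: core::marker::PhantomData<T>,
}

impl<T: frame_system::Config> SignedExtension for CheckMetadataHash<T> {
	// Not encoded into the extrinsic, but included in the signed payload:
	// `None` when disabled, `Some(hash)` when enabled.
	type AdditionalSigned = Option<[u8; 32]>;

	fn additional_signed(&self) -> Result<Self::AdditionalSigned, TransactionValidityError> {
		Ok(match self.mode {
			Mode::Disabled => None,
			// `metadata_hash()` stands in for the value the wasm-builder
			// generates at compile time and embeds into the runtime.
			Mode::Enabled => Some(metadata_hash()),
		})
	}

	// ... remaining trait items elided in this sketch ...
}
```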
Closes: https://github.com/paritytech/polkadot-sdk/issues/291 --------- Co-authored-by: Oliver Tale-Yazdi Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Co-authored-by: Liam Aharon Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- .github/workflows/check-semver.yml | 1 + .gitlab/pipeline/test.yml | 11 + Cargo.lock | 85 ++++++++ Cargo.toml | 1 + .../tests/assets/asset-hub-westend/Cargo.toml | 1 + .../src/tests/xcm_fee_estimation.rs | 1 + .../assets/asset-hub-rococo/Cargo.toml | 7 +- .../runtimes/assets/asset-hub-rococo/build.rs | 9 +- .../assets/asset-hub-rococo/src/lib.rs | 3 +- .../assets/asset-hub-westend/Cargo.toml | 7 +- .../assets/asset-hub-westend/build.rs | 9 +- .../assets/asset-hub-westend/src/lib.rs | 3 +- docs/sdk/Cargo.toml | 1 + docs/sdk/src/guides/enable_metadata_hash.rs | 88 ++++++++ docs/sdk/src/guides/mod.rs | 3 + polkadot/node/service/Cargo.toml | 15 +- polkadot/node/service/src/benchmarking.rs | 4 + polkadot/runtime/rococo/Cargo.toml | 7 +- polkadot/runtime/rococo/build.rs | 15 +- polkadot/runtime/rococo/src/lib.rs | 3 + polkadot/runtime/westend/Cargo.toml | 7 +- polkadot/runtime/westend/build.rs | 13 +- polkadot/runtime/westend/src/lib.rs | 2 + prdoc/pr_4274.prdoc | 39 ++++ substrate/bin/node/cli/Cargo.toml | 1 + substrate/bin/node/cli/benches/executor.rs | 2 +- substrate/bin/node/cli/src/service.rs | 16 +- substrate/bin/node/cli/tests/common.rs | 2 +- substrate/bin/node/runtime/Cargo.toml | 4 + substrate/bin/node/runtime/build.rs | 20 +- substrate/bin/node/runtime/src/lib.rs | 2 + substrate/bin/node/testing/Cargo.toml | 1 + substrate/bin/node/testing/src/bench.rs | 2 + substrate/bin/node/testing/src/keyring.rs | 13 +- substrate/client/executor/wasmtime/src/lib.rs | 4 + .../frame/metadata-hash-extension/Cargo.toml | 39 ++++ .../frame/metadata-hash-extension/src/lib.rs | 168 +++++++++++++++ .../metadata-hash-extension/src/tests.rs | 179 ++++++++++++++++ substrate/test-utils/runtime/Cargo.toml | 5 +- substrate/test-utils/runtime/build.rs | 1 + substrate/test-utils/runtime/src/extrinsic.rs | 30 ++- substrate/test-utils/runtime/src/lib.rs | 15 +- substrate/utils/wasm-builder/Cargo.toml | 31 +++ substrate/utils/wasm-builder/src/builder.rs | 36 ++++ substrate/utils/wasm-builder/src/lib.rs | 5 +- .../utils/wasm-builder/src/metadata_hash.rs | 132 ++++++++++++ .../utils/wasm-builder/src/wasm_project.rs | 197 ++++++++++-------- templates/parachain/runtime/Cargo.toml | 16 ++ templates/parachain/runtime/build.rs | 10 +- templates/parachain/runtime/src/lib.rs | 1 + 50 files changed, 1141 insertions(+), 126 deletions(-) create mode 100644 docs/sdk/src/guides/enable_metadata_hash.rs create mode 100644 prdoc/pr_4274.prdoc create mode 100644 substrate/frame/metadata-hash-extension/Cargo.toml create mode 100644 substrate/frame/metadata-hash-extension/src/lib.rs create mode 100644 substrate/frame/metadata-hash-extension/src/tests.rs create mode 100644 substrate/utils/wasm-builder/src/metadata_hash.rs diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml index f0e076e8a16..04c63f4192b 100644 --- a/.github/workflows/check-semver.yml +++ b/.github/workflows/check-semver.yml @@ -38,6 +38,7 @@ jobs: run: | export CARGO_TARGET_DIR=target export RUSTFLAGS='-A warnings -A missing_docs' + export SKIP_WASM_BUILD=1 if ! 
parity-publish --color always prdoc --since old --validate prdoc/pr_$PR.prdoc --toolchain nightly-2024-03-01 -v; then cat <::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), + frame_metadata_hash_extension::CheckMetadataHash::::new(false), ); let raw_payload = westend_runtime::SignedPayload::new(call, extra).unwrap(); let signature = raw_payload.using_encoded(|payload| sender.sign(payload)); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index 34e724f19e7..a880730ddac 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -18,6 +18,7 @@ scale-info = { version = "2.11.1", default-features = false, features = ["derive # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-metadata-hash-extension = { path = "../../../../../substrate/frame/metadata-hash-extension", default-features = false } frame-support = { path = "../../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../../substrate/frame/system", default-features = false } frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } @@ -189,6 +190,7 @@ std = [ "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", + "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-benchmarking?/std", "frame-system-rpc-runtime-api/std", @@ -248,7 +250,10 @@ std = [ "xcm/std", ] +# Enable the metadata hash generation in the wasm builder. +metadata-hash = ["substrate-wasm-builder/metadata-hash"] + # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller, like logging for example. -on-chain-release-build = ["sp-api/disable-logging"] +on-chain-release-build = ["metadata-hash", "sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs index 239ccac19ec..99e510e2269 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs @@ -13,10 +13,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#[cfg(feature = "std")]
+#[cfg(all(not(feature = "metadata-hash"), feature = "std"))]
 fn main() {
 	substrate_wasm_builder::WasmBuilder::build_using_defaults();
 }
 
+#[cfg(all(feature = "metadata-hash", feature = "std"))]
+fn main() {
+	substrate_wasm_builder::WasmBuilder::init_with_defaults()
+		.enable_metadata_hash("ROC", 12)
+		.build();
+}
+
 #[cfg(not(feature = "std"))]
 fn main() {}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
index be9fe82d518..25c66afc8a5 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
@@ -121,7 +121,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_version: 1_012_000,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
-	transaction_version: 15,
+	transaction_version: 16,
 	state_version: 1,
 };
 
@@ -979,6 +979,7 @@ pub type SignedExtra = (
 	frame_system::CheckWeight<Runtime>,
 	pallet_asset_conversion_tx_payment::ChargeAssetTxPayment<Runtime>,
 	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
+	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
 );
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
index d28a2098bf3..953f6a8b400 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
@@ -18,6 +18,7 @@ scale-info = { version = "2.11.1", default-features = false, features = ["derive
 # Substrate
 frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true }
 frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false }
+frame-metadata-hash-extension = { path = "../../../../../substrate/frame/metadata-hash-extension", default-features = false }
 frame-support = { path = "../../../../../substrate/frame/support", default-features = false }
 frame-system = { path = "../../../../../substrate/frame/system", default-features = false }
 frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true }
@@ -189,6 +190,7 @@ std = [
 	"cumulus-primitives-utility/std",
 	"frame-benchmarking?/std",
 	"frame-executive/std",
+	"frame-metadata-hash-extension/std",
 	"frame-support/std",
 	"frame-system-benchmarking?/std",
 	"frame-system-rpc-runtime-api/std",
@@ -247,7 +249,10 @@ std = [
 	"xcm/std",
 ]
 
+# Enable the metadata hash generation in the wasm builder.
+metadata-hash = ["substrate-wasm-builder/metadata-hash"]
+
 # A feature that should be enabled when the runtime should be built for on-chain
 # deployment. This will disable stuff that shouldn't be part of the on-chain wasm
 # to make it smaller, like logging for example.
-on-chain-release-build = ["sp-api/disable-logging"]
+on-chain-release-build = ["metadata-hash", "sp-api/disable-logging"]
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/build.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/build.rs
index 239ccac19ec..cf9664aeb2f 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/build.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/build.rs
@@ -13,10 +13,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#[cfg(feature = "std")]
+#[cfg(all(not(feature = "metadata-hash"), feature = "std"))]
 fn main() {
 	substrate_wasm_builder::WasmBuilder::build_using_defaults();
 }
 
+#[cfg(all(feature = "metadata-hash", feature = "std"))]
+fn main() {
+	substrate_wasm_builder::WasmBuilder::init_with_defaults()
+		.enable_metadata_hash("WND", 12)
+		.build();
+}
+
 #[cfg(not(feature = "std"))]
 fn main() {}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
index f630eef9214..c8d388df16c 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
@@ -123,7 +123,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_version: 1_012_000,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
-	transaction_version: 15,
+	transaction_version: 16,
 	state_version: 1,
 };
 
@@ -974,6 +974,7 @@ pub type SignedExtra = (
 	frame_system::CheckWeight<Runtime>,
 	pallet_asset_conversion_tx_payment::ChargeAssetTxPayment<Runtime>,
 	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
+	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
 );
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml
index 7df241aa9c8..4a4f333de79 100644
--- a/docs/sdk/Cargo.toml
+++ b/docs/sdk/Cargo.toml
@@ -38,6 +38,7 @@ frame-system = { path = "../../substrate/frame/system", default-features = false
 frame-support = { path = "../../substrate/frame/support", default-features = false }
 frame-executive = { path = "../../substrate/frame/executive", default-features = false }
 pallet-example-single-block-migrations = { path = "../../substrate/frame/examples/single-block-migrations" }
+frame-metadata-hash-extension = { path = "../../substrate/frame/metadata-hash-extension" }
 
 # Substrate Client
 sc-network = { path = "../../substrate/client/network" }
diff --git a/docs/sdk/src/guides/enable_metadata_hash.rs b/docs/sdk/src/guides/enable_metadata_hash.rs
new file mode 100644
index 00000000000..b9cbae85335
--- /dev/null
+++ b/docs/sdk/src/guides/enable_metadata_hash.rs
@@ -0,0 +1,88 @@
+//! # Enable metadata hash verification
+//!
+//! This guide will teach you how to enable the metadata hash verification in your runtime.
+//!
+//! ## What is metadata hash verification?
+//!
+//! Each FRAME based runtime exposes metadata about itself. This metadata is used by consumers of
+//! the runtime to interpret the state, to construct transactions etc. Part of this metadata is the
+//! type information. This type information can be used to e.g. decode storage entries or to decode
+//! a transaction. So, the metadata is quite useful for wallets to interact with a FRAME based
+//! chain. Online wallets can fetch the metadata directly from any node of the chain they are
+//! connected to, but offline wallets cannot do this. So, for the offline wallet to have access to
+//! the metadata it needs to be transferred and stored on the device. The problem is that the
+//! metadata has a size of several hundred kilobytes, which takes quite a while to transfer to
+//! these offline wallets, and the internal storage of these devices is also not big enough to store
+//! the metadata for one or more networks. The next problem is that the offline wallet/user cannot
+//! trust the metadata to be correct. It is very important for the metadata to be correct;
+//! otherwise an attacker could change it in a way that makes the offline wallet decode a
+//! transaction differently from how it will be decoded on chain. So, the user may sign an
+//! incorrect transaction, leading to unexpected behavior.
+//!
+//! The metadata hash verification circumvents the issues of the huge metadata and the need to trust
+//! some metadata blob to be correct. To generate a hash for the metadata, the metadata is chunked,
+//! these chunks are put into a merkle tree and then the root of this merkle tree is the "metadata
+//! hash". For a more technical explanation on how it works, see
+//! [RFC78](https://polkadot-fellows.github.io/RFCs/approved/0078-merkleized-metadata.html). At compile
+//! time the metadata hash is generated and "baked" into the runtime. This makes it extremely cheap
+//! for the runtime to verify on chain that the metadata hash is correct. By having the runtime
+//! verify the hash on chain, the user also doesn't need to trust the offchain metadata. If the
+//! metadata hash doesn't match the on chain metadata hash, the transaction will be rejected. The
+//! metadata hash itself is added to the data of the transaction that is signed; this means the
+//! actual hash does not appear in the transaction. On chain the same procedure is repeated with the
+//! metadata hash that is known by the runtime, and if the metadata hash doesn't match, the
+//! signature verification will fail. As the metadata hash is actually the root of a merkle tree,
+//! the offline wallet can get proofs of individual types to decode a transaction. This means that
+//! the offline wallet does not require the entire metadata to be present on the device.
+//!
+//! ## Integrating metadata hash verification into your runtime
+//!
+//! The integration of the metadata hash verification is split into two parts: first, the actual
+//! integration into the runtime and second, the enabling of the metadata hash generation at
+//! compile time.
+//!
+//! ### Runtime integration
+//!
+//! From the runtime side only the
+//! [`CheckMetadataHash`](frame_metadata_hash_extension::CheckMetadataHash) needs to be added to the
+//! list of signed extensions:
+#![doc = docify::embed!("../../templates/parachain/runtime/src/lib.rs", template_signed_extra)]
+//!
+//! > **Note:**
+//! >
+//! > Adding the signed extension changes the encoding of the transaction and adds one extra byte
+//! > per transaction!
+//!
+//! This signed extension will make sure to decode the requested `mode` and will add the metadata
+//! hash to the signed data depending on the requested `mode`. The `mode` gives the user/wallet
+//! control over deciding if the metadata hash should be verified or not. The metadata hash itself
+//! is drawn from the `RUNTIME_METADATA_HASH` environment variable. If the environment variable is
+//! not set, any transaction that requires the metadata hash is rejected with the error
+//! `CannotLookup`. This is a security measure to prevent including invalid transactions.
+//!
+//! <div class="warning">
+//!
+//! The extension does not work with the native runtime, because the
+//! `RUNTIME_METADATA_HASH` environment variable is not set when building the
+//! `frame-metadata-hash-extension` crate.
+//!
+//! </div>
+//!
+//! ### Enable metadata hash generation
+//!
+//! The metadata hash generation needs to be enabled when building the wasm binary. The
+//! `substrate-wasm-builder` supports this out of the box:
+#![doc = docify::embed!("../../templates/parachain/runtime/build.rs", template_enable_metadata_hash)]
+//!
+//! > **Note:**
+//! >
+//! > The `metadata-hash` feature needs to be enabled for the `substrate-wasm-builder` to include
+//! > the code for generating the metadata hash. It is also recommended to put the metadata
+//! > hash generation behind a feature in the runtime as shown above. The reason behind this is
+//! > that it adds a lot of code, which increases the compile time, and the generation itself also
+//! > increases the compile time. Thus, it is recommended to enable the feature only when the
+//! > metadata hash is required (e.g. for an on-chain build).
+//!
+//! The two parameters to `enable_metadata_hash` are the token symbol and the number of decimals of
+//! the primary token of the chain. This information is included for wallets to show token
+//! related operations in a more user friendly way.
diff --git a/docs/sdk/src/guides/mod.rs b/docs/sdk/src/guides/mod.rs
index 2dc807af8ea..f5f6d2b5e0c 100644
--- a/docs/sdk/src/guides/mod.rs
+++ b/docs/sdk/src/guides/mod.rs
@@ -26,3 +26,6 @@ pub mod xcm_enabled_parachain;
 
 /// How to enable storage weight reclaiming in a parachain node and runtime.
 pub mod enable_pov_reclaim;
+
+/// How to enable metadata hash verification in the runtime.
+pub mod enable_metadata_hash;
diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml
index b3f1c22d0e7..37836f134bd 100644
--- a/polkadot/node/service/Cargo.toml
+++ b/polkadot/node/service/Cargo.toml
@@ -67,6 +67,7 @@ sp-version = { path = "../../../substrate/primitives/version" }
 pallet-babe = { path = "../../../substrate/frame/babe" }
 pallet-staking = { path = "../../../substrate/frame/staking" }
 pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api" }
+frame-metadata-hash-extension = { path = "../../../substrate/frame/metadata-hash-extension", optional = true }
 frame-system = { path = "../../../substrate/frame/system" }
 
 # Substrate Other
@@ -187,8 +188,18 @@ full-node = [
 ]
 
 # Configure the native runtimes to use.
-westend-native = ["bitvec", "westend-runtime", "westend-runtime-constants"]
-rococo-native = ["bitvec", "rococo-runtime", "rococo-runtime-constants"]
+westend-native = [
+	"bitvec",
+	"frame-metadata-hash-extension",
+	"westend-runtime",
+	"westend-runtime-constants",
+]
+rococo-native = [
+	"bitvec",
+	"frame-metadata-hash-extension",
+	"rococo-runtime",
+	"rococo-runtime-constants",
+]
 
 runtime-benchmarks = [
 	"frame-benchmarking-cli/runtime-benchmarks",
diff --git a/polkadot/node/service/src/benchmarking.rs b/polkadot/node/service/src/benchmarking.rs
index a0c4d3b0446..4dcff207841 100644
--- a/polkadot/node/service/src/benchmarking.rs
+++ b/polkadot/node/service/src/benchmarking.rs
@@ -201,6 +201,7 @@ fn westend_sign_call(
 		frame_system::CheckNonce::<runtime::Runtime>::from(nonce),
 		frame_system::CheckWeight::<runtime::Runtime>::new(),
 		pallet_transaction_payment::ChargeTransactionPayment::<runtime::Runtime>::from(0),
+		frame_metadata_hash_extension::CheckMetadataHash::<runtime::Runtime>::new(false),
 	);
 
 	let payload = runtime::SignedPayload::from_raw(
@@ -215,6 +216,7 @@ fn westend_sign_call(
 			(),
 			(),
 			(),
+			None,
 		),
 	);
 
@@ -253,6 +255,7 @@ fn rococo_sign_call(
 		frame_system::CheckNonce::<runtime::Runtime>::from(nonce),
 		frame_system::CheckWeight::<runtime::Runtime>::new(),
 		pallet_transaction_payment::ChargeTransactionPayment::<runtime::Runtime>::from(0),
+		frame_metadata_hash_extension::CheckMetadataHash::<runtime::Runtime>::new(false),
 	);
 
 	let payload = runtime::SignedPayload::from_raw(
@@ -267,6 +270,7 @@ fn rococo_sign_call(
 			(),
 			(),
 			(),
+			None,
 		),
 	);
 
diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml
index 4765de08c1a..c78f3e668b9 100644
--- a/polkadot/runtime/rococo/Cargo.toml
+++ b/polkadot/runtime/rococo/Cargo.toml
@@ -95,6 +95,7 @@ pallet-xcm-benchmarks = { path = "../../xcm/pallet-xcm-benchmarks", default-feat
 pallet-root-testing = { path = "../../../substrate/frame/root-testing", default-features = false }
 
 frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true }
+frame-metadata-hash-extension = { path = "../../../substrate/frame/metadata-hash-extension", default-features = false }
 frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true }
 frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true }
 hex-literal = { version = "0.4.1" }
@@ -134,6 +135,7 @@ std = [
 	"block-builder-api/std",
 	"frame-benchmarking?/std",
 	"frame-executive/std",
+	"frame-metadata-hash-extension/std",
 	"frame-support/std",
 	"frame-system-benchmarking?/std",
 	"frame-system-rpc-runtime-api/std",
@@ -324,6 +326,9 @@ try-runtime = [
 	"sp-runtime/try-runtime",
 ]
 
+# Enable the metadata hash generation in the wasm builder.
+metadata-hash = ["substrate-wasm-builder/metadata-hash"]
+
 # Set timing constants (e.g. session period) to faster versions to speed up testing.
 fast-runtime = ["rococo-runtime-constants/fast-runtime"]
 
@@ -332,4 +337,4 @@ runtime-metrics = ["runtime-parachains/runtime-metrics", "sp-io/with-tracing"]
 # A feature that should be enabled when the runtime should be built for on-chain
 # deployment. This will disable stuff that shouldn't be part of the on-chain wasm
 # to make it smaller, like logging for example.
-on-chain-release-build = ["sp-api/disable-logging"]
+on-chain-release-build = ["metadata-hash", "sp-api/disable-logging"]
diff --git a/polkadot/runtime/rococo/build.rs b/polkadot/runtime/rococo/build.rs
index 403c31ff21c..7aae84cd5e0 100644
--- a/polkadot/runtime/rococo/build.rs
+++ b/polkadot/runtime/rococo/build.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Substrate. If not, see <http://www.gnu.org/licenses/>.
 
-#[cfg(feature = "std")]
+#[cfg(all(not(feature = "metadata-hash"), feature = "std"))]
 fn main() {
 	substrate_wasm_builder::WasmBuilder::build_using_defaults();
 
@@ -24,5 +24,18 @@ fn main() {
 		.build();
 }
 
+#[cfg(all(feature = "metadata-hash", feature = "std"))]
+fn main() {
+	substrate_wasm_builder::WasmBuilder::init_with_defaults()
+		.enable_metadata_hash("ROC", 12)
+		.build();
+
+	substrate_wasm_builder::WasmBuilder::init_with_defaults()
+		.set_file_name("fast_runtime_binary.rs")
+		.enable_feature("fast-runtime")
+		.enable_metadata_hash("ROC", 12)
+		.build();
+}
+
 #[cfg(not(feature = "std"))]
 fn main() {}
diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs
index 92264f205f0..f0cc7e046f2 100644
--- a/polkadot/runtime/rococo/src/lib.rs
+++ b/polkadot/runtime/rococo/src/lib.rs
@@ -643,7 +643,9 @@ where
 			frame_system::CheckNonce::<Runtime>::from(nonce),
 			frame_system::CheckWeight::<Runtime>::new(),
 			pallet_transaction_payment::ChargeTransactionPayment::<Runtime>::from(tip),
+			frame_metadata_hash_extension::CheckMetadataHash::new(true),
 		);
+
 		let raw_payload = SignedPayload::new(call, extra)
 			.map_err(|e| {
 				log::warn!("Unable to create signed payload: {:?}", e);
@@ -1528,6 +1530,7 @@ pub type SignedExtra = (
 	frame_system::CheckNonce<Runtime>,
 	frame_system::CheckWeight<Runtime>,
 	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
 );
 
 /// Unchecked extrinsic type as expected by this runtime.
diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml
index 6a919dd00a9..01e9dd1527f 100644
--- a/polkadot/runtime/westend/Cargo.toml
+++ b/polkadot/runtime/westend/Cargo.toml
@@ -45,6 +45,7 @@ sp-npos-elections = { path = "../../../substrate/primitives/npos-elections", def
 
 frame-election-provider-support = { path = "../../../substrate/frame/election-provider-support", default-features = false }
 frame-executive = { path = "../../../substrate/frame/executive", default-features = false }
+frame-metadata-hash-extension = { path = "../../../substrate/frame/metadata-hash-extension", default-features = false }
 frame-support = { path = "../../../substrate/frame/support", default-features = false, features = ["experimental", "tuples-96"] }
 frame-system = { path = "../../../substrate/frame/system", default-features = false }
 frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false }
@@ -141,6 +142,7 @@ std = [
 	"frame-benchmarking?/std",
 	"frame-election-provider-support/std",
 	"frame-executive/std",
+	"frame-metadata-hash-extension/std",
 	"frame-support/std",
 	"frame-system-benchmarking?/std",
 	"frame-system-rpc-runtime-api/std",
@@ -338,6 +340,9 @@ try-runtime = [
 	"sp-runtime/try-runtime",
 ]
 
+# Enable the metadata hash generation in the wasm builder.
+metadata-hash = ["substrate-wasm-builder/metadata-hash"]
+
 # Set timing constants (e.g. session period) to faster versions to speed up testing.
 fast-runtime = []
 
@@ -346,4 +351,4 @@ runtime-metrics = ["runtime-parachains/runtime-metrics", "sp-io/with-tracing"]
 # A feature that should be enabled when the runtime should be built for on-chain
 # deployment. This will disable stuff that shouldn't be part of the on-chain wasm
 # to make it smaller, like logging for example.
-on-chain-release-build = ["sp-api/disable-logging"]
+on-chain-release-build = ["metadata-hash", "sp-api/disable-logging"]
diff --git a/polkadot/runtime/westend/build.rs b/polkadot/runtime/westend/build.rs
index 0b3e12c78c7..8ff3a4fb911 100644
--- a/polkadot/runtime/westend/build.rs
+++ b/polkadot/runtime/westend/build.rs
@@ -14,8 +14,17 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
 
-use substrate_wasm_builder::WasmBuilder;
+#[cfg(all(not(feature = "metadata-hash"), feature = "std"))]
+fn main() {
+	substrate_wasm_builder::WasmBuilder::build_using_defaults();
+}
 
+#[cfg(all(feature = "metadata-hash", feature = "std"))]
 fn main() {
-	WasmBuilder::build_using_defaults();
+	substrate_wasm_builder::WasmBuilder::init_with_defaults()
+		.enable_metadata_hash("WND", 12)
+		.build();
 }
+
+#[cfg(not(feature = "std"))]
+fn main() {}
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index 999994d68cc..cfe0dde0da1 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -797,6 +797,7 @@ where
 			frame_system::CheckNonce::<Runtime>::from(nonce),
 			frame_system::CheckWeight::<Runtime>::new(),
 			pallet_transaction_payment::ChargeTransactionPayment::<Runtime>::from(tip),
+			frame_metadata_hash_extension::CheckMetadataHash::<Runtime>::new(true),
 		);
 		let raw_payload = SignedPayload::new(call, extra)
 			.map_err(|e| {
@@ -1617,6 +1618,7 @@ pub type SignedExtra = (
 	frame_system::CheckNonce<Runtime>,
 	frame_system::CheckWeight<Runtime>,
 	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
 );
 
 pub struct NominationPoolsMigrationV4OldPallet;
diff --git a/prdoc/pr_4274.prdoc b/prdoc/pr_4274.prdoc
new file mode 100644
index 00000000000..77f5d1387cf
--- /dev/null
+++ b/prdoc/pr_4274.prdoc
@@ -0,0 +1,39 @@
+title: Introduce `CheckMetadataHash` signed extension
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Introduces the new `CheckMetadataHash` signed extension. This extension can be added to a
+      runtime to support verifying the metadata hash as described in
+      [RFC78](https://polkadot-fellows.github.io/RFCs/approved/0078-merkleized-metadata.html).
+      This removes the requirement for having a metadata portal and, in general, a centralized
+      authentication of the metadata. With this signed extension the runtime is able to verify
+      that the metadata used by the wallet was correct. This is mainly useful for offline wallets,
+      which users need to trust anyway; it is not that useful for online wallets.
+
+      There is a guide `enable_metadata_hash` for how to integrate this into a runtime, which
+      should make it quite easy to integrate the signed extension.
+  - audience: Runtime User
+    description: |
+      This brings support for the new Ledger app and similar hardware wallets. These hardware
+      wallets will be able to decode the transaction using the metadata. The runtime will
+      ensure that the metadata used for this decoding process is correct and that the online
+      wallet did not try to trick you.
+ +crates: + - name: substrate-wasm-builder + bump: minor + - name: sc-executor-wasmtime + bump: patch + - name: frame-metadata-hash-extension + bump: major + - name: polkadot-service + bump: none + - name: rococo-runtime + bump: major + - name: westend-runtime + bump: major + - name: asset-hub-rococo-runtime + bump: major + - name: asset-hub-westend-runtime + bump: major diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index ec9d6c306b5..9c49fd7b362 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -99,6 +99,7 @@ sc-offchain = { path = "../../../client/offchain" } # frame dependencies frame-benchmarking = { path = "../../../frame/benchmarking" } +frame-metadata-hash-extension = { path = "../../../frame/metadata-hash-extension" } frame-system = { path = "../../../frame/system" } frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api" } pallet-assets = { path = "../../../frame/assets" } diff --git a/substrate/bin/node/cli/benches/executor.rs b/substrate/bin/node/cli/benches/executor.rs index a326e1a79ea..30b52b9ecf6 100644 --- a/substrate/bin/node/cli/benches/executor.rs +++ b/substrate/bin/node/cli/benches/executor.rs @@ -55,7 +55,7 @@ const HEAP_PAGES: u64 = 20; type TestExternalities = CoreTestExternalities; fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic { - node_testing::keyring::sign(xt, SPEC_VERSION, TRANSACTION_VERSION, GENESIS_HASH) + node_testing::keyring::sign(xt, SPEC_VERSION, TRANSACTION_VERSION, GENESIS_HASH, None) } fn new_test_ext(genesis_config: &RuntimeGenesisConfig) -> TestExternalities { diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 5dc1193daf8..938d73d91b1 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -126,6 +126,7 @@ pub fn create_extrinsic( kitchensink_runtime::Runtime, >::from(tip, None), ), + frame_metadata_hash_extension::CheckMetadataHash::new(false), ); let raw_payload = kitchensink_runtime::SignedPayload::from_raw( @@ -140,6 +141,7 @@ pub fn create_extrinsic( (), (), (), + None, ), ); let signature = raw_payload.using_encoded(|e| sender.sign(e)); @@ -1041,6 +1043,7 @@ mod tests { let tx_payment = pallet_skip_feeless_payment::SkipCheckIfFeeless::from( pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(0, None), ); + let metadata_hash = frame_metadata_hash_extension::CheckMetadataHash::new(false); let extra = ( check_non_zero_sender, check_spec_version, @@ -1050,11 +1053,22 @@ mod tests { check_nonce, check_weight, tx_payment, + metadata_hash, ); let raw_payload = SignedPayload::from_raw( function, extra, - ((), spec_version, transaction_version, genesis_hash, genesis_hash, (), (), ()), + ( + (), + spec_version, + transaction_version, + genesis_hash, + genesis_hash, + (), + (), + (), + None, + ), ); let signature = raw_payload.using_encoded(|payload| signer.sign(payload)); let (function, extra, _) = raw_payload.deconstruct(); diff --git a/substrate/bin/node/cli/tests/common.rs b/substrate/bin/node/cli/tests/common.rs index 2d74cdd5a04..8de87c8b76e 100644 --- a/substrate/bin/node/cli/tests/common.rs +++ b/substrate/bin/node/cli/tests/common.rs @@ -83,7 +83,7 @@ pub const TRANSACTION_VERSION: u32 = kitchensink_runtime::VERSION.transaction_ve pub type TestExternalities = CoreTestExternalities; pub fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic { - node_testing::keyring::sign(xt, SPEC_VERSION, TRANSACTION_VERSION, GENESIS_HASH) + 
node_testing::keyring::sign(xt, SPEC_VERSION, TRANSACTION_VERSION, GENESIS_HASH, None)
 }
 
 pub fn default_transfer_call() -> pallet_balances::Call<kitchensink_runtime::Runtime> {
diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml
index 98b8c0ae6bf..a96576e17e1 100644
--- a/substrate/bin/node/runtime/Cargo.toml
+++ b/substrate/bin/node/runtime/Cargo.toml
@@ -58,6 +58,7 @@ sp-io = { path = "../../../primitives/io", default-features = false }
 frame-executive = { path = "../../../frame/executive", default-features = false }
 frame-benchmarking = { path = "../../../frame/benchmarking", default-features = false }
 frame-benchmarking-pallet-pov = { path = "../../../frame/benchmarking/pov", default-features = false }
+frame-metadata-hash-extension = { path = "../../../frame/metadata-hash-extension", default-features = false }
 frame-support = { path = "../../../frame/support", default-features = false, features = ["experimental", "tuples-96"] }
 frame-system = { path = "../../../frame/system", default-features = false }
 frame-system-benchmarking = { path = "../../../frame/system/benchmarking", default-features = false, optional = true }
@@ -159,6 +160,7 @@ std = [
 	"frame-benchmarking/std",
 	"frame-election-provider-support/std",
 	"frame-executive/std",
+	"frame-metadata-hash-extension/std",
 	"frame-support/std",
 	"frame-system-benchmarking?/std",
 	"frame-system-rpc-runtime-api/std",
@@ -436,3 +438,5 @@ experimental = [
 	"frame-system/experimental",
 	"pallet-example-tasks/experimental",
 ]
+
+metadata-hash = ["substrate-wasm-builder/metadata-hash"]
diff --git a/substrate/bin/node/runtime/build.rs b/substrate/bin/node/runtime/build.rs
index b7676a70dfe..0e11c579f09 100644
--- a/substrate/bin/node/runtime/build.rs
+++ b/substrate/bin/node/runtime/build.rs
@@ -15,13 +15,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#[cfg(all(feature = "std", not(feature = "metadata-hash")))]
 fn main() {
-	#[cfg(feature = "std")]
-	{
-		substrate_wasm_builder::WasmBuilder::new()
-			.with_current_project()
-			.export_heap_base()
-			.import_memory()
-			.build();
-	}
+	substrate_wasm_builder::WasmBuilder::build_using_defaults()
 }
+
+#[cfg(all(feature = "std", feature = "metadata-hash"))]
+fn main() {
+	substrate_wasm_builder::WasmBuilder::init_with_defaults()
+		.enable_metadata_hash("Test", 14)
+		.build()
+}
+
+#[cfg(not(feature = "std"))]
+fn main() {}
diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs
index b1f948afa56..5067085e8ed 100644
--- a/substrate/bin/node/runtime/src/lib.rs
+++ b/substrate/bin/node/runtime/src/lib.rs
@@ -1437,6 +1437,7 @@ where
 				tip, None,
 			),
 		),
+		frame_metadata_hash_extension::CheckMetadataHash::new(false),
 	);
 	let raw_payload = SignedPayload::new(call, extra)
 		.map_err(|e| {
@@ -2514,6 +2515,7 @@ pub type SignedExtra = (
 		Runtime,
 		pallet_asset_conversion_tx_payment::ChargeAssetTxPayment<Runtime>,
 	>,
+	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
 );
 
 /// Unchecked extrinsic type as expected by this runtime.
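The same calling convention ripples through the test helpers below: `keyring::sign` now
takes the metadata hash as an explicit trailing argument and threads it into the signed
payload tuple. For callers this looks roughly like the following (a sketch based on the
updated call sites; `checked` stands for a `CheckedExtrinsic` built elsewhere):

```rust
// Passing `None` keeps the extension in disabled mode, so the additional
// signed data contributes `None` and no metadata hash is verified.
let unchecked = node_testing::keyring::sign(
	checked,
	SPEC_VERSION,
	TRANSACTION_VERSION,
	GENESIS_HASH,
	None, // metadata hash: pass `Some([u8; 32])` to opt into verification
);
```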
diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index 09db10563fb..3ba3f07510e 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -21,6 +21,7 @@ fs_extra = "1" futures = "0.3.30" log = { workspace = true, default-features = true } tempfile = "3.1.0" +frame-metadata-hash-extension = { path = "../../../frame/metadata-hash-extension" } frame-system = { path = "../../../frame/system" } node-cli = { package = "staging-node-cli", path = "../cli" } node-primitives = { path = "../primitives" } diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs index e5c2563905e..007d314684c 100644 --- a/substrate/bin/node/testing/src/bench.rs +++ b/substrate/bin/node/testing/src/bench.rs @@ -571,6 +571,8 @@ impl BenchKeyring { tx_version, genesis_hash, genesis_hash, + // metadata_hash + None::<()>, ); let key = self.accounts.get(&signed).expect("Account id not found in keyring"); let signature = payload.using_encoded(|b| { diff --git a/substrate/bin/node/testing/src/keyring.rs b/substrate/bin/node/testing/src/keyring.rs index f712191bed6..eab088d9100 100644 --- a/substrate/bin/node/testing/src/keyring.rs +++ b/substrate/bin/node/testing/src/keyring.rs @@ -82,6 +82,7 @@ pub fn signed_extra(nonce: Nonce, extra_fee: Balance) -> SignedExtra { pallet_skip_feeless_payment::SkipCheckIfFeeless::from( pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(extra_fee, None), ), + frame_metadata_hash_extension::CheckMetadataHash::new(false), ) } @@ -91,11 +92,19 @@ pub fn sign( spec_version: u32, tx_version: u32, genesis_hash: [u8; 32], + metadata_hash: Option<[u8; 32]>, ) -> UncheckedExtrinsic { match xt.signed { Some((signed, extra)) => { - let payload = - (xt.function, extra.clone(), spec_version, tx_version, genesis_hash, genesis_hash); + let payload = ( + xt.function, + extra.clone(), + spec_version, + tx_version, + genesis_hash, + genesis_hash, + metadata_hash, + ); let key = AccountKeyring::from_account_id(&signed).unwrap(); let signature = payload diff --git a/substrate/client/executor/wasmtime/src/lib.rs b/substrate/client/executor/wasmtime/src/lib.rs index 82e62b4a5dd..8e8e92017df 100644 --- a/substrate/client/executor/wasmtime/src/lib.rs +++ b/substrate/client/executor/wasmtime/src/lib.rs @@ -41,3 +41,7 @@ pub use runtime::{ prepare_runtime_artifact, Config, DeterministicStackLimit, InstantiationStrategy, Semantics, WasmtimeRuntime, }; +pub use sc_executor_common::{ + runtime_blob::RuntimeBlob, + wasm_runtime::{HeapAllocStrategy, WasmModule}, +}; diff --git a/substrate/frame/metadata-hash-extension/Cargo.toml b/substrate/frame/metadata-hash-extension/Cargo.toml new file mode 100644 index 00000000000..13d4bd0c2ea --- /dev/null +++ b/substrate/frame/metadata-hash-extension/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "frame-metadata-hash-extension" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true +description = "FRAME signed extension for verifying the metadata hash" + +[dependencies] +array-bytes = "6.2.2" +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } +sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } +frame-support = { path = "../support", default-features = false } 
+frame-system = { path = "../system", default-features = false }
+log = { workspace = true, default-features = false }
+docify = "0.2.8"
+
+[dev-dependencies]
+substrate-wasm-builder = { path = "../../utils/wasm-builder", features = ["metadata-hash"] }
+substrate-test-runtime-client = { path = "../../test-utils/runtime/client" }
+sp-api = { path = "../../primitives/api" }
+sp-transaction-pool = { path = "../../primitives/transaction-pool" }
+merkleized-metadata = "0.1.0"
+frame-metadata = { version = "16.0.0", features = ["current"] }
+sp-tracing = { path = "../../primitives/tracing" }
+
+[features]
+default = ["std"]
+std = [
+	"codec/std",
+	"frame-support/std",
+	"frame-system/std",
+	"log/std",
+	"scale-info/std",
+	"sp-runtime/std",
+]
diff --git a/substrate/frame/metadata-hash-extension/src/lib.rs b/substrate/frame/metadata-hash-extension/src/lib.rs
new file mode 100644
index 00000000000..d09acbfb3df
--- /dev/null
+++ b/substrate/frame/metadata-hash-extension/src/lib.rs
@@ -0,0 +1,168 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+//! The [`CheckMetadataHash`] signed extension.
+//!
+//! The extension for optionally checking the metadata hash. For information on how it works and
+//! what it does exactly, see the docs of [`CheckMetadataHash`].
+//!
+//! # Integration
+//!
+//! As with any signed extension, you will need to add it to your runtime's signed extensions:
#![doc = docify::embed!("src/tests.rs", add_metadata_hash_extension)]
+//! As the extension requires the `RUNTIME_METADATA_HASH` environment variable to be present at
+//! compile time, it requires a little bit more setup. To have this environment variable available
+//! at compile time, the `substrate-wasm-builder` needs to be told to generate it:
#![doc = docify::embed!("src/tests.rs", enable_metadata_hash_in_wasm_builder)]
+//! As generating the metadata hash requires compiling the runtime twice, it is
+//! recommended to only enable the metadata hash generation when doing a build for a release or
+//! when you want to test this feature.
+
+extern crate alloc;
+/// For our tests
+extern crate self as frame_metadata_hash_extension;
+
+use codec::{Decode, Encode};
+use frame_support::DebugNoBound;
+use frame_system::Config;
+use scale_info::TypeInfo;
+use sp_runtime::{
+	traits::{DispatchInfoOf, SignedExtension},
+	transaction_validity::{TransactionValidityError, UnknownTransaction},
+};
+
+#[cfg(test)]
+mod tests;
+
+/// The mode of [`CheckMetadataHash`].
+#[derive(Decode, Encode, PartialEq, Debug, TypeInfo, Clone, Copy, Eq)]
+enum Mode {
+	Disabled,
+	Enabled,
+}
+
+/// Wrapper around the metadata hash and where to get it from.
+#[derive(Default, Debug, PartialEq, Clone, Copy, Eq)]
+enum MetadataHash {
+	/// Fetch it from the `RUNTIME_METADATA_HASH` env variable at compile time.
+	#[default]
+	FetchFromEnv,
+	/// Use the given metadata hash.
+	Custom([u8; 32]),
+}
+
+impl MetadataHash {
+	/// Returns the metadata hash.
+	fn hash(&self) -> Option<[u8; 32]> {
+		match self {
+			Self::FetchFromEnv =>
+				option_env!("RUNTIME_METADATA_HASH").map(array_bytes::hex2array_unchecked),
+			Self::Custom(hash) => Some(*hash),
+		}
+	}
+}
+
+/// Extension for optionally verifying the metadata hash.
+///
+/// The metadata hash is a cryptographic representation of the runtime metadata. This metadata hash
+/// is built as described in [RFC78](https://polkadot-fellows.github.io/RFCs/approved/0078-merkleized-metadata.html).
+/// This metadata hash should give users the confidence that what they build with an online wallet
+/// is the same thing they are signing with their offline wallet and then applying on chain. To
+/// ensure that the online wallet is not tricking the offline wallet into decoding and showing an
+/// incorrect extrinsic, the offline wallet will include the metadata hash into the additional
+/// signed data and the runtime will then do the same. If the metadata hash doesn't match, the
+/// signature verification will fail and thus, the transaction will be rejected. The RFC contains
+/// more details on how it works.
+///
+/// The extension adds one byte (the `mode`) to the size of the extrinsic. This one byte controls
+/// whether the metadata hash should be added to the signed data or not. Mode `0` means that
+/// the metadata hash is not added and thus, `None` is added to the signed data. Mode `1` means that
+/// the metadata hash is added and thus, `Some(metadata_hash)` is added to the signed data. Further
+/// values of `mode` are reserved for future changes.
+///
+/// The metadata hash is read from the environment variable `RUNTIME_METADATA_HASH`. This
+/// environment variable is for example set by the `substrate-wasm-builder` when the feature for
+/// generating the metadata hash is enabled. If the environment variable is not set and `mode = 1`
+/// is passed, the transaction is rejected with [`UnknownTransaction::CannotLookup`].
+#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo, DebugNoBound)]
+#[scale_info(skip_type_params(T))]
+pub struct CheckMetadataHash<T> {
+	_phantom: core::marker::PhantomData<T>,
+	mode: Mode,
+	#[codec(skip)]
+	metadata_hash: MetadataHash,
+}
+
+impl<T> CheckMetadataHash<T> {
+	/// Creates a new `SignedExtension` to check the metadata hash.
+	pub fn new(enable: bool) -> Self {
+		Self {
+			_phantom: core::marker::PhantomData,
+			mode: if enable { Mode::Enabled } else { Mode::Disabled },
+			metadata_hash: MetadataHash::FetchFromEnv,
+		}
+	}
+
+	/// Create an instance that uses the given `metadata_hash`.
+	///
+	/// This is useful for testing the extension.
+	pub fn new_with_custom_hash(metadata_hash: [u8; 32]) -> Self {
+		Self {
+			_phantom: core::marker::PhantomData,
+			mode: Mode::Enabled,
+			metadata_hash: MetadataHash::Custom(metadata_hash),
+		}
+	}
+}
+
+impl<T: Config + Send + Sync> SignedExtension for CheckMetadataHash<T> {
+	type AccountId = T::AccountId;
+	type Call = <T as Config>::RuntimeCall;
+	type AdditionalSigned = Option<[u8; 32]>;
+	type Pre = ();
+	const IDENTIFIER: &'static str = "CheckMetadataHash";
+
+	fn additional_signed(&self) -> Result<Self::AdditionalSigned, TransactionValidityError> {
+		let signed = match self.mode {
+			Mode::Disabled => None,
+			Mode::Enabled => match self.metadata_hash.hash() {
+				Some(hash) => Some(hash),
+				None => return Err(UnknownTransaction::CannotLookup.into()),
+			},
+		};
+
+		log::debug!(
+			target: "runtime::metadata-hash",
+			"CheckMetadataHash::additional_signed => {:?}",
+			signed.as_ref().map(|h| array_bytes::bytes2hex("0x", h)),
+		);
+
+		Ok(signed)
+	}
+
+	fn pre_dispatch(
+		self,
+		who: &Self::AccountId,
+		call: &Self::Call,
+		info: &DispatchInfoOf<Self::Call>,
+		len: usize,
+	) -> Result<Self::Pre, TransactionValidityError> {
+		self.validate(who, call, info, len).map(|_| ())
+	}
+}
diff --git a/substrate/frame/metadata-hash-extension/src/tests.rs b/substrate/frame/metadata-hash-extension/src/tests.rs
new file mode 100644
index 00000000000..f13eecfd94b
--- /dev/null
+++ b/substrate/frame/metadata-hash-extension/src/tests.rs
@@ -0,0 +1,179 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::CheckMetadataHash;
+use codec::{Decode, Encode};
+use frame_metadata::RuntimeMetadataPrefixed;
+use frame_support::{
+	derive_impl,
+	pallet_prelude::{InvalidTransaction, TransactionValidityError},
+};
+use merkleized_metadata::{generate_metadata_digest, ExtraInfo};
+use sp_api::{Metadata, ProvideRuntimeApi};
+use sp_runtime::{
+	traits::{Extrinsic as _, SignedExtension},
+	transaction_validity::{TransactionSource, UnknownTransaction},
+};
+use sp_transaction_pool::runtime_api::TaggedTransactionQueue;
+use substrate_test_runtime_client::{
+	prelude::*,
+	runtime::{self, ExtrinsicBuilder},
+	DefaultTestClientBuilderExt, TestClientBuilder,
+};
+
+type Block = frame_system::mocking::MockBlock<Test>;
+
+frame_support::construct_runtime! {
+	pub enum Test {
+		System: frame_system,
+	}
+}
+
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
+impl frame_system::Config for Test {
+	type Block = Block;
+}
+
+#[test]
+fn rejects_when_no_metadata_hash_was_passed() {
+	let ext = CheckMetadataHash::<Test>::decode(&mut &1u8.encode()[..]).unwrap();
+	assert_eq!(Err(UnknownTransaction::CannotLookup.into()), ext.additional_signed());
+}
+
+#[test]
+fn rejects_unknown_mode() {
+	assert!(CheckMetadataHash::<Test>::decode(&mut &50u8.encode()[..]).is_err());
+}
+
+/// Generate the metadata hash for the `test-runtime`.
+fn generate_metadata_hash(metadata: RuntimeMetadataPrefixed) -> [u8; 32] {
+	let runtime_version = runtime::VERSION;
+	let base58_prefix = 0;
+
+	let extra_info = ExtraInfo {
+		spec_version: runtime_version.spec_version,
+		spec_name: runtime_version.spec_name.into(),
+		base58_prefix,
+		decimals: 10,
+		token_symbol: "TOKEN".into(),
+	};
+
+	generate_metadata_digest(&metadata.1, extra_info).unwrap().hash()
+}
+
+#[test]
+fn ensure_check_metadata_works_on_real_extrinsics() {
+	sp_tracing::try_init_simple();
+
+	let client = TestClientBuilder::new().build();
+	let runtime_api = client.runtime_api();
+	let best_hash = client.chain_info().best_hash;
+
+	let metadata = RuntimeMetadataPrefixed::decode(
+		&mut &runtime_api.metadata_at_version(best_hash, 15).unwrap().unwrap()[..],
+	)
+	.unwrap();
+
+	let valid_transaction = ExtrinsicBuilder::new_include_data(vec![1, 2, 3])
+		.metadata_hash(generate_metadata_hash(metadata))
+		.build();
+	// Ensure that the transaction is signed.
+	assert!(valid_transaction.is_signed().unwrap());
+
+	runtime_api
+		.validate_transaction(best_hash, TransactionSource::External, valid_transaction, best_hash)
+		.unwrap()
+		.unwrap();
+
+	// Including some random metadata hash should make the transaction invalid.
+	let invalid_transaction = ExtrinsicBuilder::new_include_data(vec![1, 2, 3])
+		.metadata_hash([10u8; 32])
+		.build();
+	// Ensure that the transaction is signed.
+	assert!(invalid_transaction.is_signed().unwrap());
+
+	assert_eq!(
+		TransactionValidityError::from(InvalidTransaction::BadProof),
+		runtime_api
+			.validate_transaction(
+				best_hash,
+				TransactionSource::External,
+				invalid_transaction,
+				best_hash
+			)
+			.unwrap()
+			.unwrap_err()
+	);
+}
+
+#[allow(unused)]
+mod docs {
+	use super::*;
+
+	#[docify::export]
+	mod add_metadata_hash_extension {
+		frame_support::construct_runtime! {
+			pub enum Runtime {
+				System: frame_system,
+			}
+		}
+
+		/// The `SignedExtension` to the basic transaction logic.
+		pub type SignedExtra = (
+			frame_system::CheckNonZeroSender<Runtime>,
+			frame_system::CheckSpecVersion<Runtime>,
+			frame_system::CheckTxVersion<Runtime>,
+			frame_system::CheckGenesis<Runtime>,
+			frame_system::CheckMortality<Runtime>,
+			frame_system::CheckNonce<Runtime>,
+			frame_system::CheckWeight<Runtime>,
+			// Add the `CheckMetadataHash` extension.
+			// The position in this list is not important, so we could also add it to the beginning.
+			frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+		);
+
+		/// In your runtime this will be your real address type.
+		type Address = ();
+		/// In your runtime this will be your real signature type.
+		type Signature = ();
+
+		/// Unchecked extrinsic type as expected by this runtime.
+		pub type UncheckedExtrinsic =
+			sp_runtime::generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, SignedExtra>;
+	}
+
+	// Put here to not have it in the docs as well.
+	#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
+	impl frame_system::Config for add_metadata_hash_extension::Runtime {
+		type Block = Block;
+		type RuntimeEvent = add_metadata_hash_extension::RuntimeEvent;
+		type RuntimeOrigin = add_metadata_hash_extension::RuntimeOrigin;
+		type RuntimeCall = add_metadata_hash_extension::RuntimeCall;
+		type PalletInfo = add_metadata_hash_extension::PalletInfo;
+	}
+
+	#[docify::export]
+	fn enable_metadata_hash_in_wasm_builder() {
+		substrate_wasm_builder::WasmBuilder::init_with_defaults()
+			// Requires the `metadata-hash` feature to be activated.
+			// You need to pass the main token symbol and its number of decimals.
+			.enable_metadata_hash("TOKEN", 12)
+			// The runtime will be built twice and the second time the `RUNTIME_METADATA_HASH`
+			// environment variable will be set for the `CheckMetadataHash` extension.
+			.build()
+	}
+}
diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml
index 038076e10c5..8733ff9fceb 100644
--- a/substrate/test-utils/runtime/Cargo.toml
+++ b/substrate/test-utils/runtime/Cargo.toml
@@ -37,6 +37,7 @@ sp-runtime = { path = "../../primitives/runtime", default-features = false, feat
 pallet-babe = { path = "../../frame/babe", default-features = false }
 pallet-balances = { path = "../../frame/balances", default-features = false }
 frame-executive = { path = "../../frame/executive", default-features = false }
+frame-metadata-hash-extension = { path = "../../frame/metadata-hash-extension", default-features = false }
 frame-system = { path = "../../frame/system", default-features = false }
 frame-system-rpc-runtime-api = { path = "../../frame/system/rpc/runtime-api", default-features = false }
 pallet-timestamp = { path = "../../frame/timestamp", default-features = false }
@@ -67,7 +68,7 @@ serde = { features = ["alloc", "derive"], workspace = true }
 serde_json = { features = ["alloc"], workspace = true }
 
 [build-dependencies]
-substrate-wasm-builder = { path = "../../utils/wasm-builder", optional = true }
+substrate-wasm-builder = { path = "../../utils/wasm-builder", optional = true, features = ["metadata-hash"] }
 
 [features]
 default = ["std"]
@@ -76,6 +77,7 @@ std = [
 	"array-bytes",
 	"codec/std",
 	"frame-executive/std",
+	"frame-metadata-hash-extension/std",
 	"frame-support/std",
 	"frame-system-rpc-runtime-api/std",
 	"frame-system/std",
@@ -112,5 +114,6 @@ std = [
 	"substrate-wasm-builder",
 	"trie-db/std",
 ]
+
 # Special feature to disable logging
 disable-logging = ["sp-api/disable-logging"]
diff --git a/substrate/test-utils/runtime/build.rs b/substrate/test-utils/runtime/build.rs
index dd79ce2c5ae..d38173fcfcb 100644
--- a/substrate/test-utils/runtime/build.rs
+++ b/substrate/test-utils/runtime/build.rs
@@ -25,6 +25,7 @@ fn main() {
 		// to this value by default. This is because some of our tests
 		// (`restoration_of_globals`) depend on the stack-size.
 		.append_to_rust_flags("-Clink-arg=-zstack-size=1048576")
+		.enable_metadata_hash("TOKEN", 10)
 		.import_memory()
 		.build();
 }
diff --git a/substrate/test-utils/runtime/src/extrinsic.rs b/substrate/test-utils/runtime/src/extrinsic.rs
index e355e5d099a..5ae0d8f8f6e 100644
--- a/substrate/test-utils/runtime/src/extrinsic.rs
+++ b/substrate/test-utils/runtime/src/extrinsic.rs
@@ -22,10 +22,11 @@ use crate::{
 	CheckSubstrateCall, Extrinsic, Nonce, Pair, RuntimeCall, SignedPayload, TransferData,
 };
 use codec::Encode;
+use frame_metadata_hash_extension::CheckMetadataHash;
 use frame_system::{CheckNonce, CheckWeight};
 use sp_core::crypto::Pair as TraitPair;
 use sp_keyring::AccountKeyring;
-use sp_runtime::{transaction_validity::TransactionPriority, Perbill};
+use sp_runtime::{traits::SignedExtension, transaction_validity::TransactionPriority, Perbill};
 
 /// Transfer used in test substrate pallet. Extrinsic is created and signed using this data.
 #[derive(Clone)]
@@ -81,17 +82,23 @@ pub struct ExtrinsicBuilder {
 	function: RuntimeCall,
 	signer: Option<Pair>,
 	nonce: Option<Nonce>,
+	metadata_hash: Option<[u8; 32]>,
 }
 
 impl ExtrinsicBuilder {
 	/// Create builder for given `RuntimeCall`. By default `Extrinsic` will be signed by `Alice`.
 	pub fn new(function: impl Into<RuntimeCall>) -> Self {
-		Self { function: function.into(), signer: Some(AccountKeyring::Alice.pair()), nonce: None }
+		Self {
+			function: function.into(),
+			signer: Some(AccountKeyring::Alice.pair()),
+			nonce: None,
+			metadata_hash: None,
+		}
 	}
 
 	/// Create builder for given `RuntimeCall`. `Extrinsic` will be unsigned.
 	pub fn new_unsigned(function: impl Into<RuntimeCall>) -> Self {
-		Self { function: function.into(), signer: None, nonce: None }
+		Self { function: function.into(), signer: None, nonce: None, metadata_hash: None }
 	}
 
 	/// Create builder for `pallet_call::bench_transfer` from given `TransferData`.
@@ -105,6 +112,7 @@ impl ExtrinsicBuilder {
 		Self {
 			nonce: Some(transfer.nonce),
 			signer: Some(transfer.from.clone()),
+			metadata_hash: None,
 			..Self::new(BalancesCall::transfer_allow_death {
 				dest: transfer.to,
 				value: transfer.amount,
@@ -186,6 +194,12 @@ impl ExtrinsicBuilder {
 		self
 	}
 
+	/// Metadata hash to put into the signed data of the extrinsic.
+	pub fn metadata_hash(mut self, metadata_hash: [u8; 32]) -> Self {
+		self.metadata_hash = Some(metadata_hash);
+		self
+	}
+
 	/// Build `Extrinsic` using embedded parameters
 	pub fn build(self) -> Extrinsic {
 		if let Some(signer) = self.signer {
@@ -193,9 +207,15 @@ impl ExtrinsicBuilder {
 				CheckNonce::from(self.nonce.unwrap_or(0)),
 				CheckWeight::new(),
 				CheckSubstrateCall {},
+				self.metadata_hash
+					.map(CheckMetadataHash::new_with_custom_hash)
+					.unwrap_or_else(|| CheckMetadataHash::new(false)),
+			);
+			let raw_payload = SignedPayload::from_raw(
+				self.function.clone(),
+				extra.clone(),
+				extra.additional_signed().unwrap(),
 			);
-			let raw_payload =
-				SignedPayload::from_raw(self.function.clone(), extra.clone(), ((), (), ()));
 			let signature = raw_payload.using_encoded(|e| signer.sign(e));
 
 			Extrinsic::new_signed(self.function, signer.public(), signature, extra)
diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs
index 370aa0034fc..ab87db0e700 100644
--- a/substrate/test-utils/runtime/src/lib.rs
+++ b/substrate/test-utils/runtime/src/lib.rs
@@ -149,7 +149,12 @@ pub type Signature = sr25519::Signature;
 pub type Pair = sp_core::sr25519::Pair;
 
 /// The SignedExtension to the basic transaction logic.
-pub type SignedExtra = (CheckNonce, CheckWeight, CheckSubstrateCall);
+pub type SignedExtra = (
+	CheckNonce,
+	CheckWeight,
+	CheckSubstrateCall,
+	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+);
 /// The payload being signed in transactions.
 pub type SignedPayload = sp_runtime::generic::SignedPayload<RuntimeCall, SignedExtra>;
 /// Unchecked extrinsic type as expected by this runtime.
@@ -494,14 +499,14 @@ impl_runtime_apis! {
 
 	impl sp_api::Metadata<Block> for Runtime {
 		fn metadata() -> OpaqueMetadata {
-			unimplemented!()
+			OpaqueMetadata::new(Runtime::metadata().into())
 		}
 
-		fn metadata_at_version(_version: u32) -> Option<OpaqueMetadata> {
-			unimplemented!()
+		fn metadata_at_version(version: u32) -> Option<OpaqueMetadata> {
+			Runtime::metadata_at_version(version)
 		}
 
 		fn metadata_versions() -> alloc::vec::Vec<u32> {
-			unimplemented!()
+			Runtime::metadata_versions()
 		}
 	}
diff --git a/substrate/utils/wasm-builder/Cargo.toml b/substrate/utils/wasm-builder/Cargo.toml
index bac323e2e6a..090955494f0 100644
--- a/substrate/utils/wasm-builder/Cargo.toml
+++ b/substrate/utils/wasm-builder/Cargo.toml
@@ -27,3 +27,34 @@ filetime = "0.2.16"
 wasm-opt = "0.116"
 parity-wasm = "0.45"
 polkavm-linker = { workspace = true }
+
+# Dependencies required for the `metadata-hash` feature.
+merkleized-metadata = { version = "0.1.0", optional = true }
+sc-executor = { path = "../../client/executor", optional = true }
+sp-core = { path = "../../primitives/core", optional = true }
+sp-io = { path = "../../primitives/io", optional = true }
+sp-version = { path = "../../primitives/version", optional = true }
+frame-metadata = { version = "16.0.0", features = ["current"], optional = true }
+codec = { package = "parity-scale-codec", version = "3.1.5", optional = true }
+array-bytes = { version = "6.1", optional = true }
+sp-tracing = { path = "../../primitives/tracing", optional = true }
+
+[features]
+# Enable support for generating the metadata hash.
+#
+# To generate the metadata hash the runtime is built once, executed to build the metadata and then
+# built a second time with the `RUNTIME_METADATA_HASH` environment variable set. The environment
+# variable then contains the hash and can be used inside the runtime.
+#
+# This pulls in quite a lot of dependencies and thus, is disabled by default.
+metadata-hash = [
+	"array-bytes",
+	"codec",
+	"frame-metadata",
+	"merkleized-metadata",
+	"sc-executor",
+	"sp-core",
+	"sp-io",
+	"sp-tracing",
+	"sp-version",
+]
diff --git a/substrate/utils/wasm-builder/src/builder.rs b/substrate/utils/wasm-builder/src/builder.rs
index 163703fbec6..37c6c4aa743 100644
--- a/substrate/utils/wasm-builder/src/builder.rs
+++ b/substrate/utils/wasm-builder/src/builder.rs
@@ -23,6 +23,13 @@ use std::{
 
 use crate::RuntimeTarget;
 
+/// Extra information when generating the `metadata-hash`.
+#[cfg(feature = "metadata-hash")]
+pub(crate) struct MetadataExtraInfo {
+	pub decimals: u8,
+	pub token_symbol: String,
+}
+
 /// Returns the manifest dir from the `CARGO_MANIFEST_DIR` env.
 fn get_manifest_dir() -> PathBuf {
 	env::var("CARGO_MANIFEST_DIR")
@@ -53,6 +60,8 @@ impl WasmBuilderSelectProject {
 			disable_runtime_version_section_check: false,
 			export_heap_base: false,
 			import_memory: false,
+			#[cfg(feature = "metadata-hash")]
+			enable_metadata_hash: None,
 		}
 	}
 
@@ -71,6 +80,8 @@ impl WasmBuilderSelectProject {
 				disable_runtime_version_section_check: false,
 				export_heap_base: false,
 				import_memory: false,
+				#[cfg(feature = "metadata-hash")]
+				enable_metadata_hash: None,
 			})
 		} else {
 			Err("Project path must point to the `Cargo.toml` of the project")
@@ -108,6 +119,10 @@ pub struct WasmBuilder {
 	export_heap_base: bool,
 	/// Whether `--import-memory` should be added to the link args (WASM-only).
 	import_memory: bool,
+
+	/// Whether to enable the metadata hash generation.
+	#[cfg(feature = "metadata-hash")]
+	enable_metadata_hash: Option<MetadataExtraInfo>,
 }
 
 impl WasmBuilder {
@@ -191,6 +206,22 @@ impl WasmBuilder {
 		self
 	}
 
+	/// Enable generation of the metadata hash.
+	///
+	/// This will compile the runtime once, fetch the metadata, build the metadata hash and
+	/// then compile again with the env `RUNTIME_METADATA_HASH` set. For more information
+	/// about the metadata hash see [RFC78](https://polkadot-fellows.github.io/RFCs/approved/0078-merkleized-metadata.html).
+	///
+	/// - `token_symbol`: The symbol of the main native token of the chain.
+	/// - `decimals`: The number of decimals of the main native token.
+	#[cfg(feature = "metadata-hash")]
+	pub fn enable_metadata_hash(mut self, token_symbol: impl Into<String>, decimals: u8) -> Self {
+		self.enable_metadata_hash =
+			Some(MetadataExtraInfo { token_symbol: token_symbol.into(), decimals });
+
+		self
+	}
+
 	/// Disable the check for the `runtime_version` wasm section.
 	///
 	/// By default the `wasm-builder` will ensure that the `runtime_version` section will
@@ -237,6 +268,8 @@ impl WasmBuilder {
 			self.features_to_enable,
 			self.file_name,
 			!self.disable_runtime_version_section_check,
+			#[cfg(feature = "metadata-hash")]
+			self.enable_metadata_hash,
 		);
 
 		// As last step we need to generate our `rerun-if-changed` stuff. If a build fails, we don't
@@ -311,6 +344,7 @@ fn build_project(
 	features_to_enable: Vec<String>,
 	wasm_binary_name: Option<String>,
 	check_for_runtime_version_section: bool,
+	#[cfg(feature = "metadata-hash")] enable_metadata_hash: Option<MetadataExtraInfo>,
 ) {
 	let cargo_cmd = match crate::prerequisites::check(target) {
 		Ok(cmd) => cmd,
@@ -328,6 +362,8 @@ fn build_project(
 		features_to_enable,
 		wasm_binary_name,
 		check_for_runtime_version_section,
+		#[cfg(feature = "metadata-hash")]
+		enable_metadata_hash,
 	);
 
 	let (wasm_binary, wasm_binary_bloaty) = if let Some(wasm_binary) = wasm_binary {
diff --git a/substrate/utils/wasm-builder/src/lib.rs b/substrate/utils/wasm-builder/src/lib.rs
index 9ebab38b9cb..07de4c15831 100644
--- a/substrate/utils/wasm-builder/src/lib.rs
+++ b/substrate/utils/wasm-builder/src/lib.rs
@@ -116,6 +116,8 @@ use std::{
 use version::Version;
 
 mod builder;
+#[cfg(feature = "metadata-hash")]
+mod metadata_hash;
 mod prerequisites;
 mod version;
 mod wasm_project;
@@ -238,7 +240,7 @@ fn get_rustup_command(target: RuntimeTarget) -> Option<String> {
 }
 
 /// Wraps a specific command which represents a cargo invocation.
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 struct CargoCommand {
 	program: String,
 	args: Vec<String>,
@@ -350,6 +352,7 @@ impl CargoCommand {
 }
 
 /// Wraps a [`CargoCommand`] and the version of `rustc` the cargo command uses.
+#[derive(Clone)]
 struct CargoCommandVersioned {
 	command: CargoCommand,
 	version: String,
diff --git a/substrate/utils/wasm-builder/src/metadata_hash.rs b/substrate/utils/wasm-builder/src/metadata_hash.rs
new file mode 100644
index 00000000000..1003f2d18ea
--- /dev/null
+++ b/substrate/utils/wasm-builder/src/metadata_hash.rs
@@ -0,0 +1,132 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::builder::MetadataExtraInfo;
+use codec::{Decode, Encode};
+use frame_metadata::{RuntimeMetadata, RuntimeMetadataPrefixed};
+use merkleized_metadata::{generate_metadata_digest, ExtraInfo};
+use sc_executor::WasmExecutor;
+use sp_core::traits::{CallContext, CodeExecutor, RuntimeCode, WrappedRuntimeCode};
+use std::path::Path;
+
+/// The host functions that we provide when calling into the wasm file.
+///
+/// Any other host function will return an error.
+type HostFunctions = (
+	// The allocator functions.
+	sp_io::allocator::HostFunctions,
+	// Logging is good to have for debugging issues.
+	sp_io::logging::HostFunctions,
+	// Give access to the "state". The state will actually be empty, but some chains put constants
+	// into the state and metadata generation would panic without it. Thus, we give them an empty
+	// state to not panic.
+	sp_io::storage::HostFunctions,
+	// The hashing functions.
+	sp_io::hashing::HostFunctions,
+);
+
+/// Generate the metadata hash.
+///
+/// The metadata hash is generated as specified in
+/// [RFC78](https://polkadot-fellows.github.io/RFCs/approved/0078-merkleized-metadata.html).
+///
+/// Returns the metadata hash.
+pub fn generate_metadata_hash(wasm: &Path, extra_info: MetadataExtraInfo) -> [u8; 32] {
+	sp_tracing::try_init_simple();
+
+	let wasm = std::fs::read(wasm).expect("Wasm file was just created and should be readable.");
+
+	let executor = WasmExecutor::<HostFunctions>::builder()
+		.with_allow_missing_host_functions(true)
+		.build();
+
+	let runtime_code = RuntimeCode {
+		code_fetcher: &WrappedRuntimeCode(wasm.into()),
+		heap_pages: None,
+		// The hash is only used for caching and thus, not that important for our use case here.
+		hash: vec![1, 2, 3],
+	};
+
+	let metadata = executor
+		.call(
+			&mut sp_io::TestExternalities::default().ext(),
+			&runtime_code,
+			"Metadata_metadata_at_version",
+			&15u32.encode(),
+			CallContext::Offchain,
+		)
+		.0
+		.expect("`Metadata::metadata_at_version` should exist.");
+
+	let metadata = Option::<Vec<u8>>::decode(&mut &metadata[..])
+		.ok()
+		.flatten()
+		.expect("Metadata V15 support is required.");
+
+	let metadata = RuntimeMetadataPrefixed::decode(&mut &metadata[..])
+		.expect("Invalid encoded metadata?")
+		.1;
+
+	let runtime_version = executor
+		.call(
+			&mut sp_io::TestExternalities::default().ext(),
+			&runtime_code,
+			"Core_version",
+			&[],
+			CallContext::Offchain,
+		)
+		.0
+		.expect("`Core_version` should exist.");
+	let runtime_version = sp_version::RuntimeVersion::decode(&mut &runtime_version[..])
+		.expect("Invalid `RuntimeVersion` encoding");
+
+	let base58_prefix = extract_ss58_prefix(&metadata);
+
+	let extra_info = ExtraInfo {
+		spec_version: runtime_version.spec_version,
+		spec_name: runtime_version.spec_name.into(),
+		base58_prefix,
+		decimals: extra_info.decimals,
+		token_symbol: extra_info.token_symbol,
+	};
+
+	generate_metadata_digest(&metadata, extra_info)
+		.expect("Failed to generate the metadata digest")
+		.hash()
+}
+
+/// Extract the `SS58` prefix from the constants in the given `metadata`.
+fn extract_ss58_prefix(metadata: &RuntimeMetadata) -> u16 {
+	let RuntimeMetadata::V15(ref metadata) = metadata else {
+		panic!("Metadata version 15 required")
+	};
+
+	let system = metadata
+		.pallets
+		.iter()
+		.find(|p| p.name == "System")
+		.expect("Each FRAME runtime has the `System` pallet; qed");
+
+	system
+		.constants
+		.iter()
+		.find_map(|c| {
+			(c.name == "SS58Prefix")
+				.then(|| u16::decode(&mut &c.value[..]).expect("`SS58Prefix` is a `u16`; qed"))
+		})
+		.expect("`SS58Prefix` exists in the `System` constants; qed")
+}
diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs
index b58e6bfa36b..ff6c8e38a33 100644
--- a/substrate/utils/wasm-builder/src/wasm_project.rs
+++ b/substrate/utils/wasm-builder/src/wasm_project.rs
@@ -15,6 +15,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#[cfg(feature = "metadata-hash")]
+use crate::builder::MetadataExtraInfo;
 use crate::{write_file_if_changed, CargoCommandVersioned, RuntimeTarget, OFFLINE};
 
 use build_helper::rerun_if_changed;
@@ -113,57 +115,103 @@ fn crate_metadata(cargo_manifest: &Path) -> Metadata {
 /// The path to the compact runtime binary and the bloaty runtime binary.
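+///
+/// With the `metadata-hash` feature enabled and requested by the caller, the runtime is built
+/// twice (see `build_bloaty_blob` below): once to extract the metadata and compute its hash,
+/// and once more with the `RUNTIME_METADATA_HASH` environment variable set.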
 pub(crate) fn create_and_compile(
 	target: RuntimeTarget,
-	project_cargo_toml: &Path,
+	orig_project_cargo_toml: &Path,
 	default_rustflags: &str,
 	cargo_cmd: CargoCommandVersioned,
 	features_to_enable: Vec<String>,
-	bloaty_blob_out_name_override: Option<String>,
+	blob_out_name_override: Option<String>,
 	check_for_runtime_version_section: bool,
+	#[cfg(feature = "metadata-hash")] enable_metadata_hash: Option<MetadataExtraInfo>,
 ) -> (Option<WasmBinary>, WasmBinaryBloaty) {
 	let runtime_workspace_root = get_wasm_workspace_root();
 	let runtime_workspace = runtime_workspace_root.join(target.build_subdirectory());
 
-	let crate_metadata = crate_metadata(project_cargo_toml);
+	let crate_metadata = crate_metadata(orig_project_cargo_toml);
 
 	let project = create_project(
 		target,
-		project_cargo_toml,
+		orig_project_cargo_toml,
 		&runtime_workspace,
 		&crate_metadata,
 		crate_metadata.workspace_root.as_ref(),
 		features_to_enable,
 	);
+	let wasm_project_cargo_toml = project.join("Cargo.toml");
 
 	let build_config = BuildConfiguration::detect(target, &project);
 
-	// Build the bloaty runtime blob
-	let raw_blob_path = build_bloaty_blob(
-		target,
-		&build_config.blob_build_profile,
-		&project,
-		default_rustflags,
-		cargo_cmd,
-	);
+	#[cfg(feature = "metadata-hash")]
+	let raw_blob_path = match enable_metadata_hash {
+		Some(extra_info) => {
+			// When the metadata hash is enabled we need to build the runtime twice.
+			let raw_blob_path = build_bloaty_blob(
+				target,
+				&build_config.blob_build_profile,
+				&project,
+				default_rustflags,
+				cargo_cmd.clone(),
+				None,
+			);
 
-	let (final_blob_binary, bloaty_blob_binary) = match target {
-		RuntimeTarget::Wasm => compile_wasm(
-			project_cargo_toml,
+			let hash = crate::metadata_hash::generate_metadata_hash(&raw_blob_path, extra_info);
+
+			build_bloaty_blob(
+				target,
+				&build_config.blob_build_profile,
+				&project,
+				default_rustflags,
+				cargo_cmd,
+				Some(hash),
+			)
+		},
+		None => build_bloaty_blob(
+			target,
+			&build_config.blob_build_profile,
 			&project,
-			bloaty_blob_out_name_override,
-			check_for_runtime_version_section,
-			&build_config,
+			default_rustflags,
+			cargo_cmd,
+			None,
 		),
+	};
+
+	// If the feature is not enabled, we only need to do it once.
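+	// (With the feature enabled, the `match` above builds the blob twice instead: first to
+	// extract the metadata and compute its hash, then again with `RUNTIME_METADATA_HASH`
+	// exported to the environment.)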
+	#[cfg(not(feature = "metadata-hash"))]
+	let raw_blob_path = {
+		build_bloaty_blob(
+			target,
+			&build_config.blob_build_profile,
+			&project,
+			default_rustflags,
+			cargo_cmd,
+		)
+	};
+
+	let blob_name =
+		blob_out_name_override.unwrap_or_else(|| get_blob_name(target, &wasm_project_cargo_toml));
+
+	let (final_blob_binary, bloaty_blob_binary) = match target {
+		RuntimeTarget::Wasm => {
+			let out_path = project.join(format!("{blob_name}.wasm"));
+			fs::copy(raw_blob_path, &out_path).expect("copying the runtime blob should never fail");
+
+			maybe_compact_and_compress_wasm(
+				&wasm_project_cargo_toml,
+				&project,
+				WasmBinaryBloaty(out_path),
+				&blob_name,
+				check_for_runtime_version_section,
+				&build_config,
+			)
+		},
 		RuntimeTarget::Riscv => {
-			let out_name = bloaty_blob_out_name_override
-				.unwrap_or_else(|| get_blob_name(target, project_cargo_toml));
-			let out_path = project.join(format!("{out_name}.polkavm"));
+			let out_path = project.join(format!("{blob_name}.polkavm"));
 			fs::copy(raw_blob_path, &out_path).expect("copying the runtime blob should never fail");
 			(None, WasmBinaryBloaty(out_path))
 		},
 	};
 
 	generate_rerun_if_changed_instructions(
-		project_cargo_toml,
+		orig_project_cargo_toml,
 		&project,
 		&runtime_workspace,
 		final_blob_binary.as_ref(),
@@ -177,25 +225,14 @@ pub(crate) fn create_and_compile(
 	(final_blob_binary, bloaty_blob_binary)
 }
 
-fn compile_wasm(
-	project_cargo_toml: &Path,
+fn maybe_compact_and_compress_wasm(
+	wasm_project_cargo_toml: &Path,
 	project: &Path,
-	bloaty_blob_out_name_override: Option<String>,
+	bloaty_blob_binary: WasmBinaryBloaty,
+	blob_name: &str,
 	check_for_runtime_version_section: bool,
 	build_config: &BuildConfiguration,
 ) -> (Option<WasmBinary>, WasmBinaryBloaty) {
-	// Get the name of the bloaty runtime blob.
-	let bloaty_blob_default_name = get_blob_name(RuntimeTarget::Wasm, project_cargo_toml);
-	let bloaty_blob_out_name =
-		bloaty_blob_out_name_override.unwrap_or_else(|| bloaty_blob_default_name.clone());
-
-	let bloaty_blob_binary = copy_bloaty_blob(
-		&project,
-		&build_config.blob_build_profile,
-		&bloaty_blob_default_name,
-		&bloaty_blob_out_name,
-	);
-
 	// Try to compact and compress the bloaty blob, if the *outer* profile wants it.
 	//
 	// This is because, by default the inner profile will be set to `Release` even when the outer
 	// profile is `Debug`, because the blob built in `Debug` profile is too slow for normal
 	// development activities.
 	let (compact_blob_path, compact_compressed_blob_path) =
 		if build_config.outer_build_profile.wants_compact() {
-			let compact_blob_path = compact_wasm(
-				&project,
-				&build_config.blob_build_profile,
-				project_cargo_toml,
-				&bloaty_blob_out_name,
-			);
-			let compact_compressed_blob_path = compact_blob_path
-				.as_ref()
-				.and_then(|p| try_compress_blob(&p.0, &bloaty_blob_out_name));
+			let compact_blob_path = compact_wasm(&project, blob_name, &bloaty_blob_binary);
+			let compact_compressed_blob_path =
+				compact_blob_path.as_ref().and_then(|p| try_compress_blob(&p.0, blob_name));
 			(compact_blob_path, compact_compressed_blob_path)
 		} else {
 			(None, None)
@@ -221,15 +252,12 @@ fn compile_wasm(
 		ensure_runtime_version_wasm_section_exists(bloaty_blob_binary.bloaty_path());
 	}
 
-	compact_blob_path
-		.as_ref()
-		.map(|wasm_binary| copy_blob_to_target_directory(project_cargo_toml, wasm_binary));
+	let final_blob_binary = compact_compressed_blob_path.or(compact_blob_path);
 
-	compact_compressed_blob_path.as_ref().map(|wasm_binary_compressed| {
-		copy_blob_to_target_directory(project_cargo_toml, wasm_binary_compressed)
-	});
+	final_blob_binary
+		.as_ref()
+		.map(|binary| copy_blob_to_target_directory(wasm_project_cargo_toml, binary));
 
-	let final_blob_binary = compact_compressed_blob_path.or(compact_blob_path);
 	(final_blob_binary, bloaty_blob_binary)
 }
 
@@ -347,12 +375,25 @@ fn get_crate_name(cargo_manifest: &Path) -> String {
 		.expect("Package name exists; qed")
 }
 
+/// Extract the `lib.name` from the given `Cargo.toml`.
+fn get_lib_name(cargo_manifest: &Path) -> Option<String> {
+	let cargo_toml: Table = toml::from_str(
+		&fs::read_to_string(cargo_manifest).expect("File exists as checked before; qed"),
+	)
+	.expect("Cargo manifest is a valid toml file; qed");
+
+	let lib = cargo_toml.get("lib").and_then(|t| t.as_table())?;
+
+	lib.get("name").and_then(|p| p.as_str()).map(ToOwned::to_owned)
+}
+
 /// Returns the name for the blob binary.
 fn get_blob_name(target: RuntimeTarget, cargo_manifest: &Path) -> String {
-	let crate_name = get_crate_name(cargo_manifest);
 	match target {
-		RuntimeTarget::Wasm => crate_name.replace('-', "_"),
-		RuntimeTarget::Riscv => crate_name,
+		RuntimeTarget::Wasm => get_lib_name(cargo_manifest)
+			.expect("The wasm project should have a `lib.name`; qed")
+			.replace('-', "_"),
+		RuntimeTarget::Riscv => get_crate_name(cargo_manifest),
 	}
 }
 
@@ -379,7 +420,6 @@ fn create_project_cargo_toml(
 	workspace_root_path: &Path,
 	crate_name: &str,
 	crate_path: &Path,
-	wasm_binary: &str,
 	enabled_features: impl Iterator<Item = String>,
 ) {
 	let mut workspace_toml: Table = toml::from_str(
@@ -443,7 +483,7 @@ fn create_project_cargo_toml(
 
 	if target == RuntimeTarget::Wasm {
 		let mut lib = Table::new();
-		lib.insert("name".into(), wasm_binary.into());
+		lib.insert("name".into(), crate_name.replace("-", "_").into());
 		lib.insert("crate-type".into(), vec!["cdylib".to_string()].into());
 		wasm_workspace_toml.insert("lib".into(), lib.into());
 	}
@@ -588,7 +628,6 @@ fn create_project(
 ) -> PathBuf {
 	let crate_name = get_crate_name(project_cargo_toml);
 	let crate_path = project_cargo_toml.parent().expect("Parent path exists; qed");
-	let wasm_binary = get_blob_name(target, project_cargo_toml);
 	let wasm_project_folder = wasm_workspace.join(&crate_name);
 
 	fs::create_dir_all(wasm_project_folder.join("src"))
@@ -610,7 +649,6 @@ fn create_project(
 		workspace_root_path,
 		&crate_name,
 		crate_path,
-		&wasm_binary,
 		enabled_features.into_iter(),
 	);
 
@@ -775,12 +813,15 @@ fn offline_build() -> bool {
 }
 
 /// Build the project and create the bloaty runtime blob.
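+///
+/// When `metadata_hash` is `Some(_)`, its hex encoding is exported to the runtime build as the
+/// `RUNTIME_METADATA_HASH` environment variable.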
+///
+/// Returns the path to the generated bloaty runtime blob.
 fn build_bloaty_blob(
 	target: RuntimeTarget,
 	blob_build_profile: &Profile,
 	project: &Path,
 	default_rustflags: &str,
 	cargo_cmd: CargoCommandVersioned,
+	#[cfg(feature = "metadata-hash")] metadata_hash: Option<[u8; 32]>,
 ) -> PathBuf {
 	let manifest_path = project.join("Cargo.toml");
 	let mut build_cmd = cargo_cmd.command();
@@ -820,6 +861,11 @@ fn build_bloaty_blob(
 		// We don't want to call ourselves recursively
 		.env(crate::SKIP_BUILD_ENV, "");
 
+	#[cfg(feature = "metadata-hash")]
+	if let Some(hash) = metadata_hash {
+		build_cmd.env("RUNTIME_METADATA_HASH", array_bytes::bytes2hex("0x", &hash));
+	}
+
 	if super::color_output_enabled() {
 		build_cmd.arg("--color=always");
 	}
@@ -908,23 +954,16 @@ fn build_bloaty_blob(
 
 fn compact_wasm(
 	project: &Path,
-	inner_profile: &Profile,
-	cargo_manifest: &Path,
-	out_name: &str,
+	blob_name: &str,
+	bloaty_binary: &WasmBinaryBloaty,
 ) -> Option<WasmBinary> {
-	let default_out_name = get_blob_name(RuntimeTarget::Wasm, cargo_manifest);
-	let in_path = project
-		.join("target/wasm32-unknown-unknown")
-		.join(inner_profile.directory())
-		.join(format!("{}.wasm", default_out_name));
-
-	let wasm_compact_path = project.join(format!("{}.compact.wasm", out_name));
+	let wasm_compact_path = project.join(format!("{blob_name}.compact.wasm"));
 	let start = std::time::Instant::now();
 	wasm_opt::OptimizationOptions::new_opt_level_0()
 		.mvp_features_only()
 		.debug_info(true)
 		.add_pass(wasm_opt::Pass::StripDwarf)
-		.run(&in_path, &wasm_compact_path)
+		.run(bloaty_binary.bloaty_path(), &wasm_compact_path)
 		.expect("Failed to compact generated WASM binary.");
 	println!(
 		"{} {}",
@@ -934,22 +973,6 @@ fn build_bloaty_blob(
 	Some(WasmBinary(wasm_compact_path))
 }
 
-fn copy_bloaty_blob(
-	project: &Path,
-	inner_profile: &Profile,
-	in_name: &str,
-	out_name: &str,
-) -> WasmBinaryBloaty {
-	let in_path = project
-		.join("target/wasm32-unknown-unknown")
-		.join(inner_profile.directory())
-		.join(format!("{}.wasm", in_name));
-
-	let bloaty_path = project.join(format!("{}.wasm", out_name));
-	fs::copy(in_path, &bloaty_path).expect("Copying the bloaty file to the project dir.");
-	WasmBinaryBloaty(bloaty_path)
-}
-
 fn try_compress_blob(compact_blob_path: &Path, out_name: &str) -> Option<WasmBinary> {
 	use sp_maybe_compressed_blob::CODE_BLOB_BOMB_LIMIT;
 
diff --git a/templates/parachain/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml
index 3e1c7e4b325..a74c6a541f4 100644
--- a/templates/parachain/runtime/Cargo.toml
+++ b/templates/parachain/runtime/Cargo.toml
@@ -17,6 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [build-dependencies]
 substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true }
+docify = "0.2.8"
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
@@ -36,6 +37,7 @@ pallet-parachain-template = { path = "../pallets/template", default-features = f
 
 # Substrate / FRAME
 frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true }
 frame-executive = { path = "../../../substrate/frame/executive", default-features = false }
+frame-metadata-hash-extension = { path = "../../../substrate/frame/metadata-hash-extension", default-features = false }
 frame-support = { path = "../../../substrate/frame/support", default-features = false }
 frame-system = { path = "../../../substrate/frame/system", default-features = false }
 frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true }
"../../../substrate/frame/system/benchmarking", default-features = false, optional = true } @@ -104,6 +106,7 @@ std = [ "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", + "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-benchmarking?/std", "frame-system-rpc-runtime-api/std", @@ -195,3 +198,16 @@ try-runtime = [ "polkadot-runtime-common/try-runtime", "sp-runtime/try-runtime", ] + +# Enable the metadata hash generation. +# +# This is hidden behind a feature because it increases the compile time. +# The wasm binary needs to be compiled twice, once to fetch the metadata, +# generate the metadata hash and then a second time with the +# `RUNTIME_METADATA_HASH` environment variable set for the `CheckMetadataHash` +# extension. +metadata-hash = ["substrate-wasm-builder/metadata-hash"] + +# A convenience feature for enabling things when doing a build +# for an on-chain release. +on-chain-release-build = ["metadata-hash"] diff --git a/templates/parachain/runtime/build.rs b/templates/parachain/runtime/build.rs index bb05afe02b1..4f33752ca6b 100644 --- a/templates/parachain/runtime/build.rs +++ b/templates/parachain/runtime/build.rs @@ -1,4 +1,12 @@ -#[cfg(feature = "std")] +#[cfg(all(feature = "std", feature = "metadata-hash"))] +#[docify::export(template_enable_metadata_hash)] +fn main() { + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .enable_metadata_hash("UNIT", 12) + .build(); +} + +#[cfg(all(feature = "std", not(feature = "metadata-hash")))] fn main() { substrate_wasm_builder::WasmBuilder::build_using_defaults(); } diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs index 179a425ca04..987b88af844 100644 --- a/templates/parachain/runtime/src/lib.rs +++ b/templates/parachain/runtime/src/lib.rs @@ -86,6 +86,7 @@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, + frame_metadata_hash_extension::CheckMetadataHash, ); /// Unchecked extrinsic type as expected by this runtime. -- GitLab From ae864e6abcb41f845aca9a5602162f1618567285 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Wed, 22 May 2024 15:28:35 +0100 Subject: [PATCH 050/106] Update README.md (#4502) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ..and other high level docs. # Polling Please vote in the reactions of this PR - ๐Ÿ‘ I agree to replace the website of this repo to https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/index.html - ๐Ÿ‘Ž Keep it as polkadot.network - ๐Ÿคท Different opinion --------- Co-authored-by: Bastian Kรถcher --- README.md | 111 +++++++++--------- .../Polkadot_Logo_Horizontal_Pink_Black.png | Bin 0 -> 189152 bytes .../Polkadot_Logo_Horizontal_Pink_White.png | Bin 0 -> 178754 bytes docs/mermaid/IA.mmd | 3 +- docs/sdk/src/polkadot_sdk/polkadot.rs | 4 +- 5 files changed, 61 insertions(+), 57 deletions(-) create mode 100644 docs/images/Polkadot_Logo_Horizontal_Pink_Black.png create mode 100644 docs/images/Polkadot_Logo_Horizontal_Pink_White.png diff --git a/README.md b/README.md index 63743a456f4..0b1d3b6084a 100644 --- a/README.md +++ b/README.md @@ -1,81 +1,84 @@ -> NOTE: We have recently made significant changes to our repository structure. 
In order to streamline our development -process and foster better contributions, we have merged three separate repositories Cumulus, Substrate and Polkadot into -this repository. Read more about the changes [ -here](https://polkadot-public.notion.site/Polkadot-SDK-FAQ-fbc4cecc2c46443fb37b9eeec2f0d85f). + +
# Polkadot SDK -![](https://cms.polkadot.network/content/images/2021/06/1-xPcVR_fkITd0ssKBvJ3GMw.png) + + + + + -[![StackExchange](https://img.shields.io/badge/StackExchange-Community%20&%20Support-222222?logo=stackexchange)](https://substrate.stackexchange.com/) +![GitHub stars](https://img.shields.io/github/stars/paritytech/polkadot-sdk)  ![GitHub +forks](https://img.shields.io/github/forks/paritytech/polkadot-sdk) -The Polkadot SDK repository provides all the resources needed to start building on the Polkadot network, a multi-chain -blockchain platform that enables different blockchains to interoperate and share information in a secure and scalable -way. The Polkadot SDK comprises three main pieces of software: +[![StackExchange](https://img.shields.io/badge/StackExchange-Community%20&%20Support-222222?logo=stackexchange)](https://substrate.stackexchange.com/)  ![GitHub contributors](https://img.shields.io/github/contributors/paritytech/polkadot-sdk)  ![GitHub commit activity](https://img.shields.io/github/commit-activity/m/paritytech/polkadot-sdk) -## [Polkadot](./polkadot/) -[![PolkadotForum](https://img.shields.io/badge/Polkadot_Forum-e6007a?logo=polkadot)](https://forum.polkadot.network/) -[![Polkadot-license](https://img.shields.io/badge/License-GPL3-blue)](./polkadot/LICENSE) +![GitHub lines of code](https://tokei.rs/b1/github/paritytech/polkadot-sdk)   +![GitHub last commit](https://img.shields.io/github/last-commit/paritytech/polkadot-sdk) -Implementation of a node for the https://polkadot.network in Rust, using the Substrate framework. This directory -currently contains runtimes for the Westend and Rococo test networks. Polkadot, Kusama and their system chain runtimes -are located in the [`runtimes`](https://github.com/polkadot-fellows/runtimes/) repository maintained by -[the Polkadot Technical Fellowship](https://polkadot-fellows.github.io/dashboard/#/overview). +> The Polkadot SDK repository provides all the components needed to start building on the +> [Polkadot](https://polkadot.network) network, a multi-chain blockchain platform that enables +> different blockchains to interoperate and share information in a secure and scalable way. -## [Substrate](./substrate/) - [![SubstrateRustDocs](https://img.shields.io/badge/Rust_Docs-Substrate-24CC85?logo=rust)](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/substrate/index.html) - [![Substrate-license](https://img.shields.io/badge/License-GPL3%2FApache2.0-blue)](./substrate/README.md#LICENSE) +
 
-Substrate is the primary blockchain SDK used by developers to create the parachains that make up the Polkadot network.
-Additionally, it allows for the development of self-sovereign blockchains that operate completely independently of
-Polkadot.
+## 📚 Documentation
 
-## [Cumulus](./cumulus/)
-[![CumulusRustDocs](https://img.shields.io/badge/Rust_Docs-Cumulus-222222?logo=rust)](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/cumulus/index.html)
-[![Cumulus-license](https://img.shields.io/badge/License-GPL3-blue)](./cumulus/LICENSE)
+* [🦀 rust-docs](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/index.html)
+  * [Introduction](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/index.html)
+    to each component of the Polkadot SDK: Substrate, FRAME, Cumulus, and XCM
+* Other Resources:
+  * [Polkadot Wiki -> Build](https://wiki.polkadot.network/docs/build-guide)
 
-Cumulus is a set of tools for writing Substrate-based Polkadot parachains.
+## 🚀 Releases
 
-## Releases
+> [!NOTE]
+> Our release process is still Work-In-Progress and may not yet reflect the aspired outline
+> here.
 
-> [!NOTE]
-> Our release process is still Work-In-Progress and may not yet reflect the aspired outline here.
+The Polkadot-SDK has two release channels: `stable` and `nightly`. Production software is advised to
+only use `stable`. `nightly` is meant for tinkerers to try out the latest features. The detailed
+release process is described in [RELEASE.md](docs/RELEASE.md).
 
-The Polkadot-SDK has two release channels: `stable` and `nightly`. Production software is advised to only use `stable`.
-`nightly` is meant for tinkerers to try out the latest features. The detailed release process is described in
-[RELEASE.md](docs/RELEASE.md).
+### 😌 Stable
 
-### Stable
+`stable` releases have a support duration of **three months**. In this period, the release will not
+have any breaking changes. It will receive bug fixes, security fixes, performance fixes and new
+non-breaking features on a **two week** cadence.
 
-`stable` releases have a support duration of **three months**. In this period, the release will not have any breaking
-changes. It will receive bug fixes, security fixes, performance fixes and new non-breaking features on a **two week**
-cadence.
+### 🤠 Nightly
 
-### Nightly
+`nightly` releases are released every night from the `master` branch, potentially with breaking
+changes. They have pre-release version numbers in the format `major.0.0-nightlyYYMMDD`.
 
-`nightly` releases are released every night from the `master` branch, potentially with breaking changes. They have
-pre-release version numbers in the format `major.0.0-nightlyYYMMDD`.
+## 🔐 Security
 
-## Upstream Dependencies
+The security policy and procedures can be found in
+[docs/contributor/SECURITY.md](./docs/contributor/SECURITY.md).
 
-Below are the primary upstream dependencies utilized in this project:
+## 🤝 Contributing & Code of Conduct
 
-- [`parity-scale-codec`](https://crates.io/crates/parity-scale-codec)
-- [`parity-db`](https://crates.io/crates/parity-db)
-- [`parity-common`](https://github.com/paritytech/parity-common)
-- [`trie`](https://github.com/paritytech/trie)
+Ensure you follow our [contribution guidelines](./docs/contributor/CONTRIBUTING.md). In every
+interaction and contribution, this project adheres to the [Contributor Covenant Code of
+Conduct](./docs/contributor/CODE_OF_CONDUCT.md).
 
-## Security
+### 👾 Ready to Contribute?
 
-The security policy and procedures can be found in [docs/contributor/SECURITY.md](./docs/contributor/SECURITY.md).
+Take a look at the issues labeled [`mentor`](https://github.com/paritytech/polkadot-sdk/labels/C1-mentor)
+(or alternatively [this page](https://mentor.tasty.limo/), created by one of
+the maintainers) to get started! We always recognize valuable contributions by proposing an
+on-chain tip to the Polkadot network as a token of our appreciation.
 
-## Contributing & Code of Conduct
+## Polkadot Fellowship
 
-Ensure you follow our [contribution guidelines](./docs/contributor/CONTRIBUTING.md). In every interaction and
-contribution, this project adheres to the [Contributor Covenant Code of Conduct](./docs/contributor/CODE_OF_CONDUCT.md).
+Development in this repo usually goes hand in hand with the `fellowship` organization. In short,
+this repository provides all the SDK pieces needed to build both Polkadot and its parachains. But,
+the actual Polkadot runtime lives in the `fellowship/runtimes` repository. Read more about the
+fellowship, this separation, and the RFC process
+[here](https://polkadot-fellows.github.io/dashboard/).
 
-## Additional Resources
+## History
 
-- For monitoring upcoming changes and current proposals related to the technical implementation of the Polkadot network,
-  visit the [`Requests for Comment (RFC)`](https://github.com/polkadot-fellows/RFCs) repository. While it's maintained
-  by the Polkadot Fellowship, the RFC process welcomes contributions from everyone.
+This repository is the amalgamation of 3 separate repositories that used to make up Polkadot SDK,
+namely Substrate, Polkadot and Cumulus. Read more about the merge and its history
+[here](https://polkadot-public.notion.site/Polkadot-SDK-FAQ-fbc4cecc2c46443fb37b9eeec2f0d85f).
diff --git a/docs/images/Polkadot_Logo_Horizontal_Pink_Black.png b/docs/images/Polkadot_Logo_Horizontal_Pink_Black.png
new file mode 100644
index 0000000000000000000000000000000000000000..8909dc96a62a93f2bd8b9204e3f1a6885dffae52
GIT binary patch
literal 189152
[base85-encoded binary image data omitted]
zbjYX7-!ij#IHTntX5r%UP4-u3$tjEz8VZ!6B5pNlewR!$0;KzO-MS%z-b<4Q<8X;3 zyNOMC!G+ix-DzM-exLZlAIK5xMbRIw`yJwR$Ojw>K(|I;A6mN+(Du)vDs>*GWKTN; zj31#|SsbVFtf<@a8Y=!pUL&&;S+ufKmDl+$uuINDQB!)n9- zJ_@hY!Xh@ZP8?~|g?EIels>LWo^J>HtV9?!;J2>v}K^pCI>J9`4f2NxHVOhxtLM50k|^DAz{=1ZuL8 z!s(9V5&@a)CPgjZ$nmB_%tC6qi60^( zfFpJb4nJ#sHZ@|5a<|xj5+5nhBQN2bM=m+jL|Irsu4|aXC~KUL@M%$7VU@KtwF?Zm z-L$hr1HDe_%MsnE8J2s3I2T0J!ZK{Ru04pX7ZIkCYC6Awcc;|rvH4l~|#jVq+#oY!82t0FP zPG#|n#DS!n2!hWeF5@SDR0%iJ85TS@UR$zoq?usvZ+NeOae>Ltq_J|Ko%yPZ)x{7p z{rELj}(chfDX6JW;KW6$;iXs>%a!7+-!_SY8Vul4DOV#xH8 zkEk3hD44I{a3{fps@ebVo1q;;FhOHzT@$M*#@c zgByR%9*pByCn{BJYTAS0A?$+Zt$M(3Te3FNXQ;3i7VID} zo5ER91$(&FHxwAR((cTU2X4+mcN@HeX_K}*jt@?T{MZ&hMMc!EH&t@ZWq8%>xPpm{ zhV=;B2;gG(&y}@$j8QrQ+FJho^9{c{UbQ|byb3I~$K#*Ha%{uQKK3l|{CF$yn@8!C za)DcAHKQ)x^3Py&kxEakXgZyKTiwkNyGy!1CSIqNlWmKXSx*re*>w$v zrAR?%DGr82FxjVb52>sM?v~MQReF&a!RV~N(S%!wbnB~o|CvJl^hlFJQsU<1JJ?wf zg2PvoEM)b5DREMM_9eOG`BZ{|G|th<1hr#^ zRdZVr<-XB9+QqmD^;aEc9@C@3^Qj1qy!z*}OB*C^5fO^rTR+_1`kb|YJm}(dCWmwA z3v}{)4c07b`BrB*r~U<0#;RHY`*t4Sx{^15jNcq>bFBeQi#VIu7`v{)q9h6I^T%bD z?sqo?sPf3ClJOh_G|Cl2({_tW06xW8N3|ZR8G+GC=QN0_KRf~pT0jbs`fSW!p1XIj z$5_n9pbVDR;$U8{D0vL!Zd@w;yoO4I6Vv^fqf17wU>??)AC61f9g8FrSdE9$P$54B z7oGzdEw(Q|=J^_0Q1I!eEddRgw zBM|;-`c|co_Z3@nyVT_2mQqPdvfcj<1N=ErY-Ge*9|awfWs>;E`cdihI_m1b62%{Q zql_hij2fu&v#2|DEw3w4_|G35{=7cal@5g*`4@gZ`5cs`P^adTyLG8r=5a`*;POww zWjnmH-`%09-r<6|{xo6eQDQaIKk1aIl<57T86MJGx)=uZSB>7HhVD*3S4=Yg4wWrN ze#Wu_7f1dq`D*)36S#u{or`oS_m6ht7TZErLtu`-o^})sx?~!>yZjszh$^>GMQ1XV z(SFxpy)6pfGg8*O(}sD(>W2T^9g{Jc;_eJdU1*+(i10sIOyWCG4sSX;u{c3i4G6W2 zx0V2#;|t{NRZs3c>?}u%1E9?a#``Rcjda?cH)4dNpOP$+Am56%tQzEi*+x=t4^H`$ zWGsdCfJIAXU?Y}WJoEhD)%!AuLxiwDz_vIAxoQ^s%H1+vs zr_49ZV!_*t9qD-UDW_f*Dl_Tzf&K|8p%?y}o)q+XfJKJU&%g0N!ho-vmzUTZ%?*WC zpX%PXMcCc6TPIku5#5~7B_`skV6NJxn!RgjGvnOOJ96d0sd^X>*x>{4+skp#muC(L zRQ#~5=$kzlpD@L_d$RQ~<2X0wl;KfP8d>7S8t=%rN@&_0iK3Q09vz($3!%X`FrQjb zVtsU|u~N7L%#K;chQc-&IH0ee@15yTA>~^AiqkO|Ir})Ms+|{;?CFGn8Dkw=D#lI- z0&EJ9(ZvSTH@@2VyBU!?ZEGI(zsK?MPa`_>0=GY%tDquV%qiJ0>x;QNNqrq!)Sz8F zBHiJ0Sjmra2UW$DH8kSH=GpzW_bX=HTURc*U1aCG!0o2|mBP~jZ*LANL?tFH1Fix% zzAawJHn{qXGMsctx`cX^XxrAovf?ixq!;}RIAMv2B%r=W_v&=kau~YZZorA$J^Lof z@2GnEr(oEJPH2de<-TpKBD>_+tHhG%2hv~`Ja-=0;I2_9G0>#11WyekHQ=0ITQE;G z0E_Df3ekk7P|t0{HtO|Tdm^7#?hfwpkgf<-*FrohzK_QuT@SyMH7A*nzP{)a41-oi zq|QPwE&|)Dz7ZoTp-s{p(FS-!{z6%@eg;yYVSpy8Vd~GDiq}C%T{ERj}D_2|x2U=`K>#QL5tISvSmC7P*QX%Wk`-w=l z2IP}?0m-UiRSMe>{qx4E>gSTTMoL@}gs!bD!5uurA;RspI0B0IdPp8(qr%Nx!gFmn z)lAY_`Bn82(`Jbn$l-j0s{&#igtq<~po(z1XFx%k3}+LaBk)fsH6YZoS&=N6vLHgb z;{W2Db(uJD9}xa!C?MLSLGkIBegsa=zPxlAWlJJW<{A9z+Q&r&|r}E9#6E9`Z?M*mz%2XU{#)GD_V293|Ky( z_hyjH{tG4Io2xL%!%aCcx1vjttP*N(Q23!*g}w?B$`EgTB(&T?SwRKlk(f60v2uD~ zCx^8H;nsVzlfT|~jXYX;v~+HB+Gd^_oZ$~!mN2C-%Mr;W$Y0HA@aSDe0eqePZx6N-F-BC3349HhX+$6z6>ak)u)w*6^x-P%ud z2Y(~kF|$#%l|)6RG85@MFpEc*Kc!L;d*7tj5j5_aD0Krfd!rQ`ZiwW7(&O8ddVwu= zP|g&DEhi3W@{A(EP||AxMN}#?-1iF*bhZ!$=Kdaz1LeE2HK0J|LPzr3c$mLQ-Q&dO z1m|%jGQ2ggev}EN%A}+-r0XNxP=5WO5xk`mUAWWuzZOP^{AyY0M(mmp>XCE>3au@u zNcjeyF?2x(rLx1lkzRPJSTfu@?TC(ozJ@3UbKnEKNj4HbHfaZ14d3y?R5bVDx&z$ zFLtW-!_qG_lq>oLga5rh*p`dml9B2y6Z>mn$rl<>=>(D}5?}Jj0U^5fp`!}&3ECuT zU~0ONoDS+?n4olEMs0fxzO){T+`#$0-f<CX$R16?la=>VBQn!$oq1OKTtHE;m ziQV33t>4U&!2qa#iD;UETNs@9Mv+J5)Y8S`K~?wr;Ml$a-#Vn-n0gmcwxuQ%mUQ#M zt&eJ!J&~}EtD6?aoH#j>&7gPGrUa{PGfu}uMyzajZx+>AUMN2f74IY`mXxIm0vutfh3fHL)f2H z%Rs^UT;3>(2OsfNzbF&#WDfYPB_#zcE69m z3C?Xfr$tIK1O2{4#^EKf@aJJn{mFXNCiY*F*T+z*KTII5HKsU@d0r;IDj>LobJ7j=md#1!tYvPZZ709^(?hhiMG&KGu<3ufN z4jGD@0H$m~ifI~Sqos=nH%-zx(f0waxW$hvTL?qn*3w1cnW^sF{|CGLwdp$=@#!C} 
zro`h6VSK=?7Dr$LFUg6g2JrXno@&ttp^XHf;kpGYZnj*LwIJXgIzK*tjU11ppPS+p})u=#&-3_2+PO4Z!yQg~8X8uXJmOx$Z^W62lS|N3+&73U3Ib z@bifs+iafYLL?)qa*!xgwtAS%jQWmp&H1cAd^NY;VQ5Hm%PwCRDNW}74lo}kc0U8RLq zHp0jWFp{2%5v=#vybHNc%)Vvi)pY~?LFn7t*58*n^m`r_@scS!Y=5F;GG2hzuiF_Q zBz-c71|N48Un2Zq7{_{)>;2dqHl*TTzeqABcF7TffwsJ%wiQ{1aDUza;%gcu^af}% z+Wc=2@YeqeSo?WhpspT5pFzcSkM|DG!wA%+UxSBRS>OPqwLsY7fnV&AkyR^lb;SmCXdbs7t z75n@9`f3FcBNTLRymq7fg?ulwbOq%M5%Hi%lJj>^vT7!+Q`-nhuE z3j8Iasg*pjtn^Mt7S->A5U0ejf^>5@h8$eIn*yF|{?dWNpJ2qoqO)ybiISej^1B(! zFBAWd3($TJ@>qxmog}Unj8J;P4#VHr^HFbnc{qjd741#98bD|I-&T0zoYJRMz=T=b zv@kLuZMYM3B{IU1?sZXn8)DTlN9DI+E=7O!S=986O9~lb9D^@m_mw1!Nr8)T?zvP3 z)Y;aiRIX;dMb)CmnX)fVVb!xJoe>>e+_V7VuuvrjOJEH}!;ldXJP!8w)wxO7#ibU( za>bStT?oS}zFrU!aWylB1N^ag^MVvyi`RDqBc*NL=umRK!)~E>t`-#zw@IfMahG~Sly3`LDba} ze2aHyc=AgSYxD9SN+lb~u^}5A?cdo5*B`zKkfUi66h}Y_uGW_<1DKbFMUnDQ9)~|) zL%AGy`T9i~P5xzMm9f!EV;>#{5|p$z4ckBw4}SXpfZVnmpeC+x>HjE(=bHGSs_pv2~( z)3mx{lD^Qd@!}dd()ey%eM4tAPSKspBJVb(@-k@dGD*>($4_UIYose0w&`m9{CI6c zShlKM;+C&90on~~xS}ZBERA1d?9dZtKC>FTlZW@&8j`84#{h zabX??+L2ho7Ahsq-UeF4h&?RbY4~-7f1HtW_dHbfD{|EGx(Mz6+1|P?DxNdVe6$a8 z(cIg4AKCIm{`03`fH`e@{zX}K9I#ilo&_AsZ+~JDcLlhqV8W+F^>#1yd%Iz9jOE3- zLr&})x5f+HH^4Qqn}uO3GL%}MUR7^O+508~chM~6X+8KdN2N`U_o{2A?56~#=qQ7s zTd!FjBe*8dvrPgKb&N(gSCSODz=5ek>GN*#NhFZ+O#qJy@6cKQTk7W$vr%-qu-do+ zkRP{C)k}2chq>pDFxlvmviyD93pq^;c(+?*E0#+QW)BTMHkMFNx8TT4k}1Mhx2VxN9%2fWIpTN$7#7Qpv;kqWcUy-_qMie zuDBSK`nDUS1nx9jSKHrmc|gg}`jqq#^jA{*wIN$$t)HI`K?fa>RNgJO5WT6=8TcY^ z4}|z;SfIm2^$GZVBX)u@-CfSh&^{RImrJQMxvO(d(IA5y62W5`NP&&Kd zlPzhEj zSU_%GG#X_*zPBCG|Dw$Rjld7Bq@7RpaDrg`#e#(z7>OvYgdd0EzdsQn{SPL`NB1$) zO23v1-)EI@K0z0Oh~$z$b=BC|YN%MAcqd;*c~ry-gYAj}0&m_JP3dWmOX_>$vPKRvKHWLn-{WLTcZ{ADF#cHUl5B=E|MrlaW9#76Ev zKNSxpM4=t|0b2A z|BB*eD5)_bA|TjLbkz^U0VJza2ZxHI{W!y!`fk$3E=vt9X$p>XDxpF16YhP3jRd#H zRQ@0t3`=wGQ$G1w3RsuIvs}KI%exo2y(9$4v7uf(KSrRoW=d=>2{a=G-rBj8-Hf|e z7j778apE`;AYmdsW2MuQkpIRterbK}I-EzoIT>{HH$X`O01p)?{@#)ikUY0KU+xNW z%%5=pIR=`G5jgLqIUh>Zor6#Ug~t91;6|+TKJ43dA608LVo$ou1w{w(jSZ3}MvMmC zBycZNgSySp#%k4J@DIv_e`6b@uV1k;6lj^wkbIBn!X+B5Z^KF-c$R z85o<90w=k8VeR~;`QfTIf+bqY5477Yo^?T8Rj=-{XX^ z7m3dgq+IlP*9E10M0@ zvf&L3z(aOs?(GqcAL4%5*g1)l>Q)9HC)_mOgv8_S z*arf2iM%lkuH?q|CWX7CKQ&0uV9mS~^CfGGB-~)sfDES}@1JnCUqAPM6i2f8OHXAn zlQm0dhg#DAXe(52Z^6(w1M7b<4^8!&YqYv>k;&gI zS_5#w|JKLxxQGa~AdB@HcW-8vga{cBHdbGU<0)jt9ubEVHI98B@3wQnz-QfKLDV)! 
zjGX-j*tKpk3tbODj0D~j=o?Qhir~uo$H{^P#^ZK{rfplO$B_)kW*k|uFeZZ2X9>Rd z{kv%O6^P}9qIk!15hNEhw@Pi4`@zjZ;s%_c5+`wzXSnqKZ%B-2GKjiMKX#Mq(C=Zx zlrx*;<va)b)k}ijaiFhoRhX!lq_|_669Y-iUZr9p zhgje{(=avw>0%Mqc3E`ZbUjQl-7rM+rvZOGN+1^WI(a9{mo9zh-#!!BD1PT&3ct{CxM^Q99Cx3Q3O zn_%E(6ErWJ5vl;(W?9wDMm^J10>d|-d0N1)Vy(8S4c`!1tQG^`^P+t#M7)-f&X3;) z5hqI|h&aJ%*6+Q%S$BS-SEwgWmo!>o*=YNve0uelvPo@6aF*A^&FuRDt?g)46aTGP zRPH8Qff*Wb3oBa*6#+njHw7#sB%S`HbW)2QT|O03a#893d5$2 zai^dj`N)XQ{Kz6frG+8uL9E_z$~Xzx2DbyvOh#G!b!svefE^73RS_eM+;B+n_scO8 zGP$?(B36KQ6xbM|}0F^PMZ|RMx+UA055ZwX_^0A-Ju#bp+ zFe2f56*a-6J1fZj;~E6>|NKzvgL!6t5`EfE(==3ed7q%%sg)ZzeHkJS*GAEL>#L%( zSp^}DAN<^Jb%!R4ePIOlx}AX{u=9w-h=~UeedoZ1osO~c^LsXH3&0D5SL@>il8nzA z?)2b>8oX~QSq5+PO_&$W!HB7Lh20fk7%>38s^_e`?L`;?K?NW0heF}_;vzT2ZwN1S z{IA@&tD~+U5@TS&hTeUt)~4DW(go&6D|mXr11?D>+{xzz&C?5U>+Il&S#l0(SK8ZQ z5VAr8W}6DcLNQEqkstlLnt32F0cF$*?{N{FiSb2X;yS8S+Ni|Vir#20&eUI*Exko1(Ew->suF|_E$5|$0#Pks-e zA$)lkZlocl2luU_CiGT4($T{gi5DQf3&l~;{~e^TULr6GKCCtz%XgzEm2EJZG}r~X zpn*ERX>Tiw*o(9b5`Fhpy5M2D8JzfSj&B?n#=egQMm6?XSL?(0som4d_7L9`hWjlp zyDRbe#55S5vI)|||4D{ooakbk&34p!wX*PC&k6eH+OMOz&2@K!7@H)UI4PUh=biW&-M-d#tLEJCs zY8N1;m^XcBid=xD(#LS5a&O(YM|DibWrCY%KlVPT+1_?w{mye4_s5 zVPzfI4~*sU;!%x3^L+$@`9PizcF9O~qEkYRo0OxugFIw~2sXp35@b~%bs-|aHLQ|Q zqo25jOaa}Q655-=Ch3BJmf_tQ znUF|;3Am!8xr;np&B=QEsO2D;LX3VndM}FZ3 z#UtY<2a9FO;A|L^Cu=youe1y`L1nVQQ!~!c^3kMb?I6Dun^xt0mD|uCY)uGLTVYZF2Pm|IJ5Vl62-C`t{G3-2=d)&d z+Iz!P>*t{UX9i#p{X#ZU7#Pqpo>%s9h${GKc3Bb0JaZE`oVcG^ka#0no+txe%T%uE zn6Z{UFg~Dn;1HzAcgSggU{}5xm28;HuBCNPur{fKf2>FkJb!TkY~S<2stGD4<)U)s zeMoMQq%ajwKglTn%Px|Lc`2W#vg9C6=e;44H+%1Cz6|JC&sV&pvPZlIG7;z!SQrd! zqCfi$m`W(05CMVP;dqz22i)X;hz3-{*ppsiU6C4olaBCy8;%+3i z9$(%or?EIv{ny@K8q~-+A|k|*2GhNu4UhhvnxPhbqB}ig`$FhA18efpHck#KR39$sZv05skef-8WuuPWhm{bjj;0_ifQkS*%i z+0VnIU*C5WyANcWO5kY&q%w_KQ@*&0VuMkA@h0hyGxGL;J8GT{E1C~{-V0i9K!rt~j8wkI zN^0S)oFsM(N2A0Q>%6wLOY-K@)Avph#OwJoc)_yB{uNouPou)~TEBc14NB2N09k^QLX zrz?Bx6U}7aSQgRV7Sx1xYHA8)i_SuF3al@IHlvc_-b*U z3-}nejiyx+3SG$uij8&|A1}=}Y2ZOHe)g0781Mt}sn42* z_h1EVDvFlpetlbxtVoUlq#Qso@d=711syj)&i)^Q`tYTTl)W=RGA%*z^PeWgweT~6 z6S*mkax;kz51MDwO-eG;)WDPy6qTe*CE4ONyXPL*UXvWdV0n@B0vJ(x>)T?`Zw8_U zvUT`x?7g57FYq)Xa|3sCoM!c6xRhMfOI9`ze_Z(WLc=xz2NJ!f07?HtOAxdlh1PGy zfv(skM<1=!L096TDE3h@0EObE zW)=M=UN0!(M(ZtJq<`+JF3&U&gyXa90D~jl1^#EQOyPF$E!1g6Zjzkdo}-IFu9sR= zw{awLNo2*vD$ZBrHo7`D?4r{jUvx?{t4(pwiK$Z0bpRl_q9N6a>l!6we13-+EKC1Y z^bk&zRutTMg-bZFAK(29;q5n$I@H>zFi!CXmqP)TV{@2JH+lmQGKPG!vOzlY=L#u< z!7xAWD6Lb`JhmBrB`FZ@!0~BdESMVFT~-+i)u0giDVJB}2*#%9v+E31|>Fz*VUY<}}nIi?mJ0oW8U#kT@Cz4&bOphG@!SyyA2_878c*I#Ck0)4%PK)wun)elG-AvZjuC&I|Z50W9 zt(^oh!AYk&nTw!wVRoLc)2n@LCx2^f`m=@U9ngP~nz#3t!w9jYG^F(5_8V$MasHAi zQnS9hyu+=e>tUf)36G&QP6m89i&({Z>-WXN8UTVO{Z%GA`V#0GxNtMLzoW!sxIH2X zl}@}cE52(W&iIXTr=SR>`?-Mu;=3+P&32v?Ai#mHp8!Wtn=&4;SaKu7;G=h1FM2l@ zZ1S1oNTyf?O>t_k5_Nc?lixtwqSfrlkIyx|u@JeRMQIP-X8g}R@xY!Q`3sBp*qM9x!C$724Na{~qkeE}6Iyfed8?d@UN;&OzPx#yo| zz+wRfM4Q8RW@P6!l}rOq-_`<6qyCzIiyG)2q@?#YO90J|nx)k`I?h8y( zByw08pR<7$1^J62kdQ;#^*00@cVwbb@dr6dj#FT+vlUkr3i}cORW8opQc>`>e};{> zQ96*KvL>wvE2b3Q;+ybmP;G8RSIim*t7x)Imcci1JoIHjh1p?4TkGctwnmWKjeS#% zL@?D}dixpf7!`4Ndn(E9b~ z!XYxNb+doW_OJ(42&w;?A)=zXx)be5xR}-T=7L08BsZ}NaKdf^T%ovyjs${@=aXQQ zUKYsc^TS+u}pmHUsh7%Lfy}cFhKwP1R7m=8qqNE{qj|zp9*R zd1TQ{{uA6C27Q^gly>+-G6;N40Z0$*)ajtFp5GUPwi6{MPsAmS2tR!_xm$T|+503I z$YGvGUseB=gnN{p+|stY`sGXye~#dA8)j_6z5ecQpV}U+|EiVN&mSdlqo#{-&Fd$B z_8vFatY_o==*Wc2`9dY1MvR3oy%7VesTN5(A*Qi8;~*?AAh>*Qq=O zg2BE)r2xeD65%~7&tN*AD7pLlN{pxpCyoL#PAfqx_A~3c7cDL%4iv#Mn;7nGHvtOQ zQwsyF&amfV{WMI;d|cuAwyIPK?pbZj-R$At9|H@VpFFRwUcxvz^mK>$_{iseqa1-r 
zm25yh{QtNB_prLfbuc_~PdLxx!Fn2lg9Bt-v>=d|U>-gq2ZxUH^AY`|457Fu`}gup z=oJ1teUR9hnUZkJ+PjqY4d7t`UPlCcGkod3c;L9mR?2t3Putg8nVhtc^S|D5h=are zc~}QwE4W+V!;-f|@~WKA!14vTH|91{@>3=0?+zA5bkXT$laS{!RWoTn0+<47^rt!T zdF3&!dOPToEG2=uyJC4Bmi%^^a;!otJBb51mamvlziY=#IojAArZ}UVX`R|wMf1(0 zj48dwOm=sS{k)W;09wOlyG!f;eov9SBmJyTVYZ=Gfks?W*;{ga((6aJemP zS**r0RZlX_B;8^!2aey0jA-ESaqco1AYFxEN{lXe;6poa7e|KmAQ@U=ki+Ky-~s4S zHfRMUYdQZfa@@q|y9Qd87&{ZHZMo{(Px2J>1TtAv@7`=$@#Q>hf2I6Iq&qY8yEH`t zooz$uhT?aV#nJMa@8sSuW2prB_}Tf^r1S&$9k!;aPxdb5RXvw(fpIh zKN>u&;8V&^6ru5w{|mO&G=i!9#lX+x83y-&WjUHHM@H+Q7Z1yIK##hU?qt~Ft7X@x zJn-KDbf~U&*{5u*{iJMXKCJ3$<#pSk9!HtnXB~4_+s&!m8<3txJHdUg5?fP=_QvjQ z4>Cj>kWB%i4HcM{La5W9$NH_Nr!sDLU_SZv+Mga zjb_o?g@~`egbynvM^DOKG@qa?JsS-1pX=^X+9;qMWM(dZ%>3EO+vQq-kUyOr`r9wX z;G6Vg9ToKqN?kI1wO^KjoY?HDE)3(_OxNb}%7Q_8^4NI|Mf-Xuz?UE(gK*f#gAO9(TwrPWmZVrv9qI1krYNmgZ4T5iR zXU5}qvuIH4cj};6`_mZOCPy7r*-}zx=d$Ni<_u6&!JNYNMmoHJESL_c?|2RmO6&iV z_)ig{{83t9@+>zuFq!uF>i61#+elG00et&&nI9nviz-HpPk zNG%P@5{iVBfHWcv0@4C1B_JRmNdL~>-p}Lnec$~D_MG!MGiTfluK=wf_jArHg861m#d%cabC}XZ>iB$2i z^tDc*^WB1jpLh#agnbHLZnR49p{#G8=hM1MOS2Dp8IE&~$A_=ZOQQtXJkiT3>4af5 zGXtz=`@^B!2Xgy;(~FZpm>1R;j=mA{fuMp*PaTR2KyW`Pef}FQqFdL$rzMY6DiUH+ z<0kpFjH7BR`O~03K&<+97|MG0~&Je z9|wipt>KS|4Fup*{xHgz1_zkF6BDYS-BLIFmTtTvkrK!rc&+oN}ysoM@%yc=;P;A+|y6q=N!Y-H^IqSPh5kv33O7>v0G_f52O)ERz*7#Ar{O7$-0Q_N6{s&wGwEpJ+}^V5xgh2IiisJikYSk&II5uhuNt?WyCw-QNP@F2uHrg!!T=UOi@uX z0H$xf;_%Ge2Ryis`I$o<<$Hd&Ov%!1N@rrd2b6Y=%XyR6`|@N)`bxP`3Iw9`=*E$EU)B_rW6Q1GD)z6;{@hj+Att|)Cy$46xvjFeHFM*I z_JxQ;gf9MIbG00W!B3}R!2rXQ8mhfiXX99IY$w2>!};@8=aNvjP0Iz%)H6kuptPl`J2#gSo}FLB6e_pJ}1u z)^nu6RGWUGspn^=EmWebTP*t>cfgQr+o9DTm(Bw5FqeOM0nXNcZw6dHsEdNnCgAM~h6oT1~G6rcniQpJl6?-)=ju3T8fip%?h}+;^3Mz`0S2`v*d-_Wkjd zlv!+Y4nx0~1MN*deCkQ89-TDkQwT6h9Y&!qGGy;$wZOt4w`WtmN8e}d3l|q{kTRu& z<}_+XH_;b^W@7)?v@U9B{?r2k=29$Ogm+#>ZWm+O1$k=N;lV;YUc_q?Ibw&C484Yo zzDqL14`hP2Th{6-a&7@=+`6`5XTvBtbg+;>FnPw4^X@5QnEBuDWo`QP`7?0}LHzz3 zDvR+tEf_tY1s1r}Rw~q~LbQ%j3EomWNzcVbq0temJj7)9FwD7@gUt;YUJwCXO@6pb zq^}x}QDHI#8Aqc|M%0r16e$e^A-&bGX^74h+R2LL*;Kx#9nDnn*i-J;&Cg*ZIzv%8 zY;0S$&7zNWKG)lgy7I6-{%~zpDUZTsnIF5?KPe=-B)%}uK~ekl=u9T`Y<_%Siv8Ys zoBx^e3hIkm9XB?(qdNVKd#K@@m#i0L_1k9K=ULtrLrAgee_cKjl|;yMXs`ag%MxGH zq8{S?qK8Ntq;gWi+l@4^BD0Am_g}jmt<2`o#_hAcyZn|N>PRv>UUl}IbCGF-ZIHj+ zyD~!s<1f^UoTif2h3A@=JX9870k2HV)5=G2jBY=t3ENPqAPwJeC0?<$#%}ttJ&1u- zQ4+`|=gkRpQN8T>B4j`QWi~!e)kvf9_`-VSpuOkpN_gAnW$*pSFuL_0MCwcjXEH5K zqQ9Gcxv?VFqw|s5laV`HOJnjhkjQ%et=`b6GEyf(ORm4}RS1?ySdS{tW1mp_y<{YZ$0Xwg1c{h%!~c^7}d^n9WdCypW@L^w_C2)f-H5F7=Pdiao~fV zQKwFwfm6vBqsb#^tEE8Z(xNAy2_jR1)Irq9Q`l){LEmXh;#XfWZf<52$ej9XMJez4 zi5j23!V>?-5E9SX(Cwid>eds!#szWGcS^fny?8xDc@4!$8>=1UyVpw`x@0(;!qFVm%K{G&F0zB@C2HE@hnmA=(ED0 zh00=Ag?t@w8>|HxuOG#YmaWeryeB8$M2Q9Xh#2DsSh-3*J*Xyp?^k4heJ+r*#pZo19 zcqQIWMpH|8xtzx`0lEHS&44W#elr>9hqiI)dS6#mZMlHF?|^9Fncea zB+@)e)J@SSbdD&_By@!5;K~&DiciS`fQH)@@F6Fk&aH77GVYhh9b1@rv5btZ^|~ps zt4*vn`PpDr{dICg5Bb<&V_9H|^1~ghb3VF5gWD`6rsRCJVN=kkgI`LW6CtmUO8UvAZ{O=_x^I5ytG%QQ+A zS&}RTFP>`7IRVf5(CC}9lb)j_l;qG*nPi=Y#U|KSQ0xLOImZ1Q;+5<7QX@qYa8OET z<$VgDRbztZpSePhGsZds%2<#((mQh12uirzVk=r)$=o3UMM+kO#EXO@y&P0jWY!xt z{nx)KT%LPP4_XIP=eff1E9!kE4O)K-+1a84K6-!7c^JXcK4(C%4fBI!QH~qV9TB5&Td70M?Tc)+cUj}?%ijZn$BM}*v*M2tQDF~a0DXh4H4b= z6=)>Sz(~la?Mq@lcb%-)4mI0SIHI?e>toJtg`*R)Avb(fB8?4oRjxOb9Xq_UytYEA zG%7&GFrzm<_Q7o8M;avCB=T0|m>lNOyMvu}mp1hsvt?g@bkcgPByzX{t=w#qs3*m} zKIA)|i&VPL-uU&Me6W!+9^?eHRQ%Zk1f1`Nv!>bwie-FFLlqgCj1P~O?1tdfu11G5 z;+xymvw{ne%M_ze|Ji}^V=XZPSS#vsJ4?oMJhB?U`Fnz}MCS#I1VI-Uqc!WCPYLG! z%g*=7Lw#8$F$YqFNkR)g8rXNgt|k_lIb*+gAIEWab! 
zI$?J)=K)Jx$WuM3K4(T{F>TvZf4fsPcJoworV7`=U->F(r$F4x5sWer%YFRDj64+Z^xr4f$q4~N_c^WZv8vL9q@dl6L# z+}r~P^nxkiu0B6^dA<<43BJj7s^A9ZQhE90!zf89u~!>myJvg0W)~pa1b^Kx6X0NU z+WHtmM8z^M`Y7}3S2`FFXovk7PIty(5-!-*?z2qm1vx@H&c zib=tMTEb)f$P5n?WLQ3Qi2Nlm^YXE_yfjKI$;)9BI5}FlX@|w^mhbhb_kx`Bgc9bN zrabz&VMWMR46UjlkDW2yPcs>fR>VYM{OfK{WDOS?nib zqTcY!ikNK2+WN=LY*~(j`e2!TGtYeBs+Ogs4F5le~ zbzE;0(ejqhzRAn-+e71lcZ-MpyBYJj4P}ik!7`YXm8C3d7gb3Xyx>Wb3lP7kznA=j zhL_f@2`H6i0NYoi`qkH!a*c)GcWF~vPUG$51WQr7)iKEx;itk|fQ|10T zKaxL2-Z5vqc($K?sL_tHKMqAXhAT*JmX>FHtyjDpVOxBS&C~09E;*OW26(KUhTkV= zO&t_?_z(TkcXS9IKe8My+c}lzQWU8m5!Lz~4(5<{k)E;TN(Q9pNmX-pWhIN;=DgPW zb9PafV4sBn8z)hbOA^|^`|BOSuOIu#RZ55Hzw3WhTGGy;Iz{w*QYC=zpG(7oFoXWT zf9-U7L5E+|VG@uwU>;clk?G>7h3n!0LZFWgR-ZrEM=QrGqqvJa%bOw)@pKU`xe2IJ z!0Go{trI9**66rtQGaNp5_J1G#$2u_nRaboQ*G+l&vnGXkYZ#~pbb9!j!04D{CfCU z1x4V8ZEMMDkD=cRzBAsR8$Fl1L+Gn+kAJ#hYZmNUm|iTsqRjAudiClhrZdFk?;F80 zXEu$!(;~T%A-Cu1EqiJ0p743hod;{BhS6RBK3wqVKM8U>Jm!t*Lg>#N#Yoj%De zr%(8T?)Y{7h-^g-jQ;KrA7I{0xJdI$hHdO{m5*(I_n_ydUO2E^^J+PW!IyeP0bALV zPa-71(JK)>CTG>BhLHxcAg>eIpCm^jk<}n3`NAmq)VRuh{OZ_+W_C91Po@j<`NG^- zMjkIa2?tf9yDY|cMRdVecqOt@l_yO%*fka?%`5_amZOAXJ7!Cbi4*E+ zsJMqx!X5@!Aj!t1^>7$$A9OontJaB942`gQcgAlPio}X?i>TU(~Qu9c)Cwa6S$PtC3;_b|NQTaktnNXpNs+ zMPgj{z(`ARU`JOf)@NC=zaTkyNMtA!{-9-rwffSyK}71~1jH>WdI)85#RGdj2cxpg zcRcyR;>{%6D-A~qk)a&m6@8&MJ`s`U_{o*xs|GjIbehd&ZfyLUO;flaM|XNijE_Uq z_nrVU$O6fK-vG-q#$GIx&IqrNU0gT7s+s=bjAvqMIOrsq@GeJ-koOMchp2Nitq96l zK*n_=a1~uAQljk-N;z}rR{6>ujnO@pCrWm z5+(0*js{t5@IkEMT_fLRua7IxCl4ist;qVbVMltqgb=@o|N2jie5_|lo9dk~lDN1e z5d3HGq87(&$jYO{v-?lI+QmUDgKxcL@&e@%*T4aVy0Q!ABAR)19SbYKcdn?NbbbyV zuW>zirg@3KvVdeztMt{RORx_i1PYG)=8dDP?_LtJ1(!na-+v$h%+pwJWK6sEE zP|W?;^(+tNn+A+tBC|YUW3$jXad=6eIJ>dgALXLTywU`flxExK(t*)MjWnF9HV&9D7aT?8vKIH=5PB znNqzbhfV5PcJvQJ+Ae_r^NKoGapUtn4|_BevW*0lnLj_lP`JnF#0W8df{CN~(R-wj zuzd!C9B2%QRur)w42(XOu_%6xq?ZP)Srr+&<{Ok5srvDdS|4y#;=4*p3!r2a!hShoXvKNWg!5|5OCsdwj zX^W(*6_~5e+O)l0WXxhsD+n6e3-kda&RMdYLokb5LRf>W(+pe{f(sBsAYJh9nlY6l zt!By^N1lRomYuy@P4=SJ1@#lH;0vHaZ%POf zb{UjP{;W96AJ?hI*nfe(1k9^{CeH4iqc^hQra*aiX0^FvdklOOCtRk24`2yj(3-97 zS1VaT9Qju4k=92@4BV}0^e4u29Y7cK+j9zSUAR{&r%uyICF_CN!N^r%YhSa8;G4ir zXhgRaTW?=#<3|GQ+HWr&zyx+d{Q2K=&UK6q8+%tM`O$ohdP>=rMQm)bQ_W(e$kyqM z*F48_vF(U-i2V^m1TMB5fI=$p|K1OGiY2|s_)-YFfznJFQukf~;YA98CqH-VLvLoJ zA!Hbx$~yRo-EHu5ZbVJ9ICF0$&asxX9hLG$conyKMY4c-RC?> zvr{<6ZIxMu7Hhn-FL_IoP{iJKU=aK-+lGc9;fl}qzI3zyd*WV`na!I~1(1aC^j31z z8jh~*8Ka+3>}WHT<+$C-gnpp#kZlrvCkT)(xuS z4zc?6F8all`J8miuEE!@r)sEaLv)&-2%^e0K9j$^@0NBkb<-LjQVM!m{=Lxsnb5So zB{tqxVgTa|i=j5g7RheVh{Erev=Lady-Q34B>n~pG<`}jqE4BB-s$J;f;*VkyWU_S zDiV$6(_k79NtGO)&8Y7MO3@S6FP#5n` zZC70z#Jtgmd3@dE7fGEM2fT|Lv9lLKyQP9GiWNI% zYQe}>a(Lm-qBuWV*xj|+;l4%yud~`vuzl6>a<=0l(tp>Zcp2ZvY8{;JqXkrkfs$>A z*m3`4Z1PC^5u)n_p)AuR`#ps^?rZCTK0W^S%WL}T--ks~D|xnpW5?4IhT98t62Zxi zut5LM$u{HESG3;Zp}>e!0Jtx@MVFCtvh8267S;!)XCk*)dB!r0{yLdmKk$ zMf&j-L1&@mMa>RHyJatmg6_6U9JD@4r%AVqb%(R!ByK;kD%`0@cO*ZvmrgKM=gtie zwa+Q;-_6aAi!%-V?XZ`gzFxMz|3g{$+S1&yZhaEIb)_JUfn`hlpJ;FGZ?s3I+S$Bp zXRv@XJOG8UZK_9_L8)MplbhH|TU096e>z&t_K~`>P?`|~nmU5u77mukcpwU5RpSkD z(Xz4Hknas{<-aP!$jwrXGp-2Qy*LqjC-$OWWsWl#Fh|7lEA-dsWtPg24`S_9R6@j> z#leVO^U#*Q6)~9!D=9s7`$1#fbT3jDGaLXlSZ}|)jJq(?8QeUIXKU>V54YGwe=~nf z+id{6qrc(y?gLBnl2#BZG0{wSM`bmX$|g|vh6sNcLRAfFyzr;POK(smVT~5Lub6wg zm_a{szPa|F$nKz}7F6B4PQ2E?L1Y*0N=SeCE?;G%*a#G4Ka@SZf-SI;-pG-Xyrr9) z$9>IUxxZ9Iq;9D{aZ5}ysp-Z`(4V^1#n0$#b}-v5LL*6a3R*})$dm32UTqSIPw0&I zB%(U1m{+yf?|?wY|0hD~!8r}^QpZM#39}xK?3a)rG~JLj;-f>TK)HhC$H)wOir$0Y z#U~W)Wqkb*UO&gD#R>pp%qY@}TtU6?lmMD@hoLh^hOIH4O<%I@O8S$7DVM*>fP@y| 
z-p`c~am0BGXlseuE7@Yw`=~QE{V8Y++BL=^i)|gRPSwVBuW-u2{p{`;;eQm+mkx?)rk)!FSm;T@>K1=ULFI!-f zquvSgVp$gwjcf^X(l~s#&9MITV?g&K4p>+TV34~Zi%Vs9jz_@bi!b)TBmur zrhO_WNF!-_*OeaFI8w>(azwKO3WvD`x^|@}f*3hkm( zVEfgls$iS_^w<0GGkI3PT)Qi>pcLgs2C|m_xyLdjzWL?O?8FraB}6)gv5O**fqxM^g8Cj~fuh`hi zRn6(W?G~yO9@uSGyt~oPKUJPbInQ$BIS1a4zeNH|JGS=dJO5k-B5)NHi}>9rImC5m zGQZouV4ApA{qguh6(9%lw_U8z;9uLN7qxy_QRe&zglrC3(@#gL_u?~XT{ZwOEN^*` zk>rULhbOt;e4|jpvm=RH>f9x|!xvZ3n0Q-BuLo8rE85bj5;fPD*W3pWX*UO z-Q0&{#U>NHlFM}U>7UXNTl$>dj9XzTpqBq9Sa|X;TtNE|ZJuM*z-#2l^F^Q*GX@VV zP+R?b;d+C$$PmR3)u@Imq59)#0tsZ7iy7*cY;k}`Mxj_FJi%TzU*FA23M&UFhcnS(<-Nlw%; zQew0qhNO`LGxlDYALu| z**D5@GO@=#)*KNuei;S=^K~)oUZXA{#PG@AY$7qvuTY5NvV$H$#kFhIKVSc}i_kV( zEGajA>U}7R2_Z=0EV3*#CIY_rQY=7eFm7C0!)h_<`-{*6WUx^t^f%RpBi6-r#d8 zw2S}nhg}@*IfIk=O=*!?F9c@}10Zn`Y+5)Qb!yQG*BTB%``Pm+W zYLDad^gP=M@8f+LOW`pg!)SY{s-<@@k^7;`H)R=?`((wPb^_ZL)CC)+#q?8@5!qHT z=UFCFe1UQmr2G7aX}Lm zVshgT`_iT;k%BSj>fB3Vn;OdP6dS5h8plC>z7g=!U(*0mB%eTegm}6FnaAV7Z-sI93~?x+Y1ZNw`|X1D79h&ia>$c-iMfYhpm54En`>%+uL* zXqfPYhzC%YYoGs}X?ZuR&BiRCb2tso3u%S^wfT1v$r|LKE7>S|MB)~ z4s_6`DB>qpj|TPC!I2tgrP*F~y3~J1<@%Ttrb5I1vE^CslwtAv=^R&y+cS30Jsn;Q z>;47=>T(u220EYCvkSbuD?Yghp`)9i+^YZ-&Hf_g{vIVM&2FL!5c-&5l-<`(6$&MK z4*h~~xC{Unz8wxecGzoGZl)Z2p04~)<)^De1c*@CYJaM=5}7y@uSG^jO~vCdX7db3 zj%6>F&o#RyxyEulNbGjB(6~(mUd6- zy5jg|Xl#byL3dZ3fk-?=3^cp{YwC7v0FG32+f};-{3qEJw#U69#N{l7y~f&X|xqXA~m5vKK^wp+BMKzi8_yF z_U1JWf~iYG$Q%iEhF!)e~E|DUsK)pF-|!5hZCQm}Q}B>O&Q zM8W}h_b$aJpR*A1wX6Qk)-J9730{q3mafF0XY{YLVG-!RCToUJ0DAY<>xh_YZb{~#5TY<4N!8vwVO1TVe!JDPBRthyCYn`x_*Dm z6B!^(C)RpxB{$Y)6ulx_r8jsj1o=u67w&*&gaMq_9^WT>9@XD0)}R@6B(~?Pca~~t zvSGV@X&&}ZK9@)IcUc5W&*=A-Mrajof$oRbXRn30MZE4Q0R+dId^;WVXXwPv>(_qA zeniozIDOoB#l4>a@Neq~^`*L74SmeTz2^ zwwZ=8gP>;m>>gQq`mLpVJSoW>&)OR?pnXh7?NIjAMP@2BPDelmr>=u|U-4P8$TX0d+@_9xtW>OMgVl&nJZi}~618_T4{-e`vX5;aKG zJ2K0T=JU+SE%Ng%6j)b*gfaj19WoMcwJZR%fp|e&kpr;(1vk^DR)vrIq78Vshse;| z>7X54Fl?uhr$5|PSHLCnun3pc!eBXwv9I+)u%)fDSJPrH8^*-+uK}S5ATcH^L@0No z4C9&ID1Y4#mY3^t&ilJdS|4yI6_ec`%E--cS7kQVIhB4*QwfBRkog!c_Y9R;4+~6JL3JhoZm%69}p?idtPta*yYChgB#qj zW71md5HTT!W-M0y{FVusrbs&W4Vgg)F=fvhI4U(}QKa>?8bP4yqF#~k^ zzubANst-!hK+vhH1ju+xYMyCKc!L`61(((ABZiYNDiA?-&T5$5%P8Jfo-r;ADJ#Ba zxDLaD3AlAVTckBc)*=^3*PZES(^>5_-m&~--+#B-7Xa=X=U@8_rf|`Z2r-PW4UQAXy$mZY5kA0 z#UlhL3evs6A3L|*3<9q)5No`1yw~?~Og#NvoCF|&M5o&<0;=-7^;V*Bs*}u^4SGQ3 zomVbVOd8O$R30K1HLc9)-YBEGVp}If_!jyS9~tJL~&9aq7eEwO+GTuP!2G5GK6kh!#t>9ha=~K9V!Ic8kkEd z$qQLex{_d{o%BA+FNkWb=DEbxzIpzsbQ1tVN*D3ck%Lp&PR zmoG#*$@**6JQTlQ=kP%Ht{kVw%haHXXU7>o<9`Y zxCvq|A$PPqfmeQumQ$C3ZpbT#$azu_j?kyaNvt(&&cmOAl-O&Gy+-8slekF%^lPhx zO=at-2W<(eQE=!hPmWYI-l4`iyTnACfXqplx3uU~Xu%`B`4D#`LnWYg2Xa;*&h{sv z_Kk0Rj7YosA#FF-H2%a8H{z>pbW&^P%q|LcAID9h;7O zt;751`$%cCX^r?VXiF@Wf|qce=*}UZxakDTpy$n`4qgHpM!@&mt`NiA8HT(Lw>pV z*LJHVLCka)1zP|zD1iNht`LQLrR%-nD89um+NDWjJ=s(>V!#-)BNID<_`LiFi9EJ4 z*#H_0D_u7ie*r>Yq34zf3+d$Wo3Vi?%2ooHhr?EKA~-8o-44s!YnL?ds4&i~TP=bh zc!7OE2!EnvY5Jy~a*2Bj5|c0nq!kh(8ZF3N=ZSa8w+8oY1;`+I|5KN6rC1utT(*Ur z|IEw(B>8xuEwjw^>&&kavaa+6v^H=~WdR7t`jja6l`WZM5ULlUb0gkOKEw4b}=Z35< z)a3-PNRq`9lv6?$hm#=x88bu1LRT%u-3b^jV1elNr6kV2rd~MtbSC7o;ib+U8w!Ge zFXe!6>nkIEJz-dSfH2v^Mk?$J677;;!<9+|uMv>s}lmrl+hkwHX{y+8BJV!}WzJKnY>L@$BW=zMC>O%&- zh#nBu?Jh{(2|Rr5Z>4Yyt>eb&GH7qB>Vq!~jF=)N=0(d(kB%^1kKh%N6tYVw z1DEY1b#d=g(+&6d!$Kgr_m8I*204D>EE=?dervA&J(V9Zd+8Tvo5gmqep_q8Fwu~k z!!uyOdh<3fOWo;tSI}YJZ&y%!b@&6tS8`WE-L5d?e@lLLbRj_#SJ7b&%O`eQ%mJBh zgKz`{;{CU%_kAQT25!28p8@37V2J`8C#4^`(8n9?2UoOZOz87LT1`qr@WR{5iT)`E zC7KKGYX(|3(vHH_f9eRL6MZy7u{eLGw-Pcl8yhR-+2mq@W(>K`=2_J`5fhgv30k@` zE;&5os4Z0wPU_)ycCRt+>h@>1G~2A#jh~inO7=4NfJ%fNJ=LS0rTn(EmBi|A@pTS_ 
zH@D`}@gO{Z39O@77rSG{bt`TVRP(On0Inhylf=u+CIzo^-ht8uwKzkeQ+FwsE()mX#T+; zdTrTff)L2BzvoH9gLW`wejz|-6mGXj$_I{=JqWqJO~mTfK^4zP`(hRSWmvJ#cf=q9OP>sIn5MZ~;d& z;`y7SjV=PB5wSa-oq$Ed<2Yc}A)gkA&mI7wdc~qQ)Ny-0cUT@t`mJm+aeQkx5X!TM z){$hS>36*5c&Al4p>i4P$Y7$QRE;Yd4Ec3TdoHvBSfeCA@&oAoWzVALK-q93p&3Pg zY$y56{(ep`J>OVxIQe8?0er>$yIOE&W&Q2>2sIFHO2N=b?pKGQ<-Knulx3`Xlu zwYIfXwNtG=YI|CCVL#3XhqXOlCaTWdfc=e0962P$jC+4r;!X)9RmX^Y=8 zNQr?2)M#&A!%Lq-3VAoo&w*wB54EI8D8C$r5)r85L5y9pg;>3O>0O(u&}kKHr7#R8f* z6D99_i*e)*t*R?TvnHSV!EZaY<08aq>i>I={M92kR)klQ06(C@(Eomaf%bP92&$v= z89IB&yaSYm2%9O2k?Xxf%XC$zPX$XE#hwzh zgoqDT2y4A&Nb-dn6YR?x0m-oJ2<{$xG!XM;e>OyWk>-LJHEjIHn2p&Gs<)~;+oVfH zP)alH7qPOxolWNQEPwE4DIy|>4iNA7_giD=>5NNMtVJxv0N+q$v9I*p=Oa9Xi>RK7 zA?FByj#C}o4k${R6upKXI>!MP0;*YwP`IQ{bk{QOGV<+bLaAXlu&~)(qYwr9aWj%* zYB7lO@K{FTVL`sAPO(+ROK0}?1}Z9Z6aLQa^oiK2XoXw20>$YKG4Fee^oh=aDT0o4 zg-^mKcO5c;d-NP+f#eDQy^6AblDgK#l-91G`XdxOyWdFSmAba9!96b_EfziXjPcfL z5Z+Klo%ZLVn8)%e{Y?N|`K2uCiM~-uU?|Q?Cflby9lqsP2u#McAi6-!r=Umb{w?yz z+f$tOKt<`ErYE267Z=vyByZ`OSxJIE4mLLVSaY}x>gyBH+MclQ>fDk;lw@t-_4BuY zLW*(y<9~jd0RP36A~4l)5W%*Unt=& zV9Ww}sUzYRKYzjLt?(m&e^dcr$lKjAxDV7CMZIHw0dI3kfnt;Id!hv$PVv23b8CD- zHcX9*tAluHxC=o;uxA63fYSA;#NcncQsIYu zmw?eyYRpTXd@YVNW&5|?fdN|x{;V_=}q zzo*DifYk8HPr=1Gf})afY;4V`t*Dmh3aJ4tBE1Y5KV~6lC}H)HNvx4adMfJ%;?vYf zcewG%Sv780hM${(9$e%z7}kpb13I6OvAAp)R*18EVDuV<7+MLO=u0)Z@bz^D;Y~=R zgiat=F=hj~EK#MPHU7&*NkYon!x2qh_0{x}vJS!p2Soof^wdj^>+Kuo zB1BPF$}68}687Jis%sF%DqNe({@&+*5fX;?_n~tH6RmJ;Ak10y@#eA!bCtF=;^{Ax!mPn?6Zd|oP9n(hD$DHo+CMTR(j2D>nhf3+jveU^hvUJYjw;dK0$m=46Gwzi?1oHFaubg?ju{aDOb1YOMLAflL<>f=g z+wD@5HI&@f>@i^(pBqHbLa-RZmz5uz7sWJR72 z;MNop4}m~;&f$hY8>jB~L@jP_?Socmc(^z z?-I|$xab2lpABK`11IV*mYy1yl}Cn=!IY@=Hh;Crn&|0^f>gk?-XNcq@_^2gwH0Ur z)A7~&v>A!#;})52Jk(z&n7ypZ$~yaeg8MxRgoYB~>X4on0G zaQ80U__NG!-)kzkJA_Fh`jQa46uNc(Nea_^kbTr={R?AD;`xq8G`;*h>1=`(2Jy=R zjZVu-^cAFF+DiK%vM}Zg=tVQ5pa0^p(|VK9u>cAN0ITHo=74TNu7SRM= zk??xJxW~Qf$x5IFdq6e)B^y3zSZ}fg!V!OaATJEYazbhg2?-dJl}Cu!-`ziYchY#i zbu@V@uCmZ@sjuZCP6G>4xB$MiVo4igLN#qt6W96iO6ZJu4d#_zeErl^DA4*EBK1p`Uub5`H4jcHEBjp3~F@ey3Nf?oVF><3kV0$&aSfOqjj z8$SU^Qyg|*viLfnPF=A1Knuv4TKL<9qGY)3aDj)~(}&vUyI$}G`llkKxbRAS>@7oh zqv?tmZ1{0dEGJvPgy;{%Kp}vB%T4UBLm)ArtQFRFLVF%?heCxVbpXrl?%5o~rm(OU zI&#iqiQAC$Pc0_9Qdac(+w%ACw$64B-5Sdpbz<@IykYq;a#3P`Jm_6G1yf3)GQUd4 z_Ga%qBf-dGIIJ7*b-QxDEL=}_CjV%-6i{=b?{60Z(iz`)$*s6JRT9@(#II1Ugi$k+ zUvE(1R>dM!8j{Hq#?*$kfia1_c=JICd(C zBgyMBsN}$)yX=z{p>y%}I6g2xVTBA=0z`(}!~&7hA`XZj5Kxo$?Sw})3aW!BK97vO zg367QtbdNZW_F6s@-pnBk_CAe-$eaTKMeW()%+DpW!$TYqi2KxKx){;+6w)|U-#@Y z5wU3+B%JYUu?KEs&~Tso&+(E`EtzT}NeXgUVO{$6y6kpoBz5(Cu?CpNzn1%RT>JUCN z$)zyGY^XI=>#8ZcOtm5}k%944=fFA4(aNimXh+9p8pq+(pyLp%~_#{X=V+TP(=_*a` z`rl1VUFl|^SwA}i&tbZzZ`z>X%P&opsQLn0(Ql~afE+1iP@x8TVluqG9|(Cn>^w1H zc8!90aS}eX=}gAN%NNG;_7Bgie8qb_uHI-@7`2_IF)54NB8<{j*dvLP#A~BLR+nA) zNLq#n#*g5n@{fRO=XH@l4L^Ct(RAA9lQ>LX3e~xtn4WlfE4@Zd#G1oOCcK}RlG0a< z^_HTSdd_Npk`+pP=Xxt4c&gMaAR)zL>Lo91w2O9TheiEwI`wsIle^7&VcqC_p6R{8 z`IAKOn7Y_7y^0)p@N@P;Hd=<4fZyV^wRbL&1pF(G)EP8j-vK9Yo}5)aTPj;D_dHYm zDFO*(WuwR*&!%cXB=_3`nWtO zFwkzH1(Kl}ELkh3G0V}EV;tuJ#=)l)mDF*n+l+jnL@?!x4G60nNp!Tj|hAZI;n1e zB`*_S?tX2!36Mo^NQkHAJ+b4)(06<=D-af%Vn zV4M&?%=IpvTaih_dkndbc@9W^&|UD)gM|vC^G<7mr^q)_ zN=QX+k+5>(eyqJ3c})4Q_eM0L^l{q&Zx^UhYhB(sUX&0Qza&kc)+W?{Q^eU4u|wyg z6p}<+&)JPTZ+=-kOP6c&d!~6Mi-SgZPp*K^pxsmy_H|s=gs0bgVKa79{@2+?uMzMY z-DGZr3$Mm2d!u1=P%&*DI`gxvbE6My%T=t8Db~PdEN>-e^>tLB()0>bc1_EBAs<8} z=^(T>b@@fP`Y(WkqUg&*9p7=d%e?5bd8mmJF=(VgvZ@{$x=nw?A4-3koL&&P=|&%)Qo z7RwOxx7P}?`h?SD7(eUqEhw7Ty`v&u5K>3yH<T?f258{A#uwDoIEkC&+Gdst(! 
zD{qTYU|uNxM_rfJiRslh&!1YlzfX2VB#b`rh~H`yY=wng(Cr>71Ci;tTx%#-nmlui z=+T_Q40wQeX@(cW)3crJ$Yi>Y596pP7rX#66)$};I}QLlCD`KwR8NV8<4Z!5Kp=&D zNe+WLbYo!bU_@3yYp=z&aR4*m1|Cu{*Po|G!i>@d^@f z`q!~GLu3#^;YbAD?>g^N?Y=JsY^}A6(EH`%l?^=|gt)0mdEopZ)$UBMV%zBK;ohPv zU>J_OXa1hlot0aoCg?nfgRu%%FE(Zq@g4tsrOFC@n46|V5dTylIM<;J$qk5ZeLyEw zX)j#B6j4nPgc`&cBfVB*51=&q}r|NN4D&Ewn59QS`Bo0@*ea)G@JXLy%VXuL?$AJEP?xV~Zn?%mMZ zhGW31o77=2{Do*K_-uSqVR5%2vz zFp?+0Tv?^lv3}x$%Vr^&>jOIKLeBaAKi?s=jU&rzIQf}$z;cOTq_nbfI5)#vZC5AE z?{JY*DNN6ej*sEx6>j+)k%)tN-7P#z`@2eD2hA;ULJIRZTey3UtoG2|!P^-E+T^~> z3vVB%;N#Th9TetXtTYA8Ri!mGvTZMQ*Z)7BzB`cW|NH-SU8}ex*-~bdz4yqdj0iB}GF*z^%O zO!+O?swy6AY}}RA1mm?fQ(@kc;}jbiFP83c=0pYX@vV?XUFu=UMvaOGOe$^u``Sqz z!fjc2X{r`~;orZ^;oLD5{3+jj)H*rqG*?&|IXhfcmZS+7DQU0C^M>seutF^IfQ+KC zNa+w&!?Yd`?M70h;>+|RiXdn7V|3wTD&b>(FB`Xy)+f_wZ#y1-@&(i-h>;q;!;p9B z?W&#XQakR-$O$EvwcccHEMW*>fZ%6}eOqFDXZa?i`MeC&NL=tOY%5M@uODUpI2FdR z8AQ`?ikW>pY-YN@Fv(j(*0?DBSvDzk=8Q#z{JRT_aC+s6D}UE9zMfB9QsJ-<<3Lf7eTB~&r^y@9|YQg!)o&pSe^$r4x=dS)6Vv*prz z4&o8ueM`fUL!wJnx02>xnpuz0Uh$bD|6 zo{6b-nqf2reS&ON+3{v8>Lq& z+`Jf77A1r6CmqVx5{xtBx2QIfPE;Q~e(rPiSWZyIi7goRkJ{EH!uH+lt}*>(|4Dno zI z(B`s#?&hh61;f-{FT_Yna8);O;#3p8KYI%Ixp40~8o68ZPGsA9d5FSGrdPfxpP%@C zk*uW~lM?c`2w6h>c|o-olw70o)Vsdbznvo)0$`f6GvXfe5m)RQXPeDI4BN8)g_yw` zlo?O>lE0eGe3@#!?#(a#b@9K}9YY)>DH`AGNS+84HG`Z`{5R49< z)!gvbX?7}Q!pdjRP;{q{-R6cfx|-nu%PQM5u0#SVV`QmAJJMA;AOU5qT5t4XA;zw$;W5?nFpXPJh(RmwQHXbpkFQZ7?>7(4jTxcD<{@AXhfJ7I(Mhv&>y=Ukj>asU-H_yat3 zLVX$&c!D)^j^WH0qx_(jTQeXoZXDw!M>&A-hVuANJ|f@!f8Cd;@8~vJU5fk$7aj`e z;~~{&E7EaSkE5UV*85hdzmU50QfOwzy?KzqJAg*{yhj*eS%j1K(vdX1GZ~BHZ!ava za>^B5zJkFC&-V7@X7%N+$`K=0aN$OS;gYqHZNa%`Rrd+`!@0eq-$~pZhZS5z`XBD{ z?iQ3)UW%e;uI!ujVpLpruuzDI#v+ETuu5C)W&EdeN{It)8Q$#~&qFS?&cjx> zkB{b_PH6!7U%_ye%YN&7d1tAeEyk;JSudj=!+swHbxACoCgO}5^lqvz)+rk%gg#%T z?NXIBa};WHb7%DY-eE^k;{wfMBac!?j(i7&g$3n5rvuG&a#C+f1uWzJlpX~;*kX*T ze=BAVmO6)N3a;dNo~*R8@)3H}+`RaSjY7t6@$~tvoB!H+dhR50Sh`>n(me$DPLc>q zK8x?4jVMqdnTY^NgIdUX@~K=K+v~~~mR9ZmeEfYO4Be~#6-7Tzji_ut>gAsv`I3fm zUK90PZTn^5`Z9z3~Y+?$rxkd?ChR5uV-QI?C$TOI*be^>P7Y+bs1qpFBn!P zoQsVd`Mvs?I^>XLVdz)g)Q=JVsY}^1ZpFXt{DO&mfyVOR7#N>Q4u+T7PJKI2$k5eR zlbgM^vYm9itTJm@Vqgf-bei`cN~?r1seL=T<(RcrDaY)N+>R0i$c&?g}w+5{ce9zWn`Y*L6 zb=|NeY#Q7`eF&)Yej$Y4B(?L|qHNb4=*NJ2@2bMzN@R~HO3PvfKltdMS&ib*ZH`jq zMIT~{IqZt1j7ewMY4JXQEkw?4ZsmefMwXx5$}eU0YJ3^!tlPC~R!DjQ383H$DlI}e86POL%d)b^l<*grd`f!KJNm_z2E&DZgt@HhAoIn?8d zzj(*cQM<9K6}OhGdF8onq1$aCMnfYRIb^1omn7ZJVHOkYs!^v<;v_N3<;31b6L;7C z1nl$8X>-vI(*L-fuf{2KE!Wgk0@p7P`c}_Wf8HVMeCdhcMG?t@_0eEWESUQ4Ky0X~ z@7G07DZy$l1W&)#yD)DB9oEH8a8l+S$8cNZ3T7?zx#OQsNqRP*+4wdNOui{ReaZO(i!jh-1c|gCAXF37wd|WBHjH6;R=upCsEt$ z2_ZMUO`6~ok~=JP-d%KHk2r?gRxFqO?Bx1Dj`C&uMP@S<79XPYCY^l2MKQ5*9tZvP z>9eAwk+Z58_u+f1ryi@YnF@iOT$mUj*^hJmf2Ya;$BXe026GL!QLbpld{hG<@i5N6!IjRzRkco!6S-N7Kx{X4`*N*yD%#KX$ zUiruYqAt;Xe`tCWqNd)mHUihB7=3O5l$^&x7CI2VD>W=mLQPEQH!mX{3NZ($ z@fPR;9{cLLjeEto--WV|RE3v5+~bYg9Mkw^fXJSF@Z;4DR>QfI+KvdGi;4G=A(V=* z70Mn69SGtsai~~|^VRb&$Wr(G31N-t2GQ-GURIy&MQe;Xf9PA*k&pIX^Rbz5aHE}* zJJYt*;woEsHpqAp^Ad;9wS1a~bc%qul-6H7bF%1xj58Y3kSNj$;w+M-x{fl|ys>Sa z-v<7_fz7-GXONi-`< z^BrGm%r0X%N8`3dkS6hZguFa#_77IcB-cl@AS8x(ss#P1uq}J2<%zD;pNo*6sSW7m z+VH`<`SSc2+?0AL#@Ex&Dpi5vOf8z@DeITbsOuzO%NP^(Z3bhld?wDAWUbOPT#n>1 ztAUn;XhlSlV%Bb9e$?{vW-3^4p8Mm5TJI05b zda0!u@@>r}Gnp)FxVlrb9V3Ozls}T$k-@MWsq^flewHPi`^AxJ6CLif{4ZG)p5n6= z#<0o}YW&`K*5vi*%8|7f)CJEh{+QC8F?q2Vg(MFHa5Z;ETGv4nJA#+a47> zZM9rCwQ}e@rW`lMIP-h+;#8+ZXnZfV>S*YN?X9_(ljFS-A4c8NwX08Wjei>h-^@)v zMx&f~-}VUC@IZVYX>9#oDa;9q>vEM$oqfRfJvm28&L+meLe``xqPA$dQXjh$fZG-> z*IDx0DN>aeeOGPeCgd+PPC7j$F?h$hs;fIYvUG%NfKgWOshFJD;&#%9-2GC_%Igdi 
z$9-IQHaH>BpVvnXlncn(?J~#fZm#@s?HRSB@YiI+_xmkyi#)$L}Gj7VO;Hu@Gd2}U~HN>A)hvJH%D!YbV6CZe?3{>|6PTsH*y z3koOtvC02);T-H>$qxLm%vH8|XkShGo&UpfXGOOD`Wa?WrFQt%f72%Hu3b;A zmpwxxoNCPAokE+np^V0{gulVN4jYr<68ALKH-1NU)^V|j!`cS3@7rnLqA&d&*xm8i z-FhJXkeTq4eRz=AH>D-RQ4AXosss@7L;i%j`m&k}I&ORna-t3z*1l@h;mLp0MlK$cMDdgpiLNp-GThXI z(#*t#@@0!c;&9HHWhHE1R>yVyGjh;7E-X!oHhUyHp~)0CR? zsK<=jBf11pvW$@*azmio6+zOw$t@9SdeYgOGAu?s2!Rt)yl;7`B%lMK$*2>Bp_01+ za+k}~#$a(nL>}pdPIFf6jNyVIDXc=LIm|CeUZAl(rz|EjTA80H=gv~{bZ+&OkV|g2 zxjH8&9b!o0$+lIZ!y=hc0G)Bxt;dfXZ}=T+S-!m$R2%;(3NC^mB=L^Xl?%`A+zqG^4K)TVUxKvD}qP%8qRGoq$wQFLYPydW{ zYh!B8>i~K)ANl>}E{u$~o|Sn|C;f++vq0gYto7AZ0?W?-E=iIZcl324C>YW}-z)b1_Kc3sc#Jtwzo60{MOR?NPQ%28l$ z$M$(saX~s<-C?221dk9lQQDpv+>2hw%Ku#Pw+WACoOn+B@18JKoZ8C6sej10#}pZZ zq_>k>s3M`~F0;csTNUB|EAWQQqU89P+*NRbv&7WEEbtlh9cgpg$u3Wl`bOxRt_p|Z zI?WrxB3NaL=v=r-lwgP>w}t#7Be5PnLWMoFu4i?)pS*UW9QdQE$h=R;K~ou#q(Z$V ziW6rXAs-M9sg`@Hy=q)*CjaUitXk*$n}BiSK^>#N^yBZS{i&NmFyjW%zCc|o8MbCU ze|Kn6E)dak?#2X5y-DKq(y4#~L;uw=(`T?dus`YTz-Y$ypJTjt-q$RD2cHiBn=fLe z5@Pl5uJL-ktk>34(Hg1(PrMAfa2LUy9?6n(9)F_;=XFg3Eyho9U! z^EYnx9Z!%neN*lAGV#w9%F9Ghjt!g96EkX#qbliY0t-(R1leej{M9RPESFWAi>**L z@WxOOcV_cly$wW^;Qh(h6@M1DR<@U!u;LuP0fz!LfXWO1V4-Z{VmS+n0L- z?s=tQfMbZNa8UD;7cO^r+Nda&1=*~DVNK)~C#5+x#Bjo{hmml%Ji->ib(MbCmxQwf z_aeTWJHxyb7x@NG$?t`Z**OpXK2J?nWf9G9bOa*}33VSl1;o7_S=1?jwOfBxx5-Ff z*V9bt$a=7Jw5k+RUfChV%j!kfPV^z9!Tz77QD_9o0YI9Zc)hGA!v4m$EZE!VbYrsg z9+W=GuU{zC`DTUp)CntTAttGy2%?|}3EcE%Z%zZ!bwh~B{9z9d27N{$erH*n8UFLVev%L zHU9-kpb3vr!00kE@TP~^BACh5dx}Z~MJ0qxso_WIX4DE=&$i~derM1u5N1P&!9dza z)O;LdgIw52oZ+}hVgVO*`MIVlVSltdHPT(x7#Eq-RLvRjE$i4mt}QrsBC0_ZG&vIN zC*&_aPCC?49x|pV&)e9}eYWmNCP7Y-EM2v~U;fJ`|4(6T-=#nSLy{F2oB%<+nBZ*$ zSw$rd15t#M;hoSxxI_C>`9gF6aBj_JXzOcw{n6ix=i8Ym$O>Z=2;tS&DXlX=WAfX$ zSTV-y^#`1wl&5f#M!!;8`$-8*CNa1(@F~ zhakvtm4hbeG!iq@w|XtHlm>;B@(H+M-Z6ILc6r5?J^R1n4;l-Hs~#G5+j`;kQPJBp z5@KPx=l(Z~RDBeSld+5g;gexc?9nN!!VubZ?)XW#zc~oQWr3g)l9iAsKlJmh1w;gC zF%N)sLB$JyLjKr%)2I`!K8YwlM4#3T*mnOGoteobCDJeCVkpLWbgd~U+)60Od#WI= zXs+voF{6C?>Hu`njolpNhuu>a8taBxQ`D7J7>~6)sopzL-4*?75@qHActuoqe*%(U2i)& z>GqOpOwfXsETLPCAXjSSU;0$*uZC`(ZYc&_B1sLlLnAVp)Gua>6)>!#E z!tCvZ(N628eyB5`H;w2EABGR7_gj+2)6xJta})cvoEx$sqL-;vwH zYTEW$x2xnO1;e?*w7!b73`$@TSv2P00NC`{1QQ~B>0N_utNuy2pocH$$UFN$Y@@I zt{wNr1<9@EUPlU+zD^kJY24-im{b5$LNiii8{o$GT$kcxo3g%*vF z&{BIb1S#3}=VRx@i!LwG$UYgl67+4K6om`C`P;!~)3q4O5@Vi`LZx*Fmw61l7)s(T z(9@rF7hCnE_z*;MvSajT+$^Pak_zSbm^~$}J1a7ju|(6|&wgKs5g}R9;~(tr=B}A2 zi?Y%SgdD&9W^c&c8Q3hBUJyr}PRfHpWusqNVXG#1Q>RNChW$f{!3R>JB2^7%q9SGZ z{>QqQ>uRGCvE8U~3kM8*oga+b6-hX^N(}+ianXp}I=~Hge@sp|U};1QXe68wuku$uGU zqa`z8$qMgHTH-x zK*baL-s<`G{DmZr;UKxq1NrgMn z4L{kGSoSPPjPuTyXOWV(;;goFp?b-Hd;cK$F%ZVLlsguv2qrhVGClORnngqtJGrAD zDs5i6hcmM3s%0WLAT{=6tPX061ewPUxLt3s0 z)R;Rw|H23vl~U<9Qj7(VX3GE5krEa+o%5$4NAB9>$!;i=vR%WwJ6Znn3vP)xm`16{&4>gd;1$9S%#z>t^g< zl=J%@4c!9^+oMn~=p%gksx+7bqzUlFtlWWfw=C=R(1GjNVZw^bJJwn3-(yCi?&pvyrdyJM-ovC`&ip`=*l|6RT9>EYnW*bjQrdE` zuz)@WG6O1A5Zk|n!)VQ1dh4so7dRh+EsHH%s1IRvVgLFh*_sls;RaEaR6*7fQ1j+CMLpQc znY;+&k1t!CFZ*6Y7O>Z=e)b(mvRVjMfi6mxD?31@!F33yRZVuxdpQ4^Qm=K&$lUWg zN8|kal$E(VdE zlHj2xUGBNVDU>D=aw#ng%r0VXvSw+KedX=c6f0&e5hbi02s_1z570`XY59E)wQiR= zt4(5@#dnrRiRp2x&VkG3I@<;D^siQ=7=NZ$Mu<;;3p7@}i5B->7*jeIoK1(b3sNlC z^ZtM3uD&_>pCz0J)}`uD+*4O-?JT?tn&U{B?c@L#+(r^IB59Pz0INlG*v$_(>15`j#&ew9edYMI_Dy~-eVmWQS!ynMMQ9O`R=*b27+i{ zy2rN5;7Mx|_Zzi^-m}=_o~qrDwr%?rP3F8*8-66{C?5k0OytC##4bQ>gk-rQImlQ@ zhr}8N%T&2e^`S<0)1EnV=_iI7I()iB4)p@Q!k3s!B?jEMY{tOJua zo&=3OLhW)C3ygv82;uKDIntt;+s*Als5WK8xZJUCRGhqzpi8)LP#|;+x;pv>>yAK@ zt24mQO>ilBOzgQ1xBMD7dV$xJUsXPqWM|oIP-A3LYVlm|A?&s~IhGG(4mN!Z6ZEmj 
zk}9?So`IQ*ojZ9gj5XEjIMEnh^HZEM1439spR@3KneWLzMeN#Udrn{jqs>)tQNo~y zi7@>!ks2XBy9Bv3@u$}eupBK9+p*+>E|mTFtB&{4?n=EwwJ5*^80P&92dh=f4(G+d z)(%KP`dNL4gCGn|%f;|XT(exHkty*(OZ|4pEZ`e^EX6-bP#Bm{cYlv*vs}EzSS!Rv zvQ!Uw{tAP4_{(kvVWfESAMsjW25N+S6qK~3NQC`m`!dY%5xl{vXCIrTEk82#2!qVO zf!lYDruwrJLvou3TYR6QfdLb?qU4)CkX9DO*{}&VY=rbL-@Og*sN}K(iy}?GBjKx= zAA)czVbrd6oyD?`ov%@!my!S4lb)Gc#k5P$y^2^H{@v`*`?=8mGElTXhroyJ8RR1o5ml|+}DD4*lx zdHk~=;(3DtjAgTj9(z|_aWALJzJt_c*SksOCnZUpd0vOR-L{-+)2R6B@s8LjNBwps z(egu!!ZJcG0y`^S_M!oc^q4rMWlf?+PeVyFYW1}MTf{=|FE+y+OdV>#4nZ~Bp-!~= z6vF!W5XgD?{dKsL>fx`W0}Vr#i)IixQatsEZ|x~7xGw+^9+U=~NEYs!A0ZlM0Mb;q zFW#|tJThGtCebD#lkp=p`K`s?^F=JNnJF?m={ej`E~niTJ$*?>&)IfNLT)GaeIOl~ zQ?9X}I2sVR{j*Z03bM;rAY#X>G90ky*psUo7gv7K_z`~63%Ao)q`dw=i*(jZKfjqN z@Sq+lS*d-BxItq;vRA!L%q>QOiRqyVVNsDyrW(Z;B|I15K44Ui-11Sr9srF(N4uIg zsmT;d#;*#_$vpt*{o|URFRYnoz^1>m%00I!v{u?L-|$_HAy%)43vmtGk*%#^)q?Zq zXei9HkHwOqi3@`eyf}twV+7V|%RPNRAjp^M_MnN>|->T&0XR=Rwmy%}r=(wsu00Fw%3=(LZ*jKU$k|`=KrJDRXla-}gseMmN zq+Xa|oCVxt34%($Ua}|EJPZkZEt#IMLZunrzj&_c*f|^S(5(giry7Xn160Biv~?_5 z_efi02hs#l(m4AN#B(v2WB9x8{#{lkrN&J|9rm|X5R@!HTPG*nIUIOt?kkvR8~7mz zDVeww2KSC`usNt-Kl1EboLBe+9nKb{HAl`qPd+c?hQ7Mgs`Lt?_t!RtGVFuBt zC0<9<*JO!xCytmZd2%7v`0oCx=8ReBkifhATf`uoXhoeCI;Soe(#Kqe8b3nDLHpK+ zyi{%hgzmvUIKI=e2(OZBs%#{}A>EWAn`n>ffSBoL+O77iBWOGq6^XiU4M5kSV^MJR zLTKp&46}-0LQMzUC3Np{*wU(#k^j)*;MtLa%K}TETZIc2}Qi?Eftj0Su){ zqT;-KmH(RFSMfAO$ke;0qK5S;m~Bac$)O(w0U zPO)qAus;DY<1^wZdF!;a)&@NY)RB9j0e|~}6 zG-~k|dKcTQe!%je-0N5)Thn_T2e{M>*u#NUk7x7fSw{9x6e@z>4iv6#3_=KcsDv?q zgvswcW4)6r{{l|wG{I@*A2rM&jp?s?#0_mcj#Rull$LahJjm|aUqFKmI@6O0=Rxdi zC*rYHs?28w2Y7t56JlYP;4U0eQLDkJ)#iteH%a817;aWV@Z7lLKYy^ZTi0od9z6-Rg;9cj}UD6ZVq z2K?CvJ7^jXT1$@Ktti!Y6s2xQVwjj74a~=O0YRnqczOk2D$N*u2JY_(5b74?VgIA3 z|5(!gtx5YE4On^3hqK-Gv`td?M4{gmWs18o}Rpaoog^+Tj+s^(s^LgwCUwA zs$XiCJ$ue6A7ZKwZC?ebB|KG!8Fcxax9FQH_Z(P-wyBXN!OhH@8PabFUdr4m2bQ6< zIi#s|(->*tm)dkGL?eYp5M=BcVD&LGbi2LryJr6O7)V2XBBZ-rrQDirlPNkF*S+G%4Kr%q?av~H3`p!B}->KVk*@iFTef@m3hM4HUv!{2uE&Ey4ZAe z!|Mr3sN!B7ZEVjxtDBL;E`Swnk9zc;lD^=3$~v9nhVU$olvn$Qz^0qP?tk4Sm<}P! z1B;Fy4?jS!McCOh0_BR}YDR;^EimQ6Xbk&LRG~p#U&O7SR1C62uOK3raVhLV`-!(9 zE3b|AdOG13S^|KBp+%`Z?>W)#Ub6>ASaCHjjnf9#qK|<=7u+QB(k`ee zogqvw_fR7nm!PR=+i23mpBP2&_~yWQqCE`{1Iq}lgI~_HS$OeGu+%aO-bN`Pxa^nu zGCNb!s$5S^rnOh}^Y$a;{cF8=ZR4cqq*g1A=UpaU7RV+f^ zL&eK#OX;>l;(GJn6hoVF){f};lmDZ0M2v#-QQSh zn;W#6+Ec1S5o+v?$}F~esPW!=wXz>u0*oZ~{#ESLkF7DBW%+D{{XgG5+wT59YPk7M z9i{535zg>0S66X^WBsJLBH&2 zsTgBz^`Y}bXr?aF;@^F>QCm#iZT;6lgI*F7w?;SuX+K=4+nq`rQE=dcr$Kzmd`IGA zHonf&>a?QeN2$wX?s}YCOjbo4%Lk~f0><}g1@v!KJ~@GALGr<^{W)E%nLql}u_zMa zs+K3ip_gVhTiZsoF9_oXtzfwy`<1KoYpG}k0 z0xr?hP-M0gC}PJBGF5ZTRwEw>TX4Yz=%IdB9tWA`V@ZybF1I^QtKam-(D}`WNnx4> z_asSNm&G;5j;SBNy!oO8iW_KfMF&Y!O||3_@^i)?mVq=tRHaDJ!EQFx|uf9##5y+wLQfe#n6svxJ$jMINY;V@_xMKtR*92*kOg+$b*?KX!plm zeT9+b=Zn8}WGETL=zZex7KcRoa`tKQ`xs_Jt*EJ{l+5d$X@bB6fU;rdDK0ti=e38i5i4O%#Y&?|Y192Wh>Dxk|bel_6(qnbv zn960TIb>LV{Ow77Ti$y!0sHBq2iAc2SyGPuRs(lW8o)w@Xziw- zkQn)IFMv_Sv!aOm=sPV$tW%S&ExaXoO9>}j7;#&qk)3JA+8c11%TQ}SW{=_I$GhEP z&N|6+(&t3q*K>^|c6dU_$pn{ZeO>Y`;dQi_SB4b)J1|ZA0mvOXg=deLjTQro85JxX z#6|I1%wNb`abMYLmGsgm;OO~Wb@)48cPbchSIk;>&3EPn}kEFdj$`Y z*^?4!a*t~Jj80DJ=VD$?OKdk(-Z#&_N;3rv@wg*oR3)mSM2dp-LH`%^)n+L31?P{X z6}_rWO;#2rx(Tu|$ZgDs-TR*lE*M098ZE`v}9>y)GSz9)Q z5Q2Y)ut&fRgH@m6!gg#Gw6@Eh$EIsOpmCRcQ^8cE#n(>^G&rXJ@mSn>4B7>{?CrGC zSsh6}Fk7i<0QTW2*Z<_bpB-Z`dB4pZOw<{1!f;ttn4rm)fmpuIi{PTOI$y>2+U_mU zl=6I{4wX8%>JwCkK#*%k-eejrHb3bBrE4PK0TNF~+()<&m0O%@bI4IYTh#vDiY|$zMN|Qh&RaoBpqRP&IyeWiYm^! 
z9G_=Zb|m2Vs}EV?G5!Bcb@k79-Qw8pHVbbIKnQoCP(X~=_MvQ8{-Jzfe-OOhe23kn zMG{rR(Np`#Lb$^iLa@(&2;~$kPhf@0v|N?jV`|ywvdXtPZT@=QyDoO#b+nHKhewKM zztU|R=uD}N79Y08eVF%uMEy*LPfm~I0M$uvKIRwCmJ(Evs;{MC6s#wW zG~`C2CLAo+Up%Snsb_)eq7zCL!x{5&5AlHt--~Rp)U9|yN?hmA#(pJzf_ys@7TFpP zw)^}rb*d;+7fRX!+E;+MPXxiltw-sQLZerLvpuZupSdDfNwpxU@i5-Nmn}ooWhvaT}N>H2Tbi z=u?vzQ)cMn3D!V3xg=7=AJ^x42Pi5oS)?rGAv?y>c ztn~&|gsV?Ovi<(?pePFp8r9o2f|}N7O2fcc4Ny270zqr&+)pDTwY!r_7lwlvEE8vW zSRo*Dkbz=d#E8J)*mYRt(yG*sk;g@JWxk<#2s=?rF*t#+i985U!tz7Y8sQ1vx#y*@ypC)-4TdN07Kc_ zSd(qDJK!3ZLse^%uVds=*dB?RsEPY+sn2w44SIS?ju&07$BEwcK6?I5Z7uu1nW41C z>&V9L!dER{p4KGc@w0c=NY=ho8ZFd6Be_q!WzTB%b=*zmhxbV2la4%Q6>K4IpE9Snf&2S_% z_x$Vs{qrinDM_K+VxI|HoEt*OR)bNCgf7({!#MTQth($zC|ggOIS@|~_FV@W4hNw;M%ba-US87(68hOKsF^x)%LjWtQ7vH1a zKBofb=zYESZ|Q16c6aX49hGxs`E?m5Z^5P~*%t6!o1TRfV-tAJ(Hp|>DA*hwrhnqVcsH({;t~!^0K+eJG#w~t?>uGH`8>xc$k$O@vq0wo^&Ds5yUd+^|)ZeLD*^e*_m$%6!fwaBPT0deE+JK z1{FSJEZirngF8~BxOyc#Y`Sau{{OP3UF9(bT*pZ^3P4m6I4$;KZ)}_f;o`zY3iBo> zSvbPN2%%#z=iql!8L`SR zxq?(FRq}Ne#771Lg&}Y<_amAh5I=TGG_m9g7s`hMBaHl;di4qc8Y6ktN6R7 zUo-rdr#v|IXg7ZP!Cgx{o;9;8kAIadrVgMa^RrfhljITw-%v&d!s=X;J-NsUEt(#OOu5{voK<@$6W4Kod>wsQ? zEPHU9F0TKNF*81yw5co3Y#FnK`w(O&tAWafol?KTWnZ~Egk_`#_QAIF_9r59Z{I6` zS!azsD9iFRVpT=|4a@sy8$#42_iPyCkHbuLexWa*oT`dTZ*6)+g|&=X(}fsmtbCoT z1}jxCOUXNk{!NK&P3`^|ljxmd`HgOwj4|c^va(1?Yv|93K4&omxpw(4^@fB815z%s z7KkAfp>`0;@}4<$*~wr7vzeISPYC5-UZFsDpoqn)EWIZ5>4#8{N=wT7oHE_9%jt{p5(&4n$vR(a?i+GGLQ#KTBZFsGmk)l@$XPvF+FD*N_?BCeNE?76Ha^PWCD6F{fHgQ1$Xm{b_$~ao83aUdnr<{BX~RYgq7MqC9krWx#ZYG_J&# zEv2ZQ&7R2gH@PC>*mXKv(?-oRD_4ku@aDe9l8s>6+h*}O1tQx8+iM8QJFVj4S4Jt| zbc#{+zfenSgkWSE$%v%l@%;5E7G}vpjG8jV_D4@$h&?H5@FaXB{h+MeQj7{E^5>Ul zL|kO#i69Btci=9$wsN|h5>l#x6-VezXsmp^iY&u&8jO3|gwHhKUN+|`7M09;hmWcH zZ8}4(F%v*0^5G1;?zf7 z{on1Kn0&xVX?@lr9sNM$a;*E2m^rX%L%0+?>l)>(c3aq@xnVd3{7X48BQ3p6+Sp&z*$n*H|D(Or%* zES7z50-ta|?bx$3?j#Oxzb1;gaFFRaD#Cel8E*JV@C5G2nu2hfAB$_O{n6BkiGagX zRjGWBe(LD7Q6cl!JZ{PRAwNx7&EWkVJix{N|0L~)S|rp8^|8f zczGs^+G!a^W)$h0lCGMxJ=LU$t7y~_5fAzN7LkuGcOkQYe^lPQ5Z+=GZYTTsA8>3 zuZqkE06Dt!P+2J#dDQqS0#I);G-PgY8pC_Z(l_mPbgg#PvB_=V{xGFZ^>L9oyFUr* zrrc9*yCR}xw%TyR3+bOLx*p~aM@BfkI43=^C3-rs+FXofv@%MS<3oHCX-mFqIy?+7 zNs^8OzTn>r#|{TLiR=lGN}_4r)CZ&G`!3I{(d^UxJqhBHkR7pvS8^SW>+Aq}9%YFm z%cpoC31-h8G1*n;JGVb11Hc1XyNiunW_T!Il=uTYqu_?xI&=YR78PqTY2JVK>FUP= zM$3Eew7a?}7vpxZ2^rm(;zJ$da_m9nDk%lRMcq6A7#qwV*cO5s(T2JbTWVv1CCm%H85f+lM z{b!eDUv@|Um6+JErc+QWAUb4U(+V&4u2dL^kEnZXsp$lR>y$36ss=jT9kWBsE>fC* zzzh%Hz!zCYq3&9IF-uznv|DaoTll?7xv$yRXLT9Xdr%ZIywJtF{U*jB&IVG2yk-o$ z59F}Opgf$TB=o9+PgsRSSbLXQc+>d(H{<#uule@Lw){I-1d^y_XCQ*sT>Y>_uhs#Y z>}ywsTx0#5h@4D5#TUiy4o*&0_vpr}CEde{y(!l(YpZ>M`s#KWN;rL2_d?>V)>_Mt zSq4VIwg4#1!6X@?X+63$u|m$qAeuYHBRlm)6v+46fUEGo!5wI{Q1@g7=4Gt|ZeA!# zJQqp#?EmY87FGt}XX9Lmq6bCE{aIOAA--ohO62UWellZ8d)6l+;a=CRL#OlF3CGuT zc^J&EaPm0j$9w{xknAU=pX`kl`vs34cx|VDOXhQmI@F%gMCy1J{aNe`h8Ov*NA|lfQe0L8a?(L_y3WE zc9R7bXrYSE4*s%7^hSCld%o7G!d4zt*D{Cb7#wl z<^lmaY4$9{=?a4UJ;2n;*^g*ic-^z#;FV#NfskQIOykh1zOcv?ugnoDX4gm|#zQGt zoN8spAV(p+u!#t)ko*lhZEUfYS@w?)e7GG#Haoqx$f)6-a72^8wMv0%}PasNPVSfdJ)D$Z5Bc5X) zVH1YZUJxTIll$qN^wdD=1Xv-s^4NbP^|lvP<$l9A^MNTly&EwccCvYkld6f9|Wks!cdkURF*mpfcSs> z`Ei*o!(xCDLFT2VzjHpJKz{4OdwG7A0Gp_Wa)(c5>8YG($6W<|pE;asRJ|bjBjICm zf~te}zDFby=!=j5Nx*k>k6F(wG`>9o;ZDwS$=V$K`);MU#Uoq9EE+TpjT;1(NXmmU#guWKeTQXLXfW);ment1>p4|6zo4Nv_JhE%Glsy4ydFkl$njRsH%au0l4_b0c}lOk)v zf4>E>6M5Y!iD#L7oD%M)D?Os#d$!Yl?hTM??)Whz1CF(l?uj-Put#}O4LLN)LGe=O zr-a~;-{bth68ect6o@DEnF9D(_GinG55*vODl_qa7Zxx`&BFsnkYO;-O&b5Hyz>me zQ>#;!xk8M4BE6^ehb?KU>JHhvp0b$2B3F`@w4C@REDtNh)m9QTb?NUF2+XqHI?;P{ 
zt>YrS$&*M$m|+^BIEPUR&ZjqfbO?eOL6-|Lx9hM!9(iR9Ex}BNw^72lWezWKkG4TT84C7iBGy$9)`43zL2f|YfHiA$nFq}95%fP|AY<}>hY&qepc5})4G@Y+mz^Y^2I zBs<8H5^ka)4(iT2yTIwAJ=M-8R7E!iuwoUjbkIV=#%4IiaNmhUf_$lVfkrdqi&4?jiMP+W#Z!OZ=gHzxSUpmh25>86qTP-zm!o zZOB$aszLUBDLW%svXmBuEJN8!8cE2~NFgSss8q6Vl{K=L-+6j}zOUaOFt0h!y`1}; z>s;qL|81J{IRvh3z_I5$-|nq#>u_j-5eyoy(3jOGtUkr$E)`-k7VFtRHsu&(eFrWP zSWI@&NNGo#V_Hl%t%au3ZOXr!9BAzuMa-2Rl2M&w;&C@H$_d}Hyy2Js+w6sxw87#N zz%D8y5wiBIw}E7FRlMxaX6C*gjHja(hh*4`g%8F3e;r=+slwZz0kQ2F^?k!P~W=pJ7`#&4R9%{9c4Q(Og&yPsh3>8|YJ!EP3K1|)nK6ES;5wD!?W$J9>6+kJxrv=Pyuj=U8~LMhx*j2Id2uNNzjwgZ(!D}@Wu-lRw|)E>%Zq+ z?ML6s<_8s#F_mHvbU^tp)HQ%8DSf2HzUg(mdnLnfW5R{v>^M!EgkzznE=r3-LF?az z_gt5G^Vv^`L)J|JkXcgNTGu7mq>pdF8}5TJ$iVvQBM&@6m@H0l$A3VmDrhko;*J-t z69KMgf$DWP#Qlnln5ud>;Hd7(lHXh=?j==p&1GASJ0aY^7XXQ8qXJc@W`yO$EJPn|d;qCT+ztFGTfn%_t(%9auja%^dJ-*|Ff0m>tfcBWmx+ww z)AWoxx9>7crvNCIWXO3$hE`*}Yg*7cxmErT9bqP)Sypw_)pY7kKzE0U>EjsfQoL5p zke8svYkM&3foWLXinzLE4P8o|eP$+Q})oeRY%?`eq*>x&A))r@WA>A zJQz{ujIRvzbqXF;}uh>?g!`Eh&;!P#*-y^bXVq z#W%_v3-2(Uqk=td6)foP&8&50!|o0IJmcWG46PBWrrN)~lo=6XA=uOOZRS5&u*X?p zIa!kl8VpdONjLE$$WF^zW;dr@P$H%AsOe9qO9FJr3|C?3AtB8A?8QiZ4Dn)lgbQ{>YtPJ&HzC0SuJJPPxI`~%Xj zB~Ouey4fUOYIXHU;D0P9e%3?5R4nid_(j{4U`Wj&hd9T({IfNLUuaBdyX}XkkIVRO zzn?X__4vHf0k42(Y=E(L!95*m0K#TF|SR3r%FCYdC5<3cFcBqoY0BpGJ zmcJAE+XlO+4^P`CnhuRtMQhM_id$t?XtcZ>!c9+7WjA(26a748z%+sM70Hf$I@gDP z`AdAnUm!uYz%>rs_q&%2vZ}S$B?W%HgqeQH>S4z*AE5X_n}3yTA$WluUf*hS^teCJ z%r$uN%xZ^P-k)$lBDy-gJ^x^kXWt`!LK8xUR~j?~4c&EM2MjvDvW<<w<&ZiFiW7 zv)oilj|*|B$qBPe4nIU@T5gk>X-v{U4}KTK02&=AV&jmC*auewq$LZ)lNWv+S&uJo zb5GYbAu*ZpJ70SxK;l#_#r54yvZyD>pv+ScZhaqFa&ou$@rY}f*Kn$?d@p~aMST@hjKg{=D=cyw`9-NU2@W*gRE zlbKn}IG`vH4W$1B-_X0l2SbDoj3zAnR^phv4SN{8)a9Zfo)gDeqW6)&E1r}f0%5OSA=3F z4qmp+S{Em?^(b9HcPyvr+z$ci;p01EnM)^2o}#hxhaY7Ng^d56Jm5JXz!RDt`88qO zRJ8!hlmGekcp2yhBb!_RXFZ)>zG;xQ6)d)`lv-d+PCp#W=*fq3OW>p+zwXEn<3cWZq+cjstWDK8u0YVH}IBa)t4Pvz*~J_N8_k&*!Q$hOz#oj$Qh54LV;^XYk34?F6Cbu! 
zZE#KCzY^5UKmU17F(e>tL3~i~7$BoJl@E&qxh%l#3#q-#Oh^;a<8t5BDu0J?lW~cB z$qXWlIJDUoeP@`8_bO!PfOiQ#m!ppHK4R3+VVag&kRv;27d$i|j+Ir}oglfUF>=k} z3fIp090pH+$4uo6cvUuRsKT?Am}=|pZk~Y@Pu?Zo^9m7Yq=M>AMVhmTkrK|#{M)*q zAx?y_axoxmif1>yR*HQaC8u(@aGt>eV~CHHzwT^(>5wwvgZPMPt}?sQ*dwlzMi!FO zX7K@ZXe#nGs9c%%mhy-3P3fFqY6RBS*eJE`r`J0-C=ks@6h~QNzu0}fX@8pN19&E< z?`8bwsvoxwRP7kkX6OYl>sWo&;0gT;^*ahbiXy}obR+$A_Uy+UCR?)f5b?&qhns)g zZ`*T%?85d1bGt~IpA=QUasJ4tKBosGN%4skbMNt+EXkd;l|p-_v`%T`T5Bo;rX+A}VUWVY4sDE-_wHcVx+?2EuWO3-ya zmfpQ=+^9!g7CvpA2_b^t!VnvfJhIA)!Gr(FGD%~BKnmra58e}C5b}SL(pkm7)-?e_mEQz| zhLxqB{h~7WJ!G^!VwB2HKbx$`w5fB(Rrl|hBIk8t#BA|qKj{!@P+8rHne^yD)k`_* ziVIV5q>Z)lF*?10x5w`0B7{ZS`22;eGGF$*xkJ-a?KfXD+Z4*Eo2ZV($EeO+X~GHa z8`fdAz3bR|LSD5Iq+w=NPM_(bCb1%Kr?_XRsp5RwOzCe+Y&L zpE-SjC^3DKL9We0@K{|3EhfUvJbqAgD<}7OOpN-OXtjRak0qt?p{>o}BcpesN0G?u z0EIbfsaQPxetN5#sgS~w_Wpt6|F5KOFtCQN)XP&)x*1NOYQzb-=Dbj$vFEpjpaMw=0Hlsq@?D&=^KgA_ zL-_T%igIZLX%q+|-q}UstUSd^$+501A|(zAP2JAAmcL^rx-xVnB;kOFc?@T^Y_quD zS;Cf*y3g8$BlpAv5o2L?2}CHwH`b11GWeU`9xJYZ&#G^?|SB`zb?UdIg>KpyeZ4wP^LOC7h%;8RPC1=Y9wobT7l#$6n*+oO6^3@ zQHH)7DUcSvXb<6P#vpwSXB?g*+q~0&9Wf5t;Fs-;>&~h62dw5#B#5$zMQv7Xe5bc9 z%(2+)i2nVZ*g8B)Y+ab^@#7+mqhM=I;kFY{F1by9W2g(!{-wxgim0n`Db>{JPW!^N z|9%&L-$M`o?BZUzw;O4S+BfH{uS=z1%AqK~ULe6v7zcO;?FQZ9n8|5Tl%$n$tqKt3 zTZT0TX8b|AQ<@m47^17YBtG~Jz*&LVM*=DNkJZx{5Hjy41%%3 z-Q+*|6HG4-3qgm`oZke^WsO8{=+N1a<4 zy+EiMSvtlWNz4b5759#1BOlTH`t7)V&fUzmDF#2D4mIz)00jPI|2gi`-mD;x+v!=J z>tE-?2H8Tn&^PkG-Is9F1Y{B{*eqC#`dF#*U*4%i1OkUTw=;4^ZB`ti3-_OO^r~fT=lt=PcH*>kV z#cGfTyf|3&9gu&3@8HQ(lm7`ZHFbH{*N2z%`EmT7wHTrw$8l%T>7ZXiJ#<@b{pzr% z;?yP(DlqtgmS>ed-Q32I@#Jt9Pc-g>YtFI=l05$wI!Z8bBRedPuzA1BcwsQs=4Dxl7fY%toVl*_5C&bsrp)-nCo-CIfDSs}$tq*{|!@ zw4?`41TsU1=ao4*H3AM#z?>d;Vg@55+L)eoh)7Zy#194;AG6Ga*7_`bb+dPtOgFP< zkgu^Vuqc1iUjdxt_x&=sPT#)8%lg}KZp^i$IRa51UiIhxd;6!~G!hnBS2W73TsvOJ zMlBw{FBuq4RDf(Tt#_&Fay{*qe$A-M9EPZ|0maQ+^k$Rby-ZZ2oJ1Wx1>Se>=38Gy z>`aL%)0PZ!hj@LHl`NQ%Ax$-97UDx_aA7sizGCwl$Txkevx$&~iC!(nXj9#a2V2u> z5$Zsy+fJ63Z}a*l$4*%fY#mZVrl2wRrwhS^Bk%hFs3(z=4m&b>s*H~C~R4m>G^U0iiMl!=D`uF{>Y( zUc9W;rB*da!p46INxJ_+dO?}{wv%6(f)pBIZB}dmUzRMIw}wtAfzFcZTI%B*FC--; zX`y`N$o?Jf9LGl0TE7L88M}GVB4~Nnop_z|#UW=8^!h7* zvUVLv5&z$LZDYn$T=?OaTQQz(0PT#Ll~Y?^AY1Ddny{nxnrv?6XIomx9j zv90Y+2NC}q9ih@IhuY=F?*IwssYte#MY6*)QRtyt5A<#v*7WW5YuRcyt!|r!DBc#C3%12BIyZCB|7Es~k#qfuhgXqnA^iLd&;3j^s>&Qy+q2HQNl-CqxRNFvnR0>xccy^A%*d4MdyzwJkb{KXW1eH#U#$q`oX{(n4G=v8O6o`pIi zMrcKC#*D#Y;7XqEWIQB$0+Ibv|#2FckZAkb*wThCpv2V0P1vim@K8#V?GO`{Iq@d#T*X|MG&`eEKQMJ zrM6NY%`4V&Vll`P$mWrrM4rIv`iEf=l1=-WSdqS)jJDJhWNyjOQ&168QM189vMQEq z%gPrhLHV7T&27jkYE;Q>okt|%mWF*jSMML?k1J?gQV{QgR)KF3!b9vi9!N?S^Xmqj zUV7G6 z@6hcllltFajESRw?U#VPmz^{1b2L_oS-;;ltIpr*Y{X0FSHLR?&sk{>`UHY z+3Pz^`I~+^A>5KWYd#^bgRsx$N@zNrw6^atWq1+h?wI|W8ge>QxPA%B%Oe9&7uuy7ax$q`g)T15p<0V0$1_3wj@cWWN3XRs_hCm-EGuLblMvA@}`I@Iz+^2*xN;+B3Qc#Staqq-VVlv`|uy zjiJncJMcpnS&|V~P7j^|5Be&7YW6|V3gT>>Fn5n*F^;LigFDE$++>5;TuJYm6Cbq zpa}-;tps=lF7#1&dCz-}5MhbHqoq{s?JEvY0=m@S0kThbxSqyzg0JbIsb|gbwKq7@ z2w;`?{%u~h{J$&SY?mUbYsTB~j9lUnV!Z)Wzxc1$qL zy47LRDA*%2WDgufew!Oig8h9W)%CRB>A+6{`5k`L(F8T~OufnY0QXxZ@B~?E8Twue zcHiO2`+MI*79l)VlV>5aqidsCczPFbmz}z6MVq1=BmSzYn!hNNxhB?_deu1gEEFa8 z{zfSHl@i;|N9`eRt5}ePVS5huoG(m=5(^PRr?^x3Oq2)CuDws{1U^k2pl6a z1&j0FnxGU^4G^6ueR=C{Qzf2o7+n*X4ti`J`pih7#%a!y`z7#|`uUtO&;FZ^Te>wQg)pfaFkOrc{P{_WA|B}oLSPQT00Sjo>y;dmP? 
zgLY0La&1ill)Yvmw(qt2X3k!Ih+YEegIr(vIk_B#S_{$uyIri_@+uZ#>qfKbt#U%f``4z?)oasH-BAw(tlmtNCsv7ws`9QttZttwh z9bG6#a>1)<(U%fg!YWI@`|o!BK#+2ej-!LNjQ@74<1X88^^ALra~^=h_X(g&#QAq0 zFAfiH;~}ox%wO@qAwY!Cu89Cwy~(witnRiIjJJ)EgMbwq@-rzZD1y737UGv!Y;ckv zWa1_S#Id(74tEKjo~JD;k`xzoFdqXYf5}bBsB-#x)1D^I+|8|*k86o-vT9GxEcmI2%ifV%))w8RjXQyShh~nt!A5i!?NruUf9q`C z8pi;O_h+@Czxr}2NO`Lrlb5yVSE;xM$do-=8J&N&qD5hyEyf~dcLNtSZLXuY>88EB zYp1Cj5fT_w14B^nN$fxl%FucMfmDJFzb=0yS@nNrNb1ns$Jh|cg+N~<8;Eof^C`MT z`#eS<9@Y*95BO!-(6#H4@e<8#UXM8HE2k8B^Yk#oF@QoN#*Yx}-rcN9{cH4USPc-A za97g)(*b^x7*gwZ<<|EczTRNS#B*Mt-|N8^Q8@VJVh4n!pTR;H^IIQhqw_|j{}(lg z8bL6LXdI3bMwj9^gptkZSDVmG#=RGKKQV78_`r*q<1Q`t zbGG>BWEtmu$_mrxyma3$``Z(3obungXfGEGaTiWG&svyOU$*MgXQ>cPj|?|Hi)73X z?<*@3qJmvqgcOB_7Te75zZY}i>s;&k5B_g)(9pkGjeZaNwhRjD&2|4Xs3T?D>)Kl* zx}4}y0V1`@hPJAxaozre8H)~<>(ec2v~BL4-OfG5Oo7Ui@dBkS#insHe&zd+@(zlC zC)vZtt+=4F>nLXGDw$wf#0Wm{QQ4;M0cwMAZWfSQWg6ng0h2cz5&p#g5CxGoQJ=>H}?dHdm zXByHMBCPb@+)j-vJZo=>*tV(3wfg&e5+7ITC$uuqACma-el2=G|jUS+>cxONqQXy0*wkP>B9f)@m)V9+$l#l6^sm01oVnP0aB zN|E^p1#r8L=0Ll2&INqvWna6oL}Ii6oyjK@gkSx#!WxJ4AMHKM@$2X_C;Y+ndra|e zi)Pzy5p6lD=NrG`44pAUpR&c~4n7-e_GCaftIWIJ-!7MQHoyhQHPMO&L=o~i^t2pq zmZ8fg+5>VI4Se83d7-Hgt#DQzBP@m7KfEkQ-Rb zgizm)JnWE3hHkWJ+}O+>zqVlOuhDdGN#n++LO(}Ey#h}OfPNeYdt zqCWPwFKT_)V^D*c6A*7Y@5B&wnM*7M2kW8<<{>_cP?V>|cqH5BE+t*g>#lcED^9 z^K*3Ta8t#*(ED@q69W>A5~^Kfh8V&x0Oaia5R@``E`p?|p%B@5Sr~k6mV%^Co%cUy ztE3LPiaB!A_N0d{-I+k!A8L&b^(Exy0T&{|wL;mOdVsX>_5HVf*+BQf^D5`ErKeL7{~a~KhWA)e$PXgkpw;;X;JiXoo{P8vyBXm81+#N;Ui`mcceKHaN1~kbD>;D=1-1yZ)k0WynOP})B z`tB_Z%n6Ym1CNmk0cSn*k?2VuW88mnm_1h!s$y2P=M@& zr1fxQ>8dmoS@q;8-y(L`@>=trT1g;_Y-MFVoLq9Qx|S3g$oEaRPSPyv7d`~mEQ-HL zvNTOh@!PyELg=m-ANI~Pm?i``H$M8qK$L;NbMH0XP&5zbygR!EO$mi8zd^0eQQ&yV zm#?R~X)J@$oBZI^XMBZ6v}UeF&Cjw71r1)=MjhEROz2jl079&$y_c>Uk%n~ArLS8# zI6w2KM=@Wc9h36a-F|LC5TI zOCd9_17txxME5TK(u?Ip5@&r)EtMfBp26ff&zWU{Zp_A*1kGi;cDZ}`wWOq!q}A;> zV8L$XF)KDHg!--pu6GM<1#PXY4yyiHIb(czWg7w>YdY5T2ceq_Jm{=BES{ukU6Jc< zLie;fynMKwlKn2lz|ICjyRDJ1X96t`nsYQGT6bK}O1SVFSm7U6l(^1S_-j407v*Rm z3pz^_mL0_*l!`*VYw)Z8bOB^TN=nnk6r<=IqNKdCQR2_BB?Nj&s=R*lO~?5)FaNup z)^pqu8Uuql3F=p0yx2Mbsw<$F|70>I^`=Mq=+>2_jig$QSGfOtY&Vk$IRtNqK`fH{ zd8Tq3;Y#Nu%Q8trw*~2sHlG(!j6YkfN-A5Y_idiCWPh^LDIHDXnNii1lSSB|Trm}m ztc5&sxOGoKjSU;}aVKt6drGL25YG|zxi(4b6Igjxkj+pk2mZiJfMsn;xixR|M%HrE zSGRUPz&7CMdhL*KYA|2fDt1g$l)dt&R&#Sy%1~IbIA_N{EQ&(mU-Rw19I@Sv>Ce|eZo}RMV`eQ@e0jc#$%+-^NzBz!F?yjfQw9q!H&Am9|o6h-&&NTr(fsMOL97e z-mUVfNt(`ETH7&xgMl`(fk5@$rnrIA(h$_Axsa`v57f*(0FF1k#3S^1xLEj5^nQe% ztgNq3v7e6Jf5peYEq$wd;9JgQnlL-o*qGo`C$5_A%h;Pn2xcsl*gj{^9>C|oy{p** zmzhj;ViY+ve?`{k^|l`oyQ|xJQ=?aV5d2iovt_3fe|V%%gq+f~dFW>Ds46}ZW|X?| z7M+UdGP+gKsB$7ucqsJjk2b_R2ZH@Y|7|;`zQKquC{({2yG59T*5JKLBC|ZF_AP)A z2tvAW0KPUWhc&o|<3j;o-1GftNtlxJ!p*g3l1(+p`%**?HHNM8eQo^5TtOl?(kVM> z$1TT{rZVQzB0ka%b(mcTLe3_MoG(`8KnRQCBUhS#(8l(vb~8or=5&r&s#xa>WVVNP z7jk%Seq!rBIDJd(=2kayn1s^(i6Zoe?z;!Z_y(!Te-Saa0O2KgyzJFt*XKay8%{#UnW&(z{rSX2e`_(pLE6d z*ZKgU2;o`Am(1bsOG97wVG-E?C(3?RP>enRGYS!r@k6WRrT~Ezt!d!o#Q5F+yPl7^ zD&<1g`X9+{AVrnxzX#}BI_VBPgftv+AAXNjOOZ5`EvOGX4s|#vs4U)e*L;g;c-DSw zZ;)NiCZ0XjqBQL`k)0s6e=)9gP)$aBSzUsWq+#?b)bB^xIN9%)W8;yN!_KD~DEvfAGc|TSVe~22-*y>4uk8O9UV!2qYASTFPSMF{Hi12Re_9Mt<@=VI&;(7op!v3Iz>+*%b4` zdUoDK@nu5nzGGMaTB&8dsU!F$I9zUn#A%6Abaf6>4rNVPGkOe@JmJ{uJBs11?bn3K@dmas$Vj_P&afkjk6d^u!e_$y;^h$?t( z*9lJ{SC@&zUvp%?0YMO*X&oGWY{Q7$=ErT!npHAnfAsxy6s{ThbeA7p$W4G$rh~I1 z^IX_B5m+@5kfQcc;s!jr&qo;dz1XDcxUN=avJN;bJ#l_y# zGI74LaHF}^u=wzJeShiY$+BPe04C!J{-^O{Soa^7nNm@Yf|L+7Q2?WiT z+|KKYd@_U(9>NWZQFa|`c=b%HrPCSwDob+Gq>HGXh$e?(YtRU8I7f+!g4{}q{PUQE 
zwc~$x*ee_o{`Fl#Q=AJ4Z_q8E$LtEO>z}e|fP-k)@;uSLNS|ba66d5RcboYZF31>M zc3WtePWjHW&IQ8-Tppb2q2Fy1Ufcr%(BQJnb%eJ7F2MCQoj(&Xx$%aByg?O=(7U}( z6T48=eg^l9Is{x+5&q-=&IjGQ6fd96;aAkCy+C)ri2X~;_(P^q+UOI-q~lBi%#HUt zrJNtjQkm*BC8B*h`_eoblJjN2e=m$lOebR}JgU28pIxJM5# zed6F`*=HB*t68LN&SS~zI>fie8Bu!0x`jUiWB2`<-U1ovx~5dn6|s5a2Wa{L9l(>Z zU@unG?>E&=CFO2av#5UM5jiN12QTQ6w3 zk}t-sNR|Y}89zUql28m4e=p84=uYHSe*G1WTM=#I)CGt5kae$2GPk|r=gISfJQ3=T ze;u#+ZITf}+eGqD!LW_~1?WqAV$Fm*DXq6Ng-HJUl<%fmV&jB{a8vrR&v6>7lK09~FFEADJx|e%kQy zE?7wUGugIizLgAmC&@ZWS@(2_Mo5zYdv_jNrWO(dIB|pq4D!rma~@*<%Nb6lampKD zmkCBL{kKUpc&f{{El|;u$eL&F{yDNiP|-_Zys}lf@`3OQL>j5Pg)<0iPG0K&b3T0? zMzDE4^u7*AgvY{L;?KUFt!{FO0$ro<1v-{URBF1mNrn=gO7ssNIRHNzOxuE=P4}3! zTs)fAR!EJC*klrRxPV=M&Ue4Z3YiFA<~5^yM#rjAT@%dKb>a#NeL(f|vxi~vS2*H0 zR$lWtV>T3z&T*RFpVin4iMhbPq!>1{@O{!;N%y(=D~PJvs_cK>!#lY zUtG#ZK?yUGl7TH^o|XtDJV!=9c9F;x^?uift-B`^^gVl7qt)xNx#^M!qI=Nn{Dw0J~~_H4+r{$VZ17|Cm4>%y5sz>XasIt z%dTFJGn^qH?*p6|TJ)i6Dqwr73Lpi=9A7T-Rp%yjlj?o}=@Rv4KKUSp#DvClX82Yz z_)Br*KU2KzSTcTDF<-EBH-r5cPFDJ`20LgDqzYT*&7K{fASpFyP+V=oy;13WWPP#a$uzbS2UVfyWR5hnr6-=DI= zyc9oy;m2=z+0Lwwf^8$n7}W)VRo2O$<0lEc>`(wANI>G=zV)QDtPiw6yULR*wvU|$ z_?-{IFgBX0#4*!Ipqf?piGrHQS{@cP-}p+)XEWtH7!wI_a{=Vl{1aLxR3;DasCkgi z5B0ag?#j((i9qjT!@wm}_LK?_nUAg(D|pv3}Dt>Lyo+5DDUbtUQU9cc#e!8(s#&L}pr{$2eMsdv@nw7^>ADG0Jw2 zVHoQk@qb-0FlK>6Lpt@P>egNL9D0BlrDpEC>N_4dr+x-l(Tk?xl(iON5(b&YLY73S zq1Xaf=nv5oK6(X2k6rH!O4>8Qn@tZz9X+OKa?Jq|mP1JYQ81W24XgnBxPp{20{{5e z{`N>S4B_=V8CEO%PQt9xjd zv*Z7aV)nnW{Q~MNOQ75}Y^I(=!77KQn{yx=mPgz-fC6-OxDLxLs<1wCPfuVNkwxVwFP`|zwOfDZL z4_#wseJ|47LOM|9fja76fXV!E)cclp-7F`LJLNUZURlQMH4C10VL&)yH8xyQrryY< z#SY87?s*nnA9NYZxbP6*{^uzHqW!?2jxAt%D4hcnystcYT{Y;EApA%khI_IA*6I+A zt};M9_!_T`>@{U|o9f|z1rjBP`+@3c`A{D$(C@hN4S@A8C~#V|q2#akES?Gf|W}UJShe7YR#&vEf-cFYhqK z1LB4_FpYt+u^0cs6RJEp57u55r3sFvOoEeX*BX62U(I)D zILU_kDqD6%FMuc}kZ$^TMZlp40vSKSVA}6wR3A=oFapN!AM=vR9B?>=?eVDaX~#WH zXTilm=m_VqJ^3qhkQ5!FNKcQVA~H$Fy36qXS)7rTwo6{(c@V=JwPbs$?*!qx#qyje zp~?|?X%@^rA9QWtEI2Gl6)5?L8v8NvETq0B;dYie1|C?DMv)IFa^T-f(8Rh&d!8#y z8E=cAifiAc()vs@9`5lH#)dxk%li(0l-@ZrcQ}ssEvr^wfzQqMbPhC6@I`D{Q7`&L z6q1m!=@$MQ@-An?UwfAaPPbf+mmARro?**!{O7Ey_4rcSwW_D)d{8Brd@``&(104F zobW7CMj}@1`Vsal1~;-fg(1(UgN4owxvbZUrEN38ElHJW3}B2x#!RT^yZJ$ltOC5t4fdI}C?QPK?z+JphJ#2gLr5)<)5P z+7ENo`@g%ir+srm0B@e7+5~fH7I0U@Xi+eZEP!LbG(U+ds~Gp;6{RMHEqwoY{w>s4 zNmWORuF1;aK`Rj%rt8TfE6*dd+#MwO6R02^afnfde-*ZAEwM_~tw@`pdlwKl?=5!$ zTzNn+p|$J=SdXsMzd9Ughe~sI-2fD%fivaTr(k6ethe~VJCq+D(^v4zPt{BPq`?V~ z$75yFAQ>k-*EtMak@q+ceGhVpE?+3+>f1YJilxH|ZtS+FF@g=ob0e`N{aV1=Uf?fY zX%8b{&Ka%&AyYg@3r0VRhM3h7RQW6Bgu#|)RBgC)bDS6rFZ8u0`k73p?GIr!W_;oq z=zeC~F7=w~p^{!%p?mx!QV&ESzR2Fum3+y3Ng=^Z*emU^jzb2>eA_&WV~8L28ys>2 zQ3$rF$c|c*0u!scx(`D~87Gqz5rgGNAXqk~z2@5GSdRwotw&!4*n^+;UJ8kI{1w0W z-J$^@53iZ9czZIy9{9I7z%}@KZkM?YzZsGyM?d36KA~Dug?XfSXPMMUJ?kc1$uYrt zMN7S`tSD@K-rDtNlO28RGCmtl34KsbK!l)8kYxjBpU^>f*Fou{BR6q%OqU!}nkteH zj|ma)34tT(i21KJU{E;S^aef67yx0s)Z&!201q}P9-RDY_fat3McW0Z;uZxFDCnF_ zqhn&A^Q<$%`gt@j3({HOr8=QN%;7W>j5s8fTkGBRl7~!UiTo^lYY8uBy>mXQ!M28s z^?sMf4T7=xc+Kjl=mMX!26d#+$n-KviXR8aSxRXOd-%pXi*zGzfn8f*ZTUZp;QV45 zWtp;^IE@+Ig&@$>OVS5jT1cHKwAHyRJ~me=$($G3iu%uv59bsU1DA~VcFjUT95S;*8DkQ!XH^@}HIaLKIwxpmu9`(S{E1Q)o&Kg4Fse5lC@!`x;q z;C{rWLwi^~MQWP>vgJgCCWKkdd`R*4(awOtky+f2z38YqJBhVini-%}aChxj6>bRF zVZ>gOQ-T_|tf|L>QvUf04)ohOB#Yoo%&5yr9tha(&0No-Ir+7VHnEiRYKvHcBP884 zO4G1~!Co}3L-+ltxB7(#G&ro`xa7JUU&!U2;JL#Cb||6r*-;V?PnL#jZbQ&Lz6paL zAE}@P>F^t{ivl2;w%)fL96gfY^#bwzNsy#pHIQet^0&}-Y-(4io4J7*IB_RZY)tly7T|;!bZ(%V-m% zhdlk=g3z*=3ZbN6li38VTzMG%+c;W8YIbem8EEU(<{I>Z_)2dnzbH$o(;z%kcJE+c zRSyB{h%>i0+CRVZiV&!4d%CWr;H^cfxx=kKSANUy6ZnhAL;bpve13 
zT!C1kttn7+yKEERaq<-}4g>3*$6SfEgtZ6UN)PaV1fEks>a)kR-ZuNoz1Q-{0YThc z1M-Q1d*JPc?laFb!N90c5+ghZ0ST^`R8qGc6K75ie48mC_2Ed!d=5gP5H#PB~ zn!*E$*IG*{lU9t7sez7&e%|AN*@KbI0SXv8D?}tEvz6*Nmo&8Izs$!wvJ)rXZKCmj z{@Cx8$-yg=5SJBbvd6kGMKF$f@1b!&N0urHR3dPXuByQm8-Klt-1ZiC>+z?jAbtwD zQ*z3L2nQ| zK*h8DrLZq-EhlSb;I7dxeJSrq3c#fz?UMC#88o0C-J^f@FGbhIG*{_j83_XW{6E3V z4U>W(p!A=z`OIc+mIL0w9lF=xK1z7GSL+ZVA4baCQ;)cse))^ev<&37iQ2T`$Oos; zf%s@mUdk^b`#Lt8|9mL>|6>8_IxMEiBKwM-*0bsOmCQTQKm(ssO_XH9aP%|g+b_y> zA>5hyvy&$q*@t&(Gy@Jxq;)~pIv!c{n}R%J^2Ip5LQjVEZpbW zI_L42xjL8StGQ0P#?wHyzR`Z=PLq|JndF7y#hgrs?!)dr0-xwsDRdM?PGy_NvWovX z1yZDJ&NaWhJuAjNveUiEE)vzUFxHL!SQxI=@K^0WwyqpYcb(b5(1cr?xB9*{A=H5! zao_MqED$RNX=7*`2s(io#7e9r6KUcXUvU01XiFG=2(BYSm}_OMS#AS{E#o?O>7fI# z5K@nDg%uq$r;GkxEIuNcxDT$9BR*Ka&+UB`&TI(l39M-Gi}|+_b$(Qji}u7l+eArD zYzq+)XA>L;j061#c+@}f;K|UOl%BD*%;2AQbDsfk88Z1CD4jnz-A)hyRgU%x{uiH% zf2`vWcLMdN<&GJ^p;GE#qx9W^;AD=_8$~TT{aX;`BT<^Z>Opri-lSxIR-0Mbcr{@|hHKBb7kVs}HKF zkn981MVX!AQukEjPB>bPd9=M?4(Mrm;LrgOxb$uMiR*dxP{@oA7~G5PMZdjTz#uyv z5afvh8TRP183a2bVa=`h>oorypV}JrWf@K0w(be(ybE@v+weNCD zwf4)u@PMOM3(~ay!zsXGQhL6r81=Z_?KlJqCj?#b=`eYkG`vE75$BhGE#_ncvR8P! zvMh>6N$T=-!v{2w$Asy&h|A5AG_33~gcHR^#^w-TZ$jnqsyH~-Zk7jK*9VT4p33V^ z{CIu}$h7s9z#?_%Mnc`v`JGRM3&0i9v|LA>AK5=&N7%6*g?5gulf z@g;w{-)1@uLWB=>3!U(auRv_75~|Coy)P}fK?h>H92u`#-UKQ@{Zsrs$0R5bkS3`L zVxQ8@;0NBX+slDymSX~xW(e1UjWTvW2PeNzYCH?7i?%LAab8V@MqY@2QI$RQh}v$E z;GzWogV)*(>OvS4{}66I+zNOc`^fmgJo(YIME+L1Kl|t+YE$E)?4w=YbSpSwmo&6# z?Y0*j+MPNU7}W={u-2MHec#?$7BXo0n^Z&rdkLV*7&OiV9eqjG(WB5xlM-Nj|8WD? z-*?JviSpVQZrg1o@!W0=d9Ej_b!SJP0a|X(=8&gJ-g0;}3lYxD{6tex2=@4!A39K?1n5ySIN+o__02O~FqgFmp50zT<$dGIi3(rvx zTGtvmhqX_Zi8YD7b^AGcFr5j-yxgr1rqA4MTomyj)?>BuiHXPrDWN0=lv6kHXU%MA z5{RF(2H?x-td@EvsASwXlI$*V$d`vTL&?P zI#kgUbu0}8@uh|&E`w#&&etr@o}&Z*u>!M=q^If3-hTuA=laKGdp{lTaWSYt>Vd)w zsb>rv=(h+n5JEOdpNu9B&~9s0ZFh1jk0O#t2SL?9giA)H(`KuP0-e?C1;+>hw@=dr zS||C{vvg{2QX+s*w-Kvrz->V3DZ9kr`Y#bDBIouA=C+$Z)2sEg;xGJC)`m!+#dsQTh{o+LdP@P(w z$)cr+AyURXuHL9lRMJP!i$p3rIM|??K*mE)D1_MQf~dS^=r&saeEByLDWPEl1RdQl zt?meX|Ngq7`pJC2h_0-_D5xin)XtewShd6zVQwzB9NaRush(m(f#_901dc87_ z&dwr5SG!e4zX`487d_eKg-{>k8#!}#i3R5jUr$&)BQS#3xbstzII?{LrC^xQ9$Att zVlET~e3wmI!tMIW^{?m3`Q^Wb13MH;+?|1=-N;s4&D!KPpQJh6;2RwE{nL#D)4*eCcPL|}9K@z%P(>`_qvg2L0zcnbd8WAv>=VWjPiEHZr`C{y zaT-0lnl8*o;TtadOve7U@=xh0Z=p@f(REb~gAM16IwGU$>f$o|v~q}T6Fnd9tbxCu zP?&h~b94BS(LYD35Qfa}SPnYS1#Sc0ns8f;?nKZQb9IpMq6Fg*(YG!=gxbO+ckdtQ zJ>SSYJu52*xe$%@WE~44R8Y*jTxa0yF|lq5LJab)>z=WP?K8&ca4QSaWaDVuEps2< zAZXdOi7p7BC&CXS>N2%i7>~ zN3-YM3_pMdrqv-Ogu5Be^pb{P6bpe~a zeRk$P^nyaJfTxQAPv?0g`Ojqu;T--_pGc4=Wr7S6;%X0F!?GqrZAPtO629FY9I`Ty zOLzZcQ*_`B=#T66CyucyLr{2I8fVv;EC4#W*QVv}9C8KXyf70!c1vKG21uCB^z}G5 zmYmgjC$^PYu(xXHi7_LvX9O>|36wne4Nk6V6`9cg2{zfV5qxqrX4;o2*94q2h+pY~ zwurq|B#4RLQ=SdV_a1QHZyf#z!U6;s%KqJz1l zAkU{vX8nV(K>EBo_j$IfeJ5$o74j~>$*D@PAsG_Z*fX~PmVByFSPIp*XMJ5h=5T+~ ziseP`0ta-kiaV!SnW+r;!?(*oP(}|FIfBerRbPZlUO~=A01Ftplx0l`9TR&-K7f|< zV#ncb;g>^Z01!qnf2$KTkXS$s6@vWRL$|QE@S!_HwKf^)V7hOjx>pWJP7`CjJy{as3vEInxPP_sg7-K>dpLnvz-0~&sW zv~S*&{GopWHo%c#oxINBuJC3@&_n9y&(r5FUbJKG9rJ#G4F zlVMQxrPItNO!nUOMm2UQuBDcvi0DRbGsc?_Ib6QV3=FL7L%2U;zw|ZTvAbYCHq(og z?ZK%tKg_Iyb`jWG&o>7bIdaNEgSkNnN_0uY`UkN-zG1Dl`g?O+ZD-F-%)fne?c!h~ zQCTOD99$@LC3tN^%Q>gcV$p-y>p&Gphacv?rYI16sJZ-Sq}QHV;IozsiI(J}^fUsp zO5Gr_C~@uuB%$JV8%6ZExX+4T5SNG_H}*%?Tu3dEpaqgg#mY@Vs`l~=)a%t z5xI%_X?zs4KTv1ctnyDk(Zn@v&c{s9Pv2k4{{3fEg<}e6wzN1FHW|8ayYKAK7-vnz z4Y{wa1HpcCCMj_>o^B!zZ2v`r;0?&`!?o~dFX8AIo=FYyG9A$(`?=}=E_Mwj=HuNUUpsLiH_^P?cO`gAoIC;*eem!9r z&PM(<5gOG)I~0~oOW4tQVBHq}Zss#M0MtKsnR^<^z0cwSyau#a4FD(Z)6H?DbWnv> 
z`S}{mRiwI$84a{l{U9M7b&0h{c*<&2CzsY{O!+mGOGWclW;`uKu<@Ac?i;!BCODYp-7$QNG-9ZGiv0T@t}`_AyDlj%lcQ9&Y435IGmCYJ6f?ES&%N; zcRr6t<}x|DyJBP^>R>jZIj5VMEU2eatL}lx_?wiZL_yU8Yw{)Lkt=sq`e3$unZftR zh;W&EQ(FB8wnfCpbt;)Ya!Bx1-zrp7hrES=V3F$q)ZXq6@dX8%d#P)-Dcwl{QJ&|U z%N}%6y7xm&(2AIReW(j|2&Z#Siqv+`vi&vnN8j+aE4M@kB~M+#0e(|UR6YdbxnF{_ z@Y9AhRKFUh$(2jlIG?Vk$uk==cTJkYoe>aK>;=QnE;5uVFA9U{h|H_b3#6~%xH;?Q zKsR?%1ROU8oC=!+c)Vg!P0}QOZG^zYPb;LPi5bza=+#<@r&JgSlF!{_>+uz*hZ2SKeFdyM#L# zCJ~GyI`oP5242UcOGK)$a2PruNEcqqC(H#ceD_LUl(q$Y!8~4C_|nV2+6J2pAhO92 zut|q#Gh!6j=A)Yi(iQ1gsDR zny2=0ea?msL=N&;Naq;P+pCAhX*lS7NpLg+i-(mzPOgIXHbgU&Lmx3=L*?V~c7IGI z-D>ur6YK%#E2V(s0-u?LxSks(e5B(fp~+4dyEl8I*!V48c+r=m7mm11d}_7e?%l|_ z`y?`|UJ2LUrL6I}Jt43j`Xm|jNtq?jt5xVwXyZH{(J#P1acj>vxi<}& z_{aJ56zm`Sa*v19&EDY5SVwq~8iyZ*(PBLviEKhu{e(lav%9*_81hU0uUwB5-^8F&J4=!NY@iO}ml!!jIEr&J zg=pG@d#5Dnt(qvdYku0_=A)38O_lEqMr9>POJg( zko;eRRMc>JN%DDi?4_ zbM@TiCiVsyRDv0~vf-_h7}lE<%tA4rP029l*xQ$}8i+f?-J>Y>U9%A)j}nYyUy7q-MNV|(Q!Z0*ox_Swmu z+30n#CCN^uE*sf2;BaTPD4?v4r6_z{CV-L0tw1z==Q+;Rc<8Xoc$7S6po50gz&B~x zr<>rBK9}u!7oZPC=pUbd!KveC<9@+xBEd`NFhVjD2k0yPUE%W0SfBV|zLJ;NY!dzCgnVnT=dJ0GvynBY~9G zFAq$VUb7+FHO0kORTy%X$cz=B6DZlD%nkBHGUD!(W(Q{)xz<(l`S*G-Ey~zXIkSAP zsy^Pk9jEl}@N1;1gkY)cz^DWb>!rY$k0;xG^3ORV3I;kPu0)Tpr~BD8LjEz6wu45C&HC-(LhV4gHk01oGQZ%nQ!laJjmB%`L45`gz}C3bY{t zZIsTkx%KI>-C5zu8!{etd%k|zr_y+qH8GMp43dITt`JGSl-6dwo3NfIm2RoURWiNn z!~T8Y(7smil3mjNeD9OfGH=~zPP@=iQ*=bNC6%*^4V8<>w+QTEdMjRg@AB42VNe@f z%lo1X6{NJyUmxk?d4^~Q2a~t2k+}Rk9sG+@Bhrd!DiF+}wNi?ulyDJy)q5f5OU+~8 zvk>z;uNfz+bDhDpL=l~K&AX!M&&l@bYTHOK`$ACxbUZoSiaG}} zJ9H0M&HUb%@*(E;yD*9KtYs57)k#1;{vRu4E>e`KNbP&HYiOgVls@0YIEcv%5c8kq z9v_+G7%K+Qt|1v3nNm&w9lzJe!gNd{Qq1AQ+7Ag6c6{K%xTB1}YWdE`o`#rng^a&m zMRI>pQ-F#MN@@BX>zTd>t*DNhN60w!6@`nQ-W%swoDObdnBnNQkfwsNekRmq%g)N% z+#2V}8iE6K1FMf0K2QaUB*wXqK}YBBuL>caq!X9%VZwWbJM_3x{nu9$2y!*PBIgS( zrk{o5JdZhO%X8grWbPEAsqG>R6q5yYN*^v2>1v81*Tc%iqPN2zU_hm%e-6~-#)}a` zYC5RtYIA2VgV(Lk@o8#z!zI`%C<<5~eVU6K%lJqtlTB^3+9f=I@8sEb98z@;Kk($3 zm|fX|aGna(q2<#G%=TF%2pdWd(n%yRi2u?E>H?@?cRzewgkKtu?Gu@?Lia|@l3fmT z99XBpJoC1*4|Siux1XhE?iu?9Y(m`SimCV80%T#2co~Ym%8x4iQL(nn4kEda?LveZ%ccf+cHUHp%uF3%BydV~sc+aMWH zM^?)_#af;WTV>lC=tow~Hg|)Kmpk>O&#seP@;?WV_=BK$nM^WgB~|X*e}hMg6}fL6CxcsK2FSmK%z*V~Hoo zqF!GcVDTp)Xcr}t=V<3Ib#vBVAGf{BzEaijokkN*Ig|+r)Vx&p`k1IhMmguD>-l-J zep>_cK*)k7FTVEE+34=BJh`nF1rxqibqv;s#GbNqEzMFO$OE+i%EqPD#>Lg)thj*g zTLV+keTS>Iv%1|R9ifcf`?<-6CVJxC=axrjI}}gEFP&PwcdVM;xO}={*}xz>G%7wG zKQWA5q-d=;fGGsOO>Q|^XhinMs-6SG92$EWvNAqcMcBS>5MaM4J zuOuRk{r*8i8MR6NeZ#nMM^ukY`Z{;W%6EbB76kh~wkf+T7F5@3Xge#nqHMW@=e~3_ zRZa@-A5PZwM)g>gkjjYRcj#NB2tJXsM%xxuIr1X;4h{k0c|+6O3K?{`vsP^*R>$Yi zi2#A|@0URDR5j~R;m_HUvZaJEF6N0iQbhC;g6CyWF`kwmV`U;-DUw|XWp}(w; z*~I%Iajg%&20L$uqbe<1rBCz~#5vQoKU`U$sodZumwUbxvYN=p;>WlfR@`)3=>bn* z!(>G9=5K-BVB6Xfhpg~%7t}Of|7Oo-=ofPJ9sZqV4^MV(<-{CShch@@G5e2L8dsKn zO5Dx=!}uAvYjU?TXH`p-vbK`vk|tTxA0CP0PZ7v;o$ z_pFJMt<@C~BYUpqwb@ohV%X^7)*?pv54yW+diTnog!HU4vgpdN2NOJ@p?aJ;v>QXp zycY!sWCMjr8pq3c_AG6<+uqaJkb`zriK^%FWv8)fK7*_shZVlr6Xu62$g+8xxChiJ z_r3a`eR1bdz4S6tED~!m@7?F8y_+LqCDMWXrM1OsJ0t(7tn__H0uP%iOa`@yJw3auE<(- z(h_Vq{LOwkE*q=XzD+}0SLFm3KQ&zb{kvVt+X;_0g>`?nQ|&qTbxzElHp1z0b>VE< zJ1x9jRj)MuR!HmNHQ{;FFpA8#fvkQE?)XCZ{w_{1;vdpl>G%Feu3LJpz4g299t-?# zho#iFE=}Y`v%`uW{h#lkmU1|pS)MCT%#I>BTQmZ@m51&J_C9&KlKAV{4+jT&cjkei ztYeqGHSCK>iK9-H#uC}5b{Rdgz8Glg=&oYllLSp}wkt|fk5sYFti`MR9DXA?Cgn%$ z$1ZV`h9%R>gKwwobqjo~afdtszqPROq$@pX0%j8`0~12g+`_gMOpjL5XeJJIxH7oE zg?`@^q>5?N9r;Ec`?XfSbpp;0*HJHw5OEs z*{=o8^*g$nb*iKXsy#%#7RXJ^Qb-o0ug(PpPeEGI;38KEE{2gSMEp(KXHfh0v19w) ze!jj?rX+g1E}>j7JWSJmJOexd`W>d~xH$6WQ^GuVR$l(T9F>lq=#ne!*qF 
z1MR!?7s3*qH)M8#2Kv{!E0yF_rTlY91_IcfJ0c%SEScg@KOA!4i;tEMx(}Ws74i5I z#V?%&qfw;bnDPJkfG1=^%Ksk=KsX!kE26>di9S(g>iJ4%<$|b?)=lqks#jDWcW3dFbMD8KMZF@biWt}T8<$K-$MHK(?hlR$7FkLDZ^mg5Y8ma!vAB_PB zl)js3)P;Sh%1hMHv4m8CWUIAITGv~g)u-N(|4N*wvdwMNhHE0-vBE|Ee8k# z&J|DY?WT#F(MBK20;jJPtNVG+aV{BgY!K2S0$hHLigDHimrTS4fnO!r^CYyTt7i{T7O^-q;v>H2o@rbs$FuAjoTfVbwD} z#gRZ&QU5FRZQ2fcXS^j?F%!u2Rtwh4%(UMrzW@rg zVBb^#xQi;XW8q<164fBzSo*oMC^;Bx$mFnWN*0B4(vv;!l_oj&``Ns}Xm@|GR@L(# zvQ@iI%g-P#^MV2co*6&1Q8&v`zZ64wwa~F!`@3z~x%PwSXO~-?xoYN!)Y*VG{r{mZ z9yBK~Pa#A@mWy>qQdYH09ka9M=+sRR#yI;tJZScN7x>a3ET`GBk%3E_K{}-XjC<8b z{(KFixnN1HYkLj3F1A?>i>nNOuo}88c`(^7&2uvS9ET*=L<=Cekc{<{U5uckt0c>e z(Q+RR%nR7h9Iz-Sl}`tNSt{+-%Gra>8(l&)y-#mDQe^Hrn-1Ini8APs{}+(`Awrb^ zW(7U2)Q?R$lldyP2S8VMI1c5TtgN($CL%~PzkAI~rW(m`I-t3cK>{g)Lpj7L9wLE3 zo~5*XL#*#&z8+Y&)Sk~x-1-_tKDd@Odzb{blcec#Lr>rT_W$U5?{K)c?tOR=C3+c* zUZX`fdK*Lw6DpC{=9hDVz!mBphL*^m4P6&p{pQ$Iwj96mhB7bt$f{W#n+vj z(V1Bz@}Me?T2@hSTTo%o1?_sUmG-ZDVX24i3TxD}q8{Xs50(% zZpHVuFa^EU(T&9$@Lg1~0z4Di=z|bO-~WqajEo@`SxI9utr5m`kskGPkZEPNPQF`x)<+NY4 zJE&%<>VSX0$X&MghwO^%R-xJ(H5lc4zQvTgh7mBiFS8b3*s!+Mo&M(_805fSVvosgEkE~t9H z&DS;>*xT+U9~5j41zrGd)$2AG61HiJ-Xf^_uuAIv>>%M)3SHQ0Pc|Qk6U)M(4vLQm zH$y0^OrJ_yw^VDR-9HPi%p(C~k2d~ug~%xhqTeeS|FMFG?qch!*fokG zKy^*LTS?`}rJ&oG$;R|;%MXoB0~5EYCymuCA-zhEZ#S;LEXU8P93UTOE=ijK&QucuLg8u6t=~nMeJ^m&Af3(WR{=V!2 z6&u0vs*re8cTK)d33F>Y+qVrC53EYcTetl#WA)E9u>rwuTmad>;>5AB>SnEDZ<#qSt{u5pIz;9tpr8I^VlW(_>BG;_^nJjPp^1 zY%K#JVLECoJomq0(IzN}@zuH&_qYSY$GDbm0qP~0(r)1@7tZ;w?)`g1BN4ai!&Z43pqpC?tUbM67f8Ae%7<@Zo2}(T=sn_JPLLAy_=TyvZX0od zc`3PvMQ&4p7k0jfN(^x1_XX^JM2%_<@6~&q=wwk@J|cje{=t_pHM|{w_W4;q@-b(w zgoqypv)fcB$oghWhEe_jFXnMUh$!UdI)BdAPzt;154~U};um0Dr0l2p?@`ejI!X@< zl6xI;!L^Mvb&neEl{{)cD3$%t1=~`wlVMnTXQV(waZda6+6Vuk;~V41F!`-7Mh(Bpf44)$9(;Z>Yk!&2yr;{sNxd_>2_?K3&JH)#g z*pxe71QBt@+9aJJb2P#2^i0dZKPrxSl>PmK7-ZhNfW41GANg2SYb#ax^av*Ge$PUy z$Tz<-*_aw{w-0}{!Sl6_Z=L||G&O$9j`DGFYPVN3b@xGZ+1Ou3bRtV(e5W&(r=9IG zH?Ip79-X*5yy&m{DNEJrTNtBBz4U-=uWxDca(Ckfo2A_Hpg4X#_}@ZrF+k`UJ)D^6 zg>7fi;k1fL`pm}Wc=TP~N4vcwXZf{zJmLzW7~}Uh#*NfqCiih&SLz6mAB44K;_fXk zH0T1#S3CF+NL4CJIV0u0FnfwljEg}q zU--7(&?73>Ti1RwU|^h+wZh*##AiQl#SB1}!?VWQC9ibU-okn1uZERX%0lH6Se0hb zGR&RXWG59GTR7D=Rl%w)l?RXJShPgI(AOJS-|R)#zP3s8f^5IW1%q~FI@mQX_MA(N zXd7_&B;?!pRrUd|<;mxWOb%7%3eIfhG|_YZRcj?;*!8pt(D#GA(-MENexbg~pUcD)+rS%0*ncDQ zdx5FkMmhQbg4cmHizL~tsWB5@hZ8jSq1Pfw{fk5RT9l=; z9Z}ONw*3IMx4NsCN9nn8$N)Eb;mMQ3_6bqSR>nC&i4>`kZP9XPvlCsB9mN8lcmu5n z@uh=nS{%pAHEfDxBX5ahG`hvBst39b4i(!aS)9HFCbwyb{J7`l@kLDl(sd91Zvc5M z?5l5uSL9dgF*)gb&i+be@vLQSrVEjSu5)_BaBnB`cRI;e&?)Q15K6pwSs`Qa2oY!U zP^-S6H!h-=jAwF8V_bep3>y{8;|f()r^!qd*S)Vcx#g_jZIL{)m)qN+{YU*`g`mG7 z%XDR18&tHaIwHyCcw%DNLA+BJv%Hs?u`52EVL$?@y_E7VrE8txuaD0Zr_n1KK7$n5 zttr}_YpJ~MkKGw^m-2<~tkKqO7q^@hJszD-Tl>S|m$- zb#}WcjbE%EvII%Qv&Vp*gp5{fo>5vxSj05Gu$j!j{I?HcEUrIS$}PprEkS`~8Z0h8GRn4-* z(`XV#3aWd>K|q1JOFHh1xc0ZX;pF>ja$B`$v0>JjgGVyFt^OY2BVEhNW`8}B!mO|u z)O5ZZc@ZekqA113zucbGI1XMF{Xe4h-PPBi^G1aYG8*#+G$qpBbuX6^h&g6Ictx%v zNwQiebaDAgF=->?L?)?D(gkS1ix2kSi4gR|In4o3b(&YnWE+>@geN!H3mzJV7YJiT zYC^=3;(OjAsohLyp%+gA<;%#|w`_CD6=izU%5s}^J0(@`BIXFSlo+Fm5J9VHn=d6v za^y2gh=%s{`167~;8_jJDj?bz{E3GxaPQLU&%p`AsKuD_6x%_fY%@ru)hg?E<1FKa z#U@}s0#DNwZx|;Jw#)H&>bGZI2V%x)YL-6t6+wC`oZX-^oBaIk_O6*wU(3TN=t@A? 
z&G{?~SoERbXWEh?9T%Jw^c!!1V{oI^nI`qA)$SFCf^*7CXf8EaIC;P%N26I(Cow+u3gCnvl~C z>qjINde|v|c<91F(W-^)so%+GsgTT_gm`bZP?z|Ne@a9zi%CXTw!4t;No{geSF@D( z;Ch@etLf0NR9AyefC8xhII%QGwmH3TI-^~1j+``;i?~OS0Q`=+*q%?xHRc`O$|O|s ztiGovxuUN|p;tUh=EERc8x?syd&hJ04&&sbgK1KNTZb@5THMy-<^C2@V#~gfhKJuJ zRs8%5_|!DK%B94KF14g=g%x_i1YI7LE_bqNS-_FIAN$SX(?0Cv?y{0^i^(0tv9n{8 zQP^1E%Kn_dm5KhivLSF~4F~bATDX^!s9c>N-z-^7Id!sF3|I&DAs;e(xV5v}E|u#9 zvPx}?IZ46=bn^Q1U=a|l$30kA&ynAW2tudPgqewDro5}1k8!-4x!!|9P8z3~tMMkr zxdF&KLW4J~BSH&$6^77JdaidQSr8U+VUpWW2ga>J+ZYJdiEmnQtj1sn4TXd}J#dQihnvwNl0IOEa}Z71Buv zFiI`mAB6rCpc_e1nq@RruL|e3Gis{Knr|T}MYzn-KylTRl1|z@PL0K*%(99g48CEh z;O1Idy_L$F{49^y^mU^P*|4Wj-;Lz5kf)|OEi1!LIP?{)X8&hZNmK*vrq3J<0YQCt z`;+TF&crmfDz0SUBxMOcli^tr3K zD%9u_<4DT!pd%$>*L-s1w~rtvb30{TCR;18{n9SJ*PjZ!#Q4S2TZYv)g!C*T&IRJA z6BjnpA1^=u*i_4;2>%9MsE=KPZYpf@+3O8ocI{n_Yh2DHfNbb3{kM}P^oXJwAOGxN z!0_Z;#Jz(ApeD*fKs3%zJGd9Wh|8?d7;@0`{27v%B~@wh>2I(^aUFWC`h#1tPm){r z196iMqP0CZcS-ZIFu1@6tBz!2$whk3=-pp@y?uG< zeZLL*SEc6eIF7%Q&0Z)mWg7}`*5WjHWZIHJcxIa6=28vIv*%^CZ<}`V$gVQZF-Q!F z^Od)VFC&4eRVC9tAzO`kg_I+1PF#g<${-Z0ru6SD{X&ckQz`H<7v#P@6g5l7)_$wM z=wjzDG4-rN#tVOpwS0eELFFG;K)huF8q;)1%)ICD%|V6O*tLLl7A?y``2}H}R!Fw~ zK`Tm&&;b(|a^nP&7X7?3Fbfle&YdPD+=nKXorwq-F2Wz#Z_Fw0qN+Ih4i!|sVAhNY zj&vU50%mmI8$|o#Rko{p+>jQ~*I$hbovhEC7R9Y{#Xk4rJIQD=u$llD+WfD=N)XCk zUeLb?)@}%8?X~f^IN`nYYyM@^-gqyEF`Yh0<&DD)+w5?XQY_IUychJbe*^nfBCMTH zw}c9WMJ&iqS}s5R7NS4kjO$CbP}n^j$`*1mvd;60_cA2{f-QJ?))GAKo8l8rs2F=j z`&mtPI(bqAGc-e2eltL31a+G&Y<~e{G4I^+BX97k=Dm_X?s?}gNr8((rbO66W67^6 zJI>L=*2`bfbrdhL9`FZLend`hT8(^Sd_Mq6= zerS765JjNaW@o-X+;9hsLe!G}IZ09cNv5-+Hj$3$y1>p#@OE~~tgixA_c^$3@{;;t z8+KquL6R2SQk*Vc1W}~`1Wm`Q=5SGYpc&|){(kr2k5VE21CuSs-TAQY2QDE~+ zCL4CJFd5V0wZ7~D1925LHR!&)N!$}c%(hvQ1sT(EIhN=^*!r4?1LkK|qE&kQs!oWr zRO7GK{YaGREJk6vjG&Z0=bfYu^RSBD-Q^FjsierS-s8R58*gJ7p~bjdo0joLHC%z= zvvcX3yAt^$^7Xh!v9SaSZ|#_Qd=AR!O3Ge=r~K@K{^WW~YxlBhD`HxRQi&ViB?Z}} zd@NB8Ld>|8jEW5=kGr%}JMJjY+sfw^wyMIN6}VZAQM02LlQ!`m1k8ObZKon3SZ* zSoT1886kqh{M_c`0b$=idUC(e;d5;^mKL|!73&@g&9DT!IJ58|$BNa4Ira7V$XlU5j%ZRO`Ccc4 zYWdMj4&NDrf5IddOwrVd$vU`x$N|R&NY?-M>=pTaGx0ZGxyA54ddp@;?(3D0O^_i@ zW?@0H*$3G&-nWC`qGv+x@arL@;#!3uTI2<6OY$k9+46WJ7lvN_rOhWP(ojot=Solg zvH6HQOje4r@zOPogerR&ACT%=PEmT|sd6iP%sMB1w1IAa$;H!^ zXloR_d?)*_TG=uM*~|fsP#sBWaMovGL2J1mPT;sc{?dd9RZXd$sA&rZy+tX{(^tjL1Ik!`B4cojs+w6sO)23V` zV5#L#@T9~ScV%LzeND5MZ=nKZk`)5*mKegrhy0UHoyZ(Vsqag-DWRCj{jE1Ngur(6e<25-B`WUPiFz}> ziG3a){#p!Jv{Q|}65sPmN~}UC=m=TgQ=)hHEI}pCXBST8mfyFt%yf^`(yYnuF7vv)FY*Kq36?iex4 z)rNRxQ~gy7o-iHFF>tk6&=C7tda(WMpn9dyiZKnya62=wM{O(GAOID5AiIiwV_~Dr5Ys!q5QA6cO4>g%R}5a-39AS&smiwupt|1%JQ=1W${uT=d;jzO%xmHpcVTUd5m1Hz zMN4;u;1PLoGSyeuyhLK^;fSaRWiaB%3D1!N&g zs>F^WGk3+92C(OTcYUZiP*X*fkt_0}i>wNoA2*lB>VBWs+d?A1 zh&)ko9Q3Q9H7O1g<#lT_alg?eEA8p?V7^l;3*}B3sFUHCj!-Udqo+YwCcP-A^x>IK z*T!@fq=0hnT*7EMOiU(sTY)zcm=_n6VsPVtE0IvG~W`CIa z#m}Wh7KB~9nqzO&bITPaZ$dp8DCpBBdg`MFt2e}o&E<2Zu?)|S*5UR0{B!_kO5Mmx zyOJmS#e=nQ-RHNiL~80)R~LL(vod7BQHKq~r=I>)c4WpWRyd~*f{3Of(B6BSm4$vM zmFJR&CuX5K0JiqzfG|95WW;eJUtgm}{r{@0LprXZ!L*igL0@<0h7`WGk2N-~TLq^g*R?5m{hrV*-+Y?9Af_tnul;e@d z3u;Ga4R53$Lcsdhzjfcz%0E@ic0lprnmMsvCAwO`G+*KJw$F3+L|CtHCniYZ?m*0$ zQUXindviWZsx1oOdi^8qZaHtE+%X*;kLLO6WDY+MD$=vGd9aq~Ge1UBMtKb++}Y^a z-Au9M4+t2sS`e)ItUqwXR~db{uUw*!5uZ_x>^|HaC2g5iS(eeTa;S$uF8<+NmjwO^ zm#41)G<=2}JFR&k%b1^vhB*Ug4@_hZ}ja={=Y; zX=VGjJ7xPCDAgF3X`lV?+v>x4xfzvhyVOTU#%dtne#!5eoO+be)+F_oX+g@LNpE1f z;QDY1KTDhMGg1=9{iddJRO*!APTI%?FMR8kzv$l#mz)bQ<)J&2;Ov9GsC3N~t8B|_ zfwgE&_hmyM&s>P82(?WJOYTRrXIi^R!u_~FzlkdV0~}>xp8Ie8V^W=LvRtV1dm9^% z4S@v(()HR|;h@P=J(e`tp)YsnwkfGT>3^Bl%g~D#Y?sb*axZh^bLyUzXqA)cQL)=H 
zv>NV#KPDH?w0d|Hj`URMN@m9BJx;t%DheMne+jMC65a6A7 z@@JWeZyZ#ead!N`sNyU|`%CJWHdb%#N*oF!75ks8%8hHEW_pFlOWx#(*csnQuRGX} z>ypV+&BzE;t#rj`*WAR}q){E6-bF6yY%0GF5+De5hM+F|Nh>=!?iaKOKatQa%wNkX zZ!y82&PtPnb$W)Q7KS4R@>{KP6s43{9OTfMZHn=LOKTA?H>}rw0aLxI`y?M*kq24aNwgA;B@t_ z{E1K!F|K-`dNQupZeKf|u0SOUzj!=%oz77@@Vhe2)JNkd;w_W5%)3Z(P@nJ2aL3#j zgU_cd+i5N~OFTLIj=d?Ac$`F-yj?1j5;=ek9r1pV0VDZLKkrmk<7i3=Bk5t#H}9dg*cZ4H?<^@GZ&SyAM#wTZ`ttHxX7X2^|OPq50jpw$Z2uMA6B1lF4I0(vS5+BeDh@ep8 zcFKZsN#T~3_*D)>xU<9T24YDE(?6Zd?u9l&am3Uo5k=|n4s*G!kIN)b#=s7;6C$e; zMzLktquO)g7d`O=xE`R-|I@~YRb+o~Rqsv%gG1^}2sh=FYb4`^A;kw0PmBFH(dQJAlamn=bC*@Mcs=q-)_oU%>vw`sh z7m-?0K-ThaJ&{8<4GdKuG9@SGJ-|DY1w$ID89XJV`NtC#{^N-s^DzcwuZG~Kx%_a(_LQpjJ?kqLeo-Rj#LgErpJsFLx4Vlgx(c z@`DM*w2aM23{>^XV^Ni^-?ok#y*NK*HKwCGPFneSFq`?Jg%u%$|9`Mv zCPg(qS0f@*9ik%Dt?eRpZDc`<&Q;0p*1|kG*vo;((JrjL;w$jxFJ}FK?);~uay^`` zE;;m66FqtqO&y-<_j-$jrLp{ltYU~qA-TeVlP*X%`u(y>l8#QxS7l6!h}Z9#zOHex zeYZK2!ICm+-(ID=_I|woCUQ>PYxaE<5k$}6kF9et-_qBxmnq^O3(o#2!Cbvqp~=_O zviKF9;6jsalFW%&7r0#-GYo1i@}qxfp2v2q66W(lP8UXUJeDW2>otL)f9-fKD|sIr9P!YD_Z4ZR z!a|<6nL*(&iJlbw8q<7$2gS)^8=egF0XgB_n_)iveKEZ2U8)W+&ku^=;e=1|dC?Z{ z6J|7vE&(M?S$wXq*k-?fXB%Aq&&mdqI66fzZ{)juc)$YTmiiM#!{FZhptapKIpk=v zO_ONeTVG?%`hC!vk6+rqxS0#ut(;pEFZ}d-Ek1NgEwu(qr1hAWWX-_khK`LV=ZV8D z0X(eFx098v7+?DmMGexrZIaY!#R%hC)u1Uk+L4 z=hLRgDbRJo)o8rjQhMBb=T@tv%wK(`{gKz(Bfb?c zUIq!dlwodFo;QfhbvKA?C>QDc-uup_2|lKJmSj-zU{3rhej?P6}leZk0|*SD~p@wh+M! zK+B~|YD;jR8-t{l^BY-ZA^AcVR7$aL94NijaOo4*{}qTmSAyQx@&c%*0doC%e-)}b}2T5CdhYvtpzr2sn^7>srD9G{1ExtGydY{ zSKuy1vRnKR6!rg<3I0xnbJ?73;&o(KqH!5IQg+MKY`NGF*H?<` zFrz`z=xJ}bMbsb8gsV=dLrC@&3xpGrG(uf4X^~zVCMdyRwOR^4a(_W^U}1L!F7P|g zUb{MHvT-%wuD|b>cH%_ma7|`soi|G0uo(R>1EvCkxc}$8f}?q7NLP}cLSGR0%RFKx zIm6b{Wi;Q6E(*XrV;8OZUK~uWnl@cbCkDFur4C+}cmkvD_UwpX*vhTk?oCaCznY}P zNg9GSJfBj!qA66g>8$Yf(~|3s(-5dx?jYc(^%lGoLb0KVmxoO~3DM&1a7t(TvAIs| zKV1VX-5+~a!Oz#k+>K;6+>-S?#r44X>-~1x0uoiyqKx&v&lnV6 zBN%6J=xA-DH9xL*%ZaM{g-eS>TAw5)Xei|bN;Vw$XQ5nxi-~P9_&3qg@Dj1UhuG8s zaXdor@c`Rd@vkW&pAagQlYRa-z?EWKyb$n4+E^E1TFFl*MUNb3rl^FV?y|P?d-J0V z0|_s6sb6=_p7;cvKiI{+t6<~8uM-tHt0%lOaA`Q0q-dsnBQSDVThRa9`AJ z*4N;%vC|u~h}V}dFQNrJiK=`oz!2_T#xE6|LSiv{3lK}bNK>rU|E-ksBylMXfb`P+o9K+2~P zV+cIPg+={z3V}LG>Skjc9`+uNoS%BrI7rz#imo`<&eKAE1h`&vb-M$JEXYUWv=U9V znr`m2=`>l_xVUoJG(>$1tA9#T07~e}W-b%F6ZQo-DlgiEZzePE@81qkTti!%AV`?} z$vSf{EU*C31U|qT`6VDd^70O2kruu@Z!Pdj@RMmkCI=Ax^o@%fch+eT0VW@K`5g$E zc)|{w4a_X%;^?~_U$?=yyjKNo+}l6<9vKnhZ=UMn=0w~3dwpz9YO@$@TiJRm!T-oQ zu@{2Ug!3;xB*VtJwNrNZ^03~oOPE(|E$b)JvU1}jzhRn|IG)-Um&ogEC{jT2(-;za zy~?aGh(1@2bNMVuGj=mE|5gb?9-YndG1Nof&H!H!*4^@Nq-Ark`#~OAW7(9MAw$qu zrl_PvQP|B^5BdAzK?pTcIib1Gf;ptE;SyeWEg*c_bx(R<`iR@iYOOSEWL29vq^{g} zwcq`!QG!__&p5e_feqTQuZYO`w?m@{iloIGG9IF>iwtyk2)`vf)WW`u2?~>dN;|uf zS8C+9FGW@eEhB!D$iq2$0NMAr#01Z7WuK>}y}Y5~@*^+Odyp~G3BhxIGI#-El{KL3 zK$_E*_Odco+G*iRfK`zr2*7uitWjcr8=FV9$0!`MC_WSk-=!wpclr4&Hf*M^Y+cEM{}_A~gCZZzT7S&S_g||d3`wNg z*d0}#Alc7nUn~F{5@#ed7U{d|=D{b$thXk_#H?guM@CZbztbj<$>y<qmTkD)c)u!87nVjxh_C{auqfo3$BDacLr@(Jbc4$Vowfow#+! 
zn$Kd3tI(DxP77cxtq{*83(iYKxk&+9f1`QykxLk*7c^Sr`-9*nC(v3z+M)Iz-#8u3>QeseF-fGXL+|JC` znMsX`@|=5;MUVWSqBs)5U*q#*gRfv3!N6tkI5_BJgV+)alh5F7ariBmnF>os|6;OffV_t%GQU^~ecJ;WAh+jC2l9;% z9}1m__XC?;!W}R0u3DmRgP1JEI$Ry`L(e;r;9 z>%Q&1bd*ULhgA*gWqJRmUd~Qe2Dvj8>F{RRmG#rSCPLYgPq9Ne6tFi3M7l)E2&;KyfE6{}B>2@Sfggzx)Hh z9>*n8$VrIl$A*{eD;ZF@?MtvQXNsj=e4CG%;kOHVdFXxYy-^gv`c4wsR}~o^Z(B>T zgY;i6;09`a^Agg^ghR&ZWMA>lXRIZb@{WZc;Hun!OyJXr&2fo+(+pG z5#2^L zEsY)bu0kh!jql+(05@A{0%i#S{*V9$4g z41c|Rezmr#X|bksF>20GHr1Xx(URzdzSoX9@$Zb>w5X$xRg!5fGlWID0HZtEj09Xu zp(Pq+=LqfxZ$Za|MVQvQkU-Y5fJL9ayZ<`xU9Y}AD&!*Z!W{6(8?FMh;(xgc0jVKJ zRz^>QQY4tj1MjRBx#iICnA}&jEa*Jk(dQ9@_zMF2hJGa8Z2MdvTlh(bM8}@tZN0zF zaS2E{EvX?6kOLc!2I?6;kZ}IPTe(Mgv-im~MNvLrhUK=wngTr5KJ|nq5zL4Z!m=~oYl|~m90aFAS#Vbz0+=6A?p_x;|h^s00Le z;@%kTy{)=2wKQi#e;6D+IcY0(lLVrefuG@yM&vv1JoncdK7VU*{nnTctZ)QWLAg!0 zL4+|@yVXXdzBq-rYF46n-~U{{yRA$dpFLY~CMuwD2L|HVwq2*me&s5!+d&E$X)fF{ zmXERto=;;#jPkD}htNqy<8hmKn1K(DFMqBksz1z~t8#zkiD&(($q%q*xJUyy$ zzGb33G?yZQr00_Tke@)@MaeaZ?z!jD7}^M6_qhOay$Z)ayii1APs6#7vp1@7 zb2QN$+;|1kB0{m_%hyjjbcsRqNxJjsFcghHVG-F&3t!X&yu!og0ZbtznlS;M3jI-y z_f%6Wx-84Eo9@$OZGg_*y}2t68?OxGBcUBu>|_frB0lh+i~%7XefEq8FF1b0Yrtwj zctVMiXQo$G=0_JpUJSwKv@Ij6rpci#Tn5JMdE+KST>}F685g5a&9}rMl;%##Je}@+gz#NFHk; z1d&Q_jMYLaZ+t1Qw>1r@noP$6mK5f?mHQWhBI{aGTr4J7CG&irh1s8eozh)pM;PD= zHZ%b)^sh=~AWF9)qvS#7Q>Un0R5Vy{N&WkW9rv&~YBh|Jf|o8N|4zWBw$=UzU@u2; zkt8p)M5QD6x~x*#NK)nNvhf0AO(z}WI_^yd80^*P#Fx#YCt1Ob&Y|&Ou%PD-3?`WI!5&Y&fhZ2J^KG0nFQtPcT(5kCmNd+hmXc#i=+Z zk&7Qx7bk|SC4jKPXkqlULOl7V0g*0{)K$M1^DQQ8x=;#30@3oVd;*0!qx_UopQUK| z|2mgN?&^-geKwHAEgjCu9t?MS|2QJ`W>#A-eqsf1nn{F`rN$ou zmQ+^a3EJA?x7hu}VH2VS!ditZ1?LDteC9b6XsJDOCd@cvDKXvGdhFM}(Hie1aEWsF%}E#F+77YM(d zjIEVoa{ojdwa^PvPr_Le_@A&iA)*jER#t7Cw}=ty5n`V|@e?Nc_EV85z9J$~MKaoB zKE#f6zWrseTJug*ElrmP#~d6*j~N^l0_64RctIQYeCwBFe8`*;x%MA?)q z9SZnWTvY<}W=yJs(D6|y$y?IoQSDRe1UD@;rs2}BQ!&3TrKL3swt+g=+I2%?ei$AH zA;B|IWd-UV3j*cBPh9hD?A_i~~M8e%h|cnOkQ(|DU4 z%^RIV8%g)UCH;Z9ErJMM(0f#@0-dYp=vb9iFj+RIh6#`gz25(>!HqR2>gyP0iZw0E z+VUPxr#|({56!Pr?>lMwQGv>Ucin}pdmi6o+ae^~xXR#eS~nsxavk)~9%t24K+>us zz|&FEXt0c7C@2=K$;pLtaoYi**&ym7U`{F>>N{ixJ~u#d!uVX%B=I0jNj?j57hwu| z5+Yd_2RQn4F6!=>4yKo1L>RnFG@{(Rl9yIc?GQzVvays&dq1w1`q2bznMea9rDeip zD(uFglro~RT3g$O6wO*6=2>1H&z4oS>enTt&j{_S$$OSggsl3SJHjs6kgZ}xpV`#q zxS|tNke?Vx6#8QW@<69kU08vF2(p$AVjVHq&_LhD#1eg4q&>j{ClWw3piF_JmZI^$ z-j`-zpZ#>2a-AbXI3#|}0271Q;hiXeuQL$tJ~qz>9`e%cA*Ey?vE|%d3Sg)rG2B%Z zkCz^=VoW?0DrzM+HXGe?7}m-9O%*;oaB*K74IJzv)zbz+c_8VI>taiwE7RI&j0;y9 z0SI;hb6r|8L5*wK7V;1*@rlcHtK35f-P;7Z|wYhcZ-=ia6~p0TR&mBGGg62I@lqq1xJNsVmpDv;LB)&Wym z>z;$@K;e6sJ>_Y;<-X^eRe&%2jZfDn8%9|Pe=JuebPlB`q-7lvw;5MGWw)Ritw6Rj zP)If~$RHdx+jg*(bj2B!_p(I9KNu}byXz{EoqRr#ILee>B&M6lwpB;pm{Ht?sOz3fg ziNyui0X42~<^2@GU{;J7u=T~!s~ER-HsXyADVH)e{)SC}AnTIp7m~q#M#g zk1?Xy657wF-d&ND_<}rkLk0df_tOHKcqxdOq6E&b4Y+wU?AT;z6e>2$C zHPUoUQva0sqWSAQWP^t;cnTZb6KePHmxv>*jg=}NZ%;tt% z`@3>0wuDDNILeHg{JeCsa6`LKwbZ+po0t_{5epa0!gQcJxN(?oE+*=#7Y&Qp?Atyj$dfMV8A!N^JNe7jVN1nK~mgn}ALSg$4YyoU8cE+a?0uH>~ zbs&{U-TU#rO0tMnUGdQn8>29R9arMlk5Afg4Mx?^wsMr`d{z#>-`YOAFwMpAaV~Ydw+bOa%IRnKignTJDhiKSx(OTf~!FueQ+{Z z0m@jVfk;9?M!~GEcRc(HAQ|kzXQ22{pm+oPJ(`r>0QF;VpTspXqIzK;GOrVNskN)a zKK`oj|73K^nHxIrcAkr$aQ^L;&?S0u8u9h{Une%17@ghlBEpx4r(+FbT)K1WdqU6< z1^ER8ZxcDA#uIe1K7Q9kn*fZFaoDR|hGNe%y0-`%@6zo`^wsf1RQQ*Do$5gmanOMY z^~ArW;U2uluScu6`jf4#Oj(zX_-1q1Z13Ow#&UrlH>|P^12bbk|0es-dC-CL0Er+Y z+qY#Mpd*!war1cN-T)VlBew8qU-Z71x289>mf;qjJ)?ruUhZ>+MMaS^*KWy&oNlAH z&TAD8doSSLv8uYCWj#6>V#?J$+d3JMVBRA*D|d4&fSQ#U;i3>%iCh2p2NvrSy7bdu8*nCzu-}ClXtfB;SarYy zBw77UzK(f$vJzoq982D(f?0--J!@l`wIt?E7yA={eGOE6>1-Lrdkh5eY@<-W+Y$!p 
zf&i`x7KtxjkP%Ym$zAw5=3&e(0IRcE_`<9eP_Zs`hwLoLjCE+O z?Wou%8ev@h8^VdInPqIBXpmADWMvrR>l=rl8lnDxUiHxK2qcM^B3qJb3f2{S|NPUY(Q zi*WQI8DpBZ;?NK<^F|Ap1>?a*jNVP~T(apvxKSu`1gOpL1&PH{mE857UK@Qg!j6^N zp}1FUA*PiBh*>+5wMbcov|LaNOAL`G3{-v@PIsYH{RraXTF!r#Y+?98qa)>@+J3T_ z(y{qZnPsu~N{gRiKg<3V%YL4?inOMLO~C~_gTzt5OXZfQgyagZ7(K#^WC}zg4junJ z=>Kw1R|-0KZbr?r@g>08(fvN*(re#Oc?DvW3fSVxfY9H@S(UVwq{7E7@6V_wb^z9< z?&KqFYvA1kko$Ht$lvWH63|{kU;weJa6QZ`6;`)>)B*fWPypN+f|A3{{m`k3P7md9 zd;r;o_T43bU<}4ErwK#YXGyRz5{Ro~IS*sPWW|IwP*8u1?;=>Cwv|3qwM(8$Oz#mw z%5E3AFz=@$-YpNjzPxoQ*1sxJNwo!QNYbB{<5#+3^De&!6HKR`xg)QIwIatVEQRgpflSWvA@X_x|vFy+6O-<>He6 zbbH+Ix7+o0yIpVB`~4nRpw_AP5_bkcVu4f<1uTrsxx< z^RM=XQ#P-@?PnHmP|4+fvHr3V!erGHkSrAU**f>pBO1rzhn!4sC9VGsyj9@y?To3T z)7hIGi)4B+@PSh`4W3;qmQ{PjtnyAX7f_PWVFd3P2%n2_FN+jA7C)=$pIQUsa7fv?QGd*YrNxS1?^18nD;74Ze~ye4k=3ne0fb7v!x| z*hh{1RDZ`(w&Lad^OH%oB9u>1kb~Ds2jQU*HH71f1f9q$>#|18JQlZhj9=Kx>AB5zZ$r-AyK zy7BF1F2_RGl&ycr;jT7yfrVn)1E%HuT$GMIR(&s#?08}!o-^`{Fv*m|EXr0T^8T;# zoITlOm3}_RR^-k*TxyU#1hL41M-y=49Sw~l2cH^n4&c(H{_&38+l^OGNy&rdA}=(v zHF|cgj931Px?8_HI*&w5Sx(Y}w-jVvRq|V>MAw#6g&R${+&zY;c=LRrAY7R#xcap= zic=`{??j!h#6ja3S*5O12Z=O$VI?E?R?q3&o^3+J<5wa=x;qluJ!ZA&rMW0W1 zAkGfMzGFn)(e`1D<_zpHp|A0erf;JqX6wDPjdyE^{}@UP(jV`d=g~T~t!Bu$?;ddA zJ=XJPM@_`-#_SLG~DwRgu@{gu&Vwv1JrEc7Um=UKj|OVn8D1{IW%2F$uQDm|3y3%5hv zqpQUV^>-$emj4}yo3U(eV*Tl>D`l}!Now4?bnzD^Q$lt+?QVVyC1GY>89Ad_dx8>q zmliMPor<3Udm}jtfd?k)L;IRIKlp;{x9uqvk*PmAy^!2nc8yp6c@V(X;Z+(uTURVA z_QYN-$uD(j>p^$!gWtD5*@o{1MYhT{%-Lby!h1Gvq28+Kd2RW zwPKwwj3&8Hf~dS6v_K`wHFouQBu}^d$T5(eBij-NqKXPGqrshnQqJX6L`D-e#fG`q ze1(qa$T#5CV;a7m&icK%_@z&^!Z;x{k%xRNJy(}n(k|jx z;RyH`83YzzaHFKXxDpt-?l#aG4tbPb?M2A52``H29!yp)ooRwkAdAA5O*f zB3E|fL~91d!_j6vJKh~QHYSN~qdry$ph^lrmDp?R zvQw08m~4gfl&fZ?x1$^0d|O&C z3S1m>!5L%Syl4I-JgE%LeL*6Z7Kw77umnDy2Ymc7C($nHu#3y8agSZCDGC-59+*KuT^cA~p_@yWvjI|GuV)81O?SNpzF5DRN>FquSgGjBzT*<(k2VD5c03?k+P-E^*n#i};6NE^ zz=6(uyH`yW2_wM5=dh1zE@H@+V$Re9V%qhs_JE^YMyY#-A^^MT1 z3ncblElxGGIEEDLbV2&281LS!xAkmH9n3ydME6|U;@kezAS=7^p>=eqyXw@AL1(d9 z{KQgQ_#nJ#nyCG+LKXWe_z{d;^k5`TLk10wRuMAA4_!RpK*S@Bhscb;OLI9-n&wmzE>UqCxU(N|iGLqLp?fCgcG*yxc z+9eKlC7(B2)@OOLN|Um|6SsJ2$_gg<4lKY`>b{uoeD=?|rN+3T_S6C3Y?w1%`WpPs zB>o}T-u&i2U0G5sPS%JozSn(_+#??d72guuMdP-M zZidclRoJ+L3AxY=aH>q>U1!~2U9@f28ge%FQU@PZv@Kb?S@fcZ#8D;#$09;*n0?Gh zlPIbcGSu0e!qa19#Hjq}tWFdGDv@t5jGJH32xHit5s&*vEgaqR2Aw2x~Fh15?l&@sNfxlO`qF55Qj)3xFE|c=|OdydqN5xZw1;CSs$kT+T(>Ot8 zPa&n+@{!V27v#1RC4{{T(CLS75~V%gMmrDR0HBujpZpUGA`p<%Gt7v&OGe=0m0KA1 zoKp%SGf%ovP~zv(pUm%SJ;3_`G0X~e-e3P!PnvVVNSQbi#5?Bo*eQm_itY?9d`0EA z*3?%M(>&xgO<>Ho7-<4RGrqf{x!Ik=XUb8GOpjm2c$B4s3Z(>0gpUjn9+Munwx8AS z5w!AG`uBAWoNOTe-XPoOvl(i#JBJuD5qH*e0Hk(Ww{Qe8{rhTuWA}m+_cH{foEs;N zxSgde4=PM7kPLrrMm%rytx#8XbNN^=_+Ed?@XsY%As-IjbD=4fWiwS>E>HWYOTf3! 
z*n2sQt6FxIj=PcLa1f3ONUj%*&>*|pfd>TZUcBR}T8L6SVF6k(&Wr!(H_hJ&o>bN* z27XQi`7anesg(ZxtYwyJqRGh2v>A9tH(uUHAfkBA9q2;dirHSB&?X0Oyu05z8Jbib zHCKoF$hzNzDlRlk3(=mDn5r4#O8T5I&qWAdDf>NoE~zaA0QR-)oTVBBQ9^mozJv;1 zm}sy=!k~2fm^!HPJ8B8^1a9<$z?WqgTbj_#H7)xrjzNM^XBA%<>;41J@mBFxqyq>_(6AWD(TjYL z+4@-UU+dlfq2!akn4G_7k**~6SgV=BvD?#289hri;ibP2Xm^u8ZG>i^?WSFi?0`5E z?A16gDGreK(yb2`!w$2ZZUFPbcM(3;k4gUDX&=`Qk?3q+!w3-XFrvueU2uKUY;)96 z>^xN~Z^?71^r#rE-^=50?!^RpOgs~gj8xrFu$vqoNGnp6W2t8kus!D-(VrJAF2nAp zGjl85_zt@r3Arnj92dUi-sqwRIbMHBpE@;kg0VHP-f-b|>=YnYNCN*VIHAY&%&wf5 z{eSNj9LIuK!xImA7B#A+ICCe2T2o|*XAii_Y*Qb##?qTq+-$Y8@gr{@b@wqY$B zy{=6!3)H^t{fYE*MrK__nI!+2VWeb`gI8(29O z0i9(`&I@ghqgbn2-4Xut{p&fyJ_|6TC_^JA0?SkGghd%I9)k#+02n)`8JUW|u=Skv zf|&Zki#L&dijq?xG(%F-(9^0lRb7u67a^$h#8#{3rgB)RH^(UAV~ z1TAB$A{ZCjfhwu%QaCcT{b<9k$b#Xz$Otv=`1%k8{pMF_^)MUxyIr;+kZ1tlNIM|> zT$cpXv^i_s%$5ObLhpD10%LS2o&I_$Uusp0ck}kh>Ytf3lKU0z%W_FPs{*$6FhKSm zbEvC0FV52bIXtWGxxV27``C5-nIrs;DY{2=D&2A}5%&YHbB(q= z6gq+QQw#jz5kDRD@18d*OFod+RHi~fSh&#*yg^HOT^78s2u)q$9HZ_?@kM7)?c9Kf zoYuZO`6jv}-@nT5F#7}!fg+nTHvFnq0AoinD2?9QfR>1I^It&}W^;J9jQlrY7lG+> zIN{GdgQ6IClNNHrw3id}Oh}we#7hlLB&P_nE?bm_2?;yDgzz+=Q%i-N2FV4SCLKwE zO`nEu%_GZzH**B$sd2}ghnXPTil>KFpPLA$FKH_He40UE6YiWB;?%X`r|*dc0Ig!? z8brPB+@J8GZQEG`-2KpN&Qpwea%kDVLt!F=qs$m9ouHrsBnc+HP+!a@rya@WeLgcM zC+XG4>U&nFS1^cs<`#@D$`5eNJI=v;I7^ASzA| z;U=ufYi8BL@__>Bxorvf@SGn^ZTt~1-{hYlASEls3(FvN$rk+;deD`Um?U-#ew0Fz`G!yY=!z1iOgMrKGX@%x|b);=IWo$v%_mI zed+F1sWh;5|6vZGk4M=PBB7g~a{ZUw9leC{XPHkBj=^g}bmAJLzMLF{F3Gl%hg$9O zc0KJmUhbr+($r7HND?@eMTJyj?UVMKW7iu0`NHBtbV0u;#amXXULk_N z&}%j`(~DJkr_r*$RX)47(EKt_JE2FKqA!2@|Fi%U8Tvz<2!;9z3&$tYrPg^;Gwa&T zz-h6U||`UP;HzZVo5ItnV1GT5`48GghIvdbrYjL*ST=-%hW? zCxOWGpfSskFyQ}C{pzV`S0w)QY}p3H_}wk9-0kYWOB)UqwFddsr+MkNClj_#u&iH0 z>Uu3$cOiyQN#ldolCo9Ds9e8?T!5o|wpp?sv_Ya47Hr6_*pc}s8xSsotoA=^Y#XoU zpA?Ha!F9BI?O^xDjPfJVq_bIGC}_zUJ0%+X;9mX8jn!_ndy0m3GF zF$CIPH{~NUVsV+<3~1NFxwMHP(VAaY{mim}W6<@pzfQd-8FSftvxA`05(HW-IMAc- z(hl{)&}aJ^n!@-)Z=*bR^hDC9sY$kRH}^L}JHftw$| z%$LO$cd#CTNpa|shj4yW-x@t8(!e2n4Rnn@1EZYSuY(JWc)7<_IcHkLV?d|;a})Kx zoC;y{USh&?ig7)A=9Q^~mQHO1o$0&1#EG28uSy&Dya(?v zcAe|vAu0#GYPf6;6C5xCA1%uz8JQkEf4h}XusnA_wPpJJUvd2iE86M2G&6X*5P0o= zC8&MV4^_6b;{!wknS2Ha^w>Iz?*|qJyC|fI=UDYRupRG2(@>o{2Vq%ra zknS{ZzNU+ro*~D=R`-lyxiBH=#rsxEIvuCY4fkX!f%Kpx3fkFkRLe%SdgvM;y8M5u zb7^GlXB+G*B%OoWo=B$`z#WnxKO;c8u7!b8r&iIvogbqu&XJH1Zd__Da$oGmb?@Qx ztS!sn@n|T1-b8&u0~I>8s`SI&m9i5ORj%gd#fPTkNNcC-AMKG{4QIF@tNVJeVDu@U zNvLO{n&QQMei=+nWeX@mvq3Tl-_@Xw5@=OD3S-mv13DQ^EcLX+XIM}QZd8{fEo=Q20YawC zAbJhk%^4BI!gMA*v_@{2!>Hc~?uD|t65A6>kQ9~{yhv3Vd+(fC77dd>uQ|7A6Gw{G zNBsRU0SQIb(zF$yW{zQFxXl^tTIrOnW2G~@_z()ee6D(yb7pp5sww@5;;rFFzg=Fc zMYs@i0<7T{C=w>0u{mHpDy<@-c2vfthiiwqAP8&yZ@F*{HFwF7nZysf$dkX8|D+bH zXre8~EK_1ZR_e~7VAM#7u5z(AWF?O*D}DLPD9}i5vJ6+77|k~#^29-`T74;++mymj z^*)KR?0J#IILzXhMn`Hd@)LQ7{-BCJzmKEd?BDQu7)>4e2ZHXwp_8Vm<w*1ctm32)}q*qDT=G*C+?huE{Lk=77_PdzR(`NL4y}qb0Z4b%EsG0f;MPrQubh0W-u4{YfWJ$_!GUxu zDws;D5!>3gLrLI7*(tpP zuY3#nIC_beIIeg2(v|cSQ8=KZDvvzHLz@kobB#$0xBW&MX4&ELpW?`xSlZyKl1g{};5;iZX8PtsET!V> z_E5-HbV}z~bd+gQoQ5pkz>HEK$LvSvZrqN4m7t0s zgg?@bC?5U8+{O>MP9*3W-+4&(HMH<5C1U>|@K1g5UAj7!f^`PV2Tb@s#FmM&9)w-L2 zW$$QCqXx54j-;DW^OUUt!I6ltPLir_<)?{b*q(b7(|B&vlt{tLGliYhB?{o2n=&;Y8c$v{%cc~01kzj z3K%b~tK#jXC8?h2s0GIkuneMFpMYX4OKuk>uBNd38fmT+%6Dq$F_*1Js|4jqhr1 zR$GU(#skuzM}*IpfVY{oH>VdgkoT$q88rm+=bXtUt;g#DyC=N}=qbBh&v+gtibEps zmCTtK1W~zeaZ6#{Sbx@3m`irRl)fZ$-5MVU)=z|u&0OCoLHDv>?icKLC^_% zcuC~E(TDgE`86=~JVgQC>g9a)?{&ZciFca?_{nv6EId@^R4O?*#X??GY?tCa4Glr2^u_1}LS zu0Y75JNY&|5BzJJ}`K5cU&`vNBeL zyyA)I5GkYm7vs}qG`44&-;>-GvpkIj;Ozku0y$vFjAHCSI(V95Q4as$P0N 
zY!Lr%cht|ZMoui)lJ(Fn{t)p#JjKWnIkCpQ3KSPL7Xa$-EwqSE((ta)`aFWJ!?okk zj7-sA6Y{Gc9UqJmAI){4gZ}2APWaeY!tF{24m^{o(y|Ds7D)fS9Osx{lJOn41Bo6E zU=JKUD9*KWPxSn>9UT)fgp@~Tgms*3$o&4-W1U*)hu*3rV*7#?Nlx(~+!-N~G{6DO zQ?4^>7RSLnEDkjCtOjrIo#pFuD$69%bW`U|n>J2K^zSUBB z|3&PYz$;3g)pp>9NRCI2 zQu{QlYrx^BTcAXB8reaxpY`a`Kai@VpG(prIfEM*Rcn(O#n# zyu9XMcoye(9rVV&A7S+pnhXxME#O4tvVqN=-q)VP?I!=ppPU z(D=Mh9jy6y&D6d$6VnE#!|#KNMppaMksv?s7mowkJx(o>r$A~Y$oNEt&k%QqDCDX$9{cXSo#U4#4k2c6T2I&=33uHMKGhjtl zH^{V}6qHItD^a{MoCkH+GmLj7if&~Z-+3)-`tG`7Vf|0qp_v|6rm2gJae~hJcUKe- zgaGUVZ^_#idG+I*bW{WvCn~j*z!?Q9TkN;$pBLo2yUy~KXe;(?=YFx*;@M)#n(2wZ za4An1uRXNJ)Y15oA@mPs0~-NB8(>i&+cU7=egp}q_1{RCa4;(qL|lN!0J;O#>j9r_ zQb)GqxxZdvx$(4=o4~Y+F9e-30}3BAHQp?>iGn^`nxkCKjZ7R48=GM1%fbh51{=FwzCdX4HP3)w$UZSCMlh4l;$sK?snJ z@6zIimTy5b4(>iDjuWc66-D zZX2+4csB4w34jUgS%$Fz`e5a%Vb%}-b6otSyOvVAx~QCgFz}0suF<7m}2Th%R%aK0JQJz4oL!?K;_QHF|}m7jH=tn~c;7WbYON>j00m0j{OC%W=*1 z)^m%F1Kx(V+GjO3{LM(rz_-jh;>nJV#~FrZKDf#tP$OeM%Hl8N3E~a<%GouB{*1JF z<`vuyT?MKzKrh*HM#th){ein0%OfC?+CeRmE)vhGtVjF>J4mO!t+20?jlq)yun5=t zBsn$Y*`y->s2ugi!7QlM_G=7tiyzh`XrF>~!@WZr<>RD*Rx|JF1^(H6?~A?TCFG>Y z4kYv~PK>~`A*7Y$`3zJgnmgqv$P{ogfKCdYlpap50otd;L!PN>1u*Ai3%mvi)Pi75 z2~X(B2~kKo3RJ!eti6LDD&(o}3cxv+(V6H|Rz}fdyj6bN{f0zNH9bi_>Mbs{EkUb} zSz8w57}rnqa%Zb7;`P6s3u>_1g=^{5d1?YndtB_gWb$P~?Z^Kr0l=$)+5HMos0?O- z2-)8V3gZB}i@A>L@{qt&P@IL3u@-XXbA1CRgl?=uLs}_RaDY(mSscL^=m4v>bs_^B zzKrJ4);U9|Po5`)xM_2C)vIY*mwa|c=Uv82Ntd&-5NZebH|Vs+N^YP}{7}2Vy4te7 z)5N0T0i+GaFtY#n&d0c1*Rl=jkREdR#|S(n2#e3_yp)j7_HA2-bdx(w*PULt?FQH< z(ae13A+V%3#}=AfaDh7UyW%&7AOxJ9+xK&LRJAFZuZoax9PD>N@=t^)O_RnSkLi<` zt<`oiJM59J-s6u~Vvk%R?M?(!mr6zOW|I~H9!E)laTN674^R}^n1lGtcp2LN@)s3Y z6ma?$(%KER6@K-$IqSlutY?Tv-oLW3S*odRlWEk}p;P7}*>&OXILJ$d;GU^+W_^ z4ZT$qfRp<(b*Qa{wfYLLbk&&H}kSmPa*`U zR`tjto907PVMfO_MA9dTV=garSixw()>98{&^!LU9ziH(Swvp8d^TMy3)3ODGQ3s3?70KH4cGem_52kWE%~)U5$FM86iR*B zIN;@$3%E?zTIrK0s3#9hjazNLK1rQ?nrbFdQeoZ&XJ1-P%&R*nUUq})J3<< zzgBWXV$+@FhZc3{71=(ak7Il9u?Z4^e{urFbt5Sw26oW|ChbX{l7y_# z!qM|1AY~NngFJNt&_tm1Hv}bdLJblk?*0whVbP<4a6y~FQ0wtPeNcc>ot|v)!i2er zYM}wTmjLok%V#I(PUvT?%BSwGs8 zA-9KBI6+XgxIY~f?3QlapeJ!Ky2@fz6*1!5_6Yk>h{jYWG#31g>lH6W?>jMocPSz4 z_=jEmBM(WQN8T?msWa!QEh4(SeMuAL0myd&M$gqO>W77+tp78lp$rpPaIP5dpbsxciX&9%7`S+%Mw`XMTP43)tZW& z-lBnq&7w zc5@u>KJ=HR<7@*1OxuNmV+fQ-{_%MOMD7HLT#ALQg^^+KH~`m4dRK@f0q}GyL^xiW z>hH}R!JbkAvttSXLw%AEDEBTtj{fx(<{avQjsWsodlPlW%H_rRBKf~4U3(nH=nYxv z-G{t9ra(hPQR6M=gxFF`m(Ld$5d!fCrwJ*(L7vD!fMtCALffFq=ph0w2=ZHC@bf4K z%5TBDC#RX(Z76hPDOO*HJoYE({K<`nSYL- zZfM%6)V4pT;zwuT!>_{%pzJgtrW~O=JO4L7%fx}6faC%91b0t?B_WZs!(M@G+ne;5 zf*ANqS}z!Y>ZszIy4q&k{UWCiXrDH=BP0PI1sWwLd1*Ir-65V&^<}|L>j$%u3Wdf# zXYdz+*zP{Bcyt{>{ap+kH(X9VL%=^koofVgOx7*`vA!}+4UF&e$RJhB^%gn^$D)4q z0_a4(HS7Zf(XOijXV?skV~#-^;~OY$PlX@h-6cT+|CQf(v1I?Lzh>ODEbo9@Sbu(+ zG8_pJX!_l-*=Ial=i<4s+UZXN-2S&M+DF?WYWX$W1X6x}n#)#`wVx3{dYD$S?+F`! z{@VZrIeTC?6+5&OL_p8{t3bpEprF>fyK42ts^ETh87oR?VKW0B@YdGdGD3=7_cTa7 zpM*XEwkR79cd|r7!$0hWT}H3kw{UoC)!G965g$p@K9Fti)1A~1=q>#^QXirw3@TCG z+t>ZDaOq(N7$Y29iVqz!U+gOlUTBH2Jz5AjxAe3FU8~d%SPGV(Ti=g&@I$1FaLk_l zhD(8+WOH?WJasV6?{4>vbv!FbbXi?eOx-FK7}vnqLT^2lB`RpazJ+Smy!1!0&n0$7 zY|J!H%g3XwSv8U~yPA_^3Z!&JUkaejzwWa7xmef?=`!~4H>i(K!8~OBBLvC)j=YCf z%FyB@WLa_$LD|7v`<8)K2Y`pFPZdOn_PF{ZB>(;IUi9^NLN( zpqSGovKZ=SOx14;;zX5fU50I6*hgo8Y)5vjAB&^$$_P8?4+~;CL&AxJc4e#()an7d zYISklcq4$cIz<3!KYUT3tJ8ydm;q~`ZqSx1iNNM<;0xp(5;3VHX!JlP#3tNzo$$6$bcN6 zxpDgl4j%0M4O(Fl1SjIMzz{scaHMq|kjuVgfgM?}D+=(gNY^2cFfNV^mjiG6JR2kc z)8JRN8u=^Njnu8LGy>^*-w>sC!pZCn@Z6J7ibLqW(v`?H=c1jq{#tw;Mtf&!LLA*m zZQOjztjU%(de0gn{-*rOa2KYhuq==<2>C^F38p?w1NZ*A-|InU^~oRHbEjD1Sj)! 
zDt|@Go#h;)t}cEZj?Gd>*3?xPqvh(n@8~7mqxrrykp}z|B9uP57kSZo@6cW+7=;x5 z*(dp+3>_|CcK!I3!VdHh2SRkgl>umv;bjm5eC2m8)Hgt>KY# zG^~bw%(lGTGDv*cDNwB)Vcb`@bPke)cv?2JZjYBr<4$DmsWpLnTo~fyiHMT)U@`r9 zv!%==(3y8cAX$8!G#L7%0;EvSGQewngsh@1p?gNP17Zpotx8b0PJcTuzxVUV7|>9( z_UzkgfOc{jTXZYMxS;~jhPY9KmiLq(|I|acp5m66TjYO;F&ONf0oWI$Jp>$Tss@Tf zh;!@{JD@84RKDD$(eZcpIwAK%4VgMMcp@>pI_R3iFRpiYmpTAr_|pb&_vYmItDE2m z1mL(_1r@cQkXS>$Rp)mixiOhz;)? znwgr27^#yd-H%nJRV9$z;?8L_2(q4He{#_^KSpSaeu$(JcDB+Ooy_qvQtg!HS1^6) zfyHu=dmR|YWbq-PR>W@8--J;Fv_{{t4Xtf8=*#p20GWi3+1><>oW$8-M&?G0hY3meAcsYFD%K{J&B%pTi2(qcHL7K34x-KUo-Dl zUna?O^wCSiH}WrA)bK;+y6B)9LjNUsXAr_bHv-eRa(^SWDL9{;>^#GAj)b(1*tS4` z;+}WL;u8gdiQ@udU4_E|+SY75j4;}opWLEZ)71+H({u(xB%LNDb#b>Q7iL-TfBi*C zeglHD|54CHPbzFnx)~7zyq6xhN-}U2?xt33g8%i#rBAV>yyGSJvW^v&{yLRUR%3#@ zEvKKy+1YFvqc!mcu=pGZ&LG=k2mHH{83?wdYM@BXV}lOggWZCM&!O8EBc{JPYiTN| z(kElSe?tvTy*~#UoUTlzSmpggRZb<6Y#bp+wrP^@rTh0TX<*ebiC&MpS%>PHcbM3Yj0!W+Hc?_tc z(T@pUV7K^`%^-`-Q5(qsuQrIfiuD3gl~#yA6Uh@h*I0)6f4Wm>Xtl22ox6b|@Egcf z?=rXs>sT_ziu@%r1K$7kH>5AIw};ID7^gM)$lbXA5}a-*7j|Nc^IG829@go#>L0Yf zh5vsb!8$nx`p_#sxB8@!NNBCegb6_s{3cKaycbL>wZoKWcmGxTE1LU>m`4y3&Z^BD zaQ(h5;7UO_$ykq!Vf*kL$UiKvp)Ol zL9_37N2f2PlE1j?vTYh~zx=3$*YQNBU{=sfF0GYfU5>9e8Ooy6q?EgA-hD5lAsx$` zCk!X*1)l7@^+8bprPwu_76R&q z9wS0y=%8z_XE7LvWC9AZo9{cQN!s&UD$aB#!9tTyuVnV$qjpKm5yhLgcZ`O}P?8)d z6_6&zjd)o4Cp7>bQ3!teWf}`a>^gGMN@-Ebs9Tzvr?AdS>UGREXXdO>87T;vRQoPE zb=jpqm$YKJl;aSth0zFw^%wDaK#U#m0IIR2l_O!e>ySxIXUi) zJ}+f6@^AY!#t+I@-W$w=oZ>rZLo|Lv-cEW%EhD#~4L~-E%S0k*w{A!V4EM1}dJvEv z?R?8BM&)_jr+gB@^Av{8jn~|UnF^RDc_XGW5Apgubwu{e4n$qdXh#e_YXaF4>+Nx z`#bc13F4N&`>CD?7I_c^%!8a&KW?|wz5ZsJA)lLNBwHS z7zwmX*KSEj;-5$56m_7mx-1@$MP=w|((e>HJicg1GBFOY*2{ppueD5g@XYR#9>ZpJ zC1B3~{ur=0cRLO~&oo$XNr&liQF=LA3$Y6IcW+@d9sIRpGC^;taFw&zu;b0iA3k&1 z3@hb`8lKJXp5@G$f&XeW9&*0+69(gz137G{lJ zx&T3Vym1A^tDNGhQKx=lub0!Bp%P7N5Gvy%xY7UQF6+!vo~N=bDVO%<&=`{KYCM>c z>G+s=gQU;V&qTUSshB)o9&Exgw45Ai5e6rs>3=1m19+^oi%hTdE-#FXTU9ZU6+dUkoBF5U_pc?0aw&;IZOWk+Ap zRJ2_Q8!p`>=_!e)ZC0g^BAOPIgRT{W?}_)h&M}P$!2!3zQ}PuEsfTHkGSzPJz=rJq zZ)KpVZwGx}zfAtPrzHTy2H#e5=qcD&_kmz$4L%Epy{>kRaG-R!D6YMaXN+dg!eA6A zU0;KFB~o7f#e1W_P-4ee-f5VjIj9uBs~V*t?fEWQK>j7=9GKaQmiqm$Ok0R7xf`NH zhyVOB|Bgli~w6G)5Hr08$WZB-52M*=tqh+~tA05tMr3>yeW1P7D7lN$4 z0Fp?i4?v^CBPi}wt3Cv=JTM)pz>}rAWFaWq$UflO#g!6ya8q(&1F>8Es<@(NdVHY? z#~ibYw`FJ_=nP4ED&TLvo#vZVz|8H2@>RL4sWl7y)QWt~Wljhge81V_|v-3SZrd_EpA(#WiW z{Cf4Ktyq?^NtWQLH;&ua&N6YGm{z6C$hPYo>T8sh2B&u0%q0lbMf^;xYZ`=Xl1Dmk zFlZ)j^#K}dSweu~j(4Up9A3m2>GCrJG+@x~gLx#F{>Sr8HAjDF--FIDHO_zYQ7Y$h zhkva8dCCzbk6d#AhtlGvFXPLj{J&W~*K~5@Xn781s?woPj~r?woR{s*m~M)G#BYE- zb|vbntbw$D4BqLP6`s=uTGF{&3xODzLKBlPx;**p?);?={4A6iw zpmJV6{UtOIcK`zIPt*H2-;=Zup0%BS^g0zu7#=nA#!7LOwft!g4KRDPSN{E5dX$Rv z`>^CWUBbI`x}V42lRsbTKV%~=OLiP1_T^gG(t7=B@@*PiQeH2@Bmby!G_hedMV|1xpZ{PI8PFehK?Z6P@zqP9u zLHu>}Yw^8ZzZEde$P4an1wLO7QUBF}r}EnMGZpSeEBzRMeH(h;{q^d-;GaL%UN)Ms zo((#iArqpI@hOlCZw^AbJpqdZgNP(%`M?q&U{fCL?*wG}z$wk9X}=dk(u|2`Y;0ZW z3`#SJTRf+fUz48-@x&o|TuP3Q)k3Cyd1qg@|3nDTwcjLK@O^z~(mKZ9z~$O7I&9HG z0}&75P$&ZH*SMD}XNSHc!>VrM%1r^`8Pla9q#BpcBX(92C$JUwh2mACGsQnL&ss)j z{&L6mYQADZ=;5k$>yyhi+~hE>x7)M+Kickr`%p*~(?NR)0w%2webn^duPP~b8Fj$e zYNO{y*K)InJ)9Iwg*1^`JL=*R3MaR(wg&z(vrPn{EtF%AG1ExPJb{h&lVjGg4PAiv~by_KGx8xV{9>r%oz(9CVXysE! 
z&);Dcxy8^a_^`i8{;y2PeAwY^*p^s(zIatO$KgtUv{dnLa@cNP7^T4Lm*ih-+?(w9 z1o=P`82T)LU}xZQep3Kvc~6Ne6zbQDqIiHX%8T11ael^a#ohQZ)pKOe)R2H5_w-HF z=&%ktT(>*PX4A7@G{V?Xr% zu=JLcshhgecfmsK(#|KmK_rlbcQao18MsPc@~QN40kNi<^bAJrHxf-{+tD-o z1IF}4Nf-6EO|#ay`Z2tBXXXG@ZdmLIheI$ zZvR7;xeY`N1QMMg9bre^PcsGl?W;SXe+3fn<=9JR;SqoP6R&Mo0!r|O5~5WE6Ki*3 zrE)E&+87G(dG>-BRbn0E&ID9Kx7@DZka%}k&li)lzNlHO9b0>((5#e!IH1Y?ppx>}6LIiotg_ki9D zK@M+qWdG5~QS2rDLG{O^SWOiNRq=22O7P`=Rqavvv@ab$aDSf4R_rMEymg59{m^ zYy!b)?KxW0xvM`EXu;Y``*hA!48M|u<>E@1#e3=2H{xmXZ!A#hUl$$J|M}QfZhfi1 z4Aj4+zpmr;Cj3!81Qf;4+|YeOOxEFTNf7RmL!h8U!#d8hXN$wHEuTmQvlP@x19jqq zZUf+#l|src`Y34aigZg7xETrW4#z`%Q3J0Ir~$T{9>BFXZ{E(D|9fy+|2r~Yc?2rP zBhlV>rflYe|KRK&FBEN{kO{ee<~w<2FxTQ$0*+c@{sy|Si1{~uG|9Z&Te{{1?`zx{`Pdj2mzz31;L->UxTpdhYvW)_rXZ*Yl>_?~VmYi4{tHt|$|U5sS$q#l z$-7|7BnAAteOm`o7?C-JtCXY%ib-x6AKyM$w~>ZOZAMmpAUuyM4sNHvu8~gG2Sl*D zImJME8RYn)3@r%zgg}PDH_&>Vl3Y>gnp;ee@Qf!hF+>xM|GF%O*#FHB6gh;!R+yc` zS>Rev<^o_WB>~^8u^4_^fOp$)S%j64b3S1UPABP=5k|i=nXzAA;iy%Dl3$OSMEe&~ESB9pDT1O}ZCE|5e1`OE^B9DW%x zofvyI&~_Q(xotBa`UC9z{acjb^yu&dTX(+obrn_k_%FDTF;KVg2}Yr2d>%T? z-Z?Iyp%h}e>D71jJ~M;@#tRdl3TXD#7I67Mw?5lib=ef-=a7PcvOZ+R+MJtF>VK-1$2yWWgh&59zO~#(cxUZ4tV5J>zQA1QxK9M_r|_+1>(h&b_Rb~_Vje+3;ULc}~f`Jw=yK;XSjC~#!)sm~oz z1gT(4!#=X*3QZ_j(O_ozo*3gJ1$t_E)hveLTv3ng!Pj*xU*!Xz6G-1DN`(*%6Lsxr z3F9@tSWg`P%t*&A5_aC5PHZ?TYQLx+kvEP_0v~Vd#cP(CL!R-RkfT+ae3A2b zgj5g4^UcHOp!fz;3t+vrnOxo7v(1Ya4630&cPqV$=|K~ECPEMGi?xM?Yk=p=&VWPy zd42GILIC+~*RdLUtb#hjl>6g|qi>5u$d?C7hV^OsOYQgAko zgAhuabiY%5;gKzB=t)y%-BO1RuvKzlO0UXD2_eXzOY8bIusX3Oig&>Oj{*+?nl7LS zQ=^Umt1j?({qovnnn=7TgmV%)wQc}4um-SIBWO`wyPI#yE@$Ie6&3dlU?Mo1PSo7| zeWanNWT=Jd(e458YE^eK<~w|t8R5w_;i&{ohU`M!B15sv>K)vHguPcm zY1dJ98y6qf5jQZxlliYoAOtB2S`!p5Pjy4JTU#H1lTh_lcZuG|ClT_m#tkQi0xT6| zYG9`;xv)>o`~~rD{F5vX!NCli!H;U@y|=_J0TW4xolCf)X_q6e076_umuv&=syp9C z-qhc*r}ZfW#ZQL203H9`z%#O-dS zjVHHZVev79{AzR>p8xjSd?>{JS18z?&vMZmP|Y!avX!h;nDW=yldzp^m^|mjDU}Pu zqWzD*yoa~1JZlM0Pwi5U2R3kdl-`PN zg4~hL2v_IC452sPd;t3Ri8skyxC1H?B*qeb0Qq<+x~*?ePD~Sxry%2dV)!9&7Rj}= zrk0vOEx|t80t69&`Vgf;GK!WIHYR`!DFTd8 zEREV2amleuB>3A&hR>RjqI@6`a6oQ8=6nBHE*1y?=%Yrs+Ipw0qC)GR$hLvbqYy3U zgXijEikvSmj~1rpChLEI96Y$QE6@un z`?9YOO+7zC5DZi^@lya{;4Mc>@_~|C_Z1b?p4F}N{fP#|4Yon~t;@>T(tQoUp&4`t z2+fj|=Nko~ATL^>>8Y}qe9l3*-oABoa+l3Ddq8?3?%Ksbs+j=XqE{^+ zQGP-=ULjpQJ-&l>6u1u$ViO1wxp?22`COG5Tgz)dpS!&C%ZarqU^S_kdVO|x@2p9K z7&9gN%F=bi#zwr99J3B)_zy?I(M+@YB#?>B=72#tH&NM|Tat;8r#n6}LsV3D0974aL&#Fa(=PrbqoAQDWUj{PS3eUQkf=>)Ug!FYq z&jf!mj9E`N_@#{Vy32?R?gO|sGv)l16M~Pi(u3lvCIEDTp~T_i-@CF?2qBh|9%K=S z5xn&#&Oh#eB|2-M3pfOmV5t3L22(gBTJX7nk0@Fl|8b(mmE9ZkV9*z*u(8aqpA;qE z2U2q`_G&f_YDm%%srb_^9h*@0ooh;Kv^wwV87l<>sudH&HXg6W^?lVhc{nT>|H=K_ zhdblZ`)?5%ODt;;KWEW~6fU7MNjNxOJ6*CV}lDn)azjUsnJNUJt-#bkP}@LZ|t*fqbJT z%&8%c7^1Wv%3YT8*ltPNQ$>vD$#hztTzN$f)rt#ZS?o<%pEqSf>N3p~)CDA=m6Ky| zsKO*x>dY|lUG!ri5bG<-4oM(VFx89kbp60c+G_FuaOIxYstF#5>AqFuk4YuimQ(nI!XigHCM z<(CS|i;faQr>P*!Kb_F(5THp>6M~>(>m4%nOaJrv1tm%#hBM<>{Yf0ya;SI#5SCu% z9&0qy_|D@~^E2ZkgNlZ-#m0@sh=!N<|xOX7G=VuR;msjylt~umSRc~nReTS8O(QPgI?-+PEzjrBvCyVS zpE}(gu%TcICT=L1vC`WnqT~`#!FAoXzj&Sel(`PWd<;U8NEnPHAGosFUv(z_Ga9b@ zTN3QshmVy;n%N0vEW&BNg4}WVlFwM^EFyxanA_mGgUpSPogl-27vK69Au$yy|vk<#Wd3|C&uWOgYJsm4kcRm&YhU|6D;g^KeeWqt;8xw>*k&;f)i#UqbLKM57hs0LDF$a0WX5@p=*r0CY_D*rXvMv#5bTu zKQIIr4;XScf3{9zAF7f%F@Z4cc)QSrG0}Wp#OE&?Ab)ueeh9@%qb)t3;nO%>Dpq{d zfQIAp`Z_DW6;|oODuGxUk+Wc-mU!Gjw(X=xuameLj%+a1c*V#=-GQqGsu}O zV{t5o21I8hj1eca@#V(IGc@I0)Z45GU3CXA(a`Oan0a zl6FMg3XQ-=gGP>Q1Te)+fNSMxFR81Cwy&_kKDtlEvYA*Uh`%?(X}s4F^z1g3Jk z1mKhq69=YuLTzxw02I?CEI*T;z`%$SG^{)jpZ;`F=(l)a99~C^-^mc(J|5CJ*!Qua 
zGqW41o-!THC~Cq$hLI_5WS1ZG>LOZ60Cz@}vz{@3ph~Wa7lFv-ZzuH-PB;c-z6BZC zo5ws6hA9OtSnu)0w}68ka$omir8(u@ZAybg{OcOgB`dH(aq}MSLNNqP0$XhiIZ>?oTP4C^clj+Rw9Urqq{&Tcz?hlf{i;c$qH?s z16|C<2MDPBPPfuSArfcE%lLVtCiJUz=pqRbuu1&g=TLxoTV8(adF{5TeXlkzCy{tE zumKG7nH*JDK84`m!|M#p8D~cMh(F39 zLwe3%b(lnd_2^ysE7eYm3jsiz-ZJ3S!gT<3Ttq^UZ;FSAAGbf+-7^+?2u7lWg{y$B zp_{U_#8cuVayLLW6_e$`21M9mW$@dLQflbe_4arR$H#(v1kmS(I(o2B(A}D?MGe|- z?5sK0X`Co1A<3m#OK)Cldm9x-fN|cDeovwy*zL0w;*%J1Cf@#YHh=_Y@Q;!LS$Lrl z7w9vmY+izai)cC{s021wby~objACX1BM`cjqaS|;SPO{Ut9hWda`^fz{EI0Iwse&w zuC46zO$k+~wAcAzViP~U-FpP+`FFWP*bdB`uuCf+3 ztU{mjRIl8z5#A{5WC&0>|GM8rw2sFB=q3RgVi-$~Dg(oZucmYfM!r6i1FAEom&!)J z@{cFaA+uKo16LIB+cUS0z(S+VuypM|~nLz#c06H2b3xrd_jyIfoY5HerQT;$}S zb-w~-eRLDdrcvSi>+2v+c-;UX<;kYf{lG_k7f^WQTnL=G#pH@@vrFj^<d?74G{>AOwETDRb_QNgLN6Of$>c>9$(9lI*R}nj7F->9`x-!dq|L8wCpf z!EA6XP^ulmoPuQWm?o(0yQ8Ja?BG`g2ru*R^_TWB->?Nat8sTLc{oi`RJqjo5TBtr0Yo< zB+%u@4jyUoxiSlKc9y&HopQ?|Mahr5c0x9v4<#?_oGU`71R(wvdkf7$-*17<=A&x| z&mhmI&?WY#!psfLV3-ryx@WKX@xy6z{nf!Vwv|{LIgpm|uc&lScjORT1gKaVa!4UF z2wG|{K!%h9p$;A0m%YC~a^@g!QK)*~es{CuiM`8bzm9BNeBVt}s8Gd9S=unk32qxO zhIO!g;fqjraP&^swniuF)8B#+VRQgKtW9YBB-X}_B(Hl>p??u@Nf$+(YX}AE3PX0 zQG3RiH{kC@^FX(95%=p(KWwxb*xD}qAQln;_K#N}nglb+MPkr!5&XdJTa2hLJ*EUf<&cD$e?))PVS;iM=ANkH}Z{tWL8x z&Y1m;I&wq(ro6;c^A^A5t;5k$k_E+=92Ni6AW38h(2{o#76@$(sFUkx%okFqx~-%| zQ*;xyM}t95%^>x6*XugSPK4V2T;|)RC#b_;cKGh_hH7`^78_6`;s6Z{pXU?kn|-u4 z)l&du2WJzbR+gHmeB*e+$-$A<7h7OUAFoh9^$Gk={{tyWX$*GAnibMIcfAhpufT9D z88^lS0qjx>EuaT6Vxz+p_UapS0I=-uY=Q`{90!0>%o=^am!jaHDm8h^3OEgDIH8dezlHA)NM{;zhetgZt39 zB%?!T)oVN!Qxh-oOe6g6Mj2^(mGP{9zU&58cujOq;MbnRyNXLvn!a zz+Plnhbt+Gj1CcDio@=4;MmIoou(5+grjcPpF!w&&!uF&}8BzE{=s zTxfs2%VAAL{0H9tn)%mDKI<2sIc>HSAqG$)T5^mCzaR+wRMrbLI>iPCJJJI;qS+$# zR+fH&I=#Q=E&J{jY`5qYS{rMYn7Yo^Ff-++50E*myZ$oEyJOL7&V!8CSW?CF76<1{ zVNsWnIP!}Fr4HlM@4an~7=xIUNTG;Yaanfd02<;}>r$acxxa+LT_2~I=~x{p>rjEzwA&l>Vd$ycql`n^DJ4me3 z!2Tf$4YV47U-sQawrYBt-Ih7Upy%3Y(%ES!85sKIc<5&f>#^VWSlR%CVs}eXdgxY< z9Ffpy|2xjL5wNButGlCe)Za*Np>SOQT;+po#W%<1DU|7q6h=gX%3;4uV&VGp(N1Db z%e$I8qL_d84sxGcQ}0eygSMAwjSQhLw-2e|3xFjo1cCB;&*Z7fZjvOcHPzkxIeLJ6 ziZsz=Pv%yCQp(ip5M&5UUC$whx~xzcS?Sx_mQbhCF! z&G^eN8_-5F9Mj}F_}V`7i^JOS$Ila*1t%G9f)FeF?KkE)&DV!q&gNIjN_oI`d-XOEk|D)VPP3&!7m3aPY2oBF;6S20+JL*X2rRR25 z7?%0hXltJ9xywEZqWS3l>~Jm-emYR?1}DF%1wZXob&-;oj17MUyYI<&c{e>TZm*GL z89Z--k0E_?%U#0}o8d?Bz{i3B`&`Th&CvGs&NKx=93mCdYgK^+JI*gbUwv08qgsqa zpJSQ_m|I3#j|aUHw3~F!Hec&#c@6-rL?5z9!Jy}=pR?@Hdb(@$l7liqC-`u?n*~=D zq&OBkV)aH`TqDtI$B`{)Ve{ge@;`j2Txn>sqkb>F{}iUiy7uOZ1>BGtFEwwWb11hW z0?g$rfQCm(MFaPz*_BkZ)8W) zNVumKhIm5IYw&mn``o)RVnY%x{ZsYR66Jp#6+!H&z{Pirr&%==eM8`C_)Fsf^E^gEQBYJCrTpWlDN z3*B1#C`ZJ;yER=IxtaqJ+We&yX<|s-Rpd-h%m`j^QZxS$aKnmN=GP2YX^XX~qxqBG z;cowmz`O8%{2M684&79{4?v>d4+=btWw+8ot2fD!9-f?mM|~Y#w*fgldG%1Mc$QKt z1HCP-Pc0J+K3FXus?ZPIU)VY4DSGrWVL;^PoTYtY6M~b-vti;9bH<`6!IJD_>@~i? 
zwYN^+DtS*M_t5JzI@vUVNlHP^SB}Yec}b`Ow{+Cg`WoBeDpx7_M!8rG;sFLPwMJFwqsMU+oAOg`2+XgjYHMc9!Yu?rQ4Lx_b=QDm zOS(pPdBM*g_U@bl4B@8U{cpt`DiVHu4%HQ2`l)#W*m70GpuqZ*X1h$r41WTe-cS?D zqe_KEvQK*na-}7(90eKr==9kQIBhpT;Gy8qyf_zQzcyGuz5^m}W(vB9h%Fdsh;Yy> zJYfSWt>!V>icAW%1wE;m+7fO){q0bZN=dXGUUq;Wv(xh)YuO8mjqI!<{=}_0r%0_u zo_1+$ZtfvB>A=s;sB^gz93SG8T)37~Yql zTg5aV&~7Q~6L{r9zj{)X^pslse`WRVO6SoDZtBP5W|d)d>=%=lQiPbx+43bg2~Ht< zFRWXn4^V6oV^Z=*-*47>9C39{+Q=UUKHv@B#)wp^hozUFox)VEZSmd!6mbBhw#5%K z5aAIK__j&~QM(<;m7ot+J0O}2iD{9dLPk4k!6%+eCLSE z?#54q6y*-+8{8{<1w+u-USG?nhC)0X+$OT}g!d&u@w$qiZ&IqnlY>13<4$;T)Lm(C zigB-dJBn^Y7)69M7wF|-uD~hMz^UY|e^CA6!@Q#dR1Me#;yh5kC+DNC#&%y#;*x!4 zUdyluu>95Tr_y$^d%mnnq`T}C9igs*J!djdNd53D40-9R&`CTgtn*J7Cw-v6`Q7`9 zv<18s`6Q4f6=2CN>z$uV{(1*6^|v6&w#86G=(TR8+?YN{z@>W@1wCYcD#DBuB?MP? zMoQVN!D3J{bA^XFE#a=SVtx=*1a_-UkN>W~loN;z|4B%VR?ueWk>&!v-p;Y@m zu)9(`Xl?+0fYx)BINRyuu3~Hto8m*L#R_+TZqdpgDzG_MrQc`!0Tb zlD8ImxzWaJ2OItN1;w048q@@uJqVZ0&db2+it*^a`Nb0`2e|}ZcT@>b1{r@C^v%Iv zLpWmupruw88m_G;sNyT(U(!aphhvMU6yjq-O@Rv0jD$Rs3eFf$URDY_q)_=`LPhwl zN`Wep_eYv;%Vb&)l>`f$nb?Xm_eC zFYfDb0n@)1tZqF3@=AicVTK;>fZc=$0tjcYe1GWM15EOS&2<_~%?eAORfRiP!G2jZ z0{>8}BfxIrV++OuW;WS-yA1z;@g&+lQQV)m9z*)~9I;g*-|P^Ssq@kFk2-5P|9EV( z>-VfE&4{hIfzbt6a2T6P-Iitf2oyY*Z5-)Qi@Oo3*48R2|U))rM-d!!)It^EL%b*u@lXw_#ek# zGbDIGI%2njrjp4=J_YBS%X@NtehZEPPg0LK?HxA!uAYp$(kg_b?%eqm;6&E%EJx)^ z7~H>$wEyznP#ojN(n>N%PMwkfk1`hS$rBe%)%Bqdz1u)1DQ{zMle@n`^zU)B-yV$& z3r|412;H{%8_MY=B1o1o8x7(ae1P+~3`jb-v$EPmy=2^5VFddhdRm>V#f)P}UAuF= zhu%6c6SB=zcEAFAI*!iB_Pe||r`NK=NR>^#@n*4F6R8Z)klBAJ+6(e!2>5f(9y?EH z$ogA~re|1T^Mk)cL0kqosN!4;`AD$Rk9A+)$B^Nn<8)AGE2Ij;lxINa&Wn^98cz3k zzLrl1UL>^<#!C^|r;%g=;@wQb?@*dSsRuT;z=aRx{09IyVy;6W&vvvs%SzUQ!eIBC zA0SD+U8o7nZ+;@)YVzbGFKtigKHB}1MXMTcXU+$jfAkFzsx#%IhWPZ;oY3pttslf_ zcQwkC7E^}X%$cal z``CnLx-)}(lYEdgPpETeb7ul^U*E-OUicexSz4}sbO&U$`X+&gQa^`!!9tC7tM5;7 zE7Fo1IH>XO{(6K2>UO2TGlgvagmOGXzAQnaWRY+G)2QH#u=q?}{^7UZCRbpqK+!oj znR~5lhVSqXW~_hS6+DqS*_;X@KR@gVz_uyK(2&35lYzO&Bj?8;F9j)5`2B|h&cm@? zlaJVZmDUW-SUh8P@<~EB`a;#_v5LoeQnt3K+n2C!Ko14Ph`czfQW4@b{Dd9@K9?JA zU|*=kFUF8tg54dl?JY9|q4szGei(saj;`_w*pGwI!Pu<$%un;fZ=IOP%P_puLGB(Y z{{VqmoYq^L0?(n^V+Z9p1tUe7@rGt5+Vvbag#C~;oBs~bvA~iE7qOhXu(rXL3-(dP z%aDlay1ow2W=o#MU~O54CXie^!X>r~cuaM?$NZ8~4~pAFFa*GP@E9_nhY{%6lugF! 
zNmdX+y+dFP$(;%o#NxT(RxTtlwvtD!Hs}jL;$GFgU&e?hB{Ct;I>Qi!l|3%|5aC)t zs>ll#VKwBSI8JF$0ArH%DlY2oo^X`3*>O;$;JE$a_vu3@E*>7G%#6|#68_u*qZv=l zi~CjL!3Nr3?YHpsjDyT+m$vSWGW6XTU|jF4+(_a_{XyG)mk+c8(A-S=4KRQ#(MMd6 z^v9ed)?#+buP_V)!ax!Z+Ott^2>(4(o$uo>?`W~p9+X|AOy!~<3bOySOX+PrK>9(( zv4tut^ZmriX`buBT>NvEt@HAmI#FPRLxQR&lpAf4WoHyVU_3?*fs770R=MMZalk9O z4B1so^4wG45o9*Epp{doc*R;t&e_ZuoYH>J5pyAsMAZV!f?RRZA(Hf4kLn;@$@lXE z(Eq;m6tzcvl1zl6WdP`VRQd3`L|Jy8Nb=gB8Dz5f13Z=$s}YhCU!JR#QIM4`TjB2g(_A z(la5}M;-Crj^ZoYznus?cLnnpu+oX$xp0Ua-)RoM`~Q)&@&_s|Y-DJ(PX^C$4w^U% zwEGd(?MN0qvSu&)&=@oIbvQc343YQTCJ)9Pck#Ml^~`6Q%Z1M~%v9|&?x?k31KW2dqHuwk6LzvQ zzMM*=@#(WFAROM*x<>u1f?IL`&#KKBj&joYCjpnn2ZuFtj4X)%so33tOky?%WfNBwbYBZFI_u))LIehA6`0sVFse0F?4g5Ii*;8*c6LeTI9MIPqPA zaI40mgxAUMCK&OpqlIko2{0=bJ`RpMg&9E<(6DfT9O~Ub+A`cy{s6bKPy;+B*vq;3 zv8-1$sj_GBm0@^&ZT2=YLhWCYP|IrqP~Ao;s7il=awl6>vXQWLcQfjpV7C{!8f|p{ zTL*KjTNIcy#Sm%Q{)W5hAcf4){saC9%TdZ zco+k8JF0q^6(vqo1)2dhsqp+J4zEB+&nB-=CEZ>X%u&2JV=Q8e5tLlJxqRbtZ4K(H zhhI8dVCM(xg)2hGYez!m(naQ+04}Mc52KMRBFXW30rM84hpjL;)*y3r(ZMIb{))bW z2kk?xPRNB|ZuLj%9Bk7yhHa`3CzhcrzSmu;LAV+WaPmHWI&J;W;)ba-QqYg^4FckG z@UDvOZ0DAV3`(|93FFYmlNdw63m#5%_@gCoB3y_z!rH3s2r@G?j__A0{HI4fq5ga< z)u7u1%3{}!ckZl8P=~|iiMAp){e>${Ie!h~Qu0W5(^;PRXMHkI1uvXiaads6eS5Cn z1J^(0ho57p0B5$UEJ`#TmwfwFZ(*p+%kC|(9uN`Z@SqM$^`Y-i2WLV8nsfqV2y-ym z2^;wu+o%NmRUSj5ZxkF#ez;C{Df_9gq*d?osa08NEHCxhjko`j23ee_EcShwA$2s`QFH%k+;yW_|e60Gl8+i1` z|M!IZB>;)OBf9J*K~+tFnFB{RAPKxT5K6?h|Fzq3Ow5hv&%C67a|kQTCaguTd!VS|0if3Y@wk6z_?nRZcS$mY(Q7umHg zZYcqm-!FR|?o?#MfodFAW8sb^#U#TjPKMTLETI2zf0or&K7UHA(9I%Rge>EaIpZ~m zRjfa=xKSeU^X@Fldu&(Xvv0lND&0=5r-LV1^U8vU?wYoHqtqtc(F5CR z`qY?U2U~3fO9272ueM8OKD8G_dW|)**jb_Y$p_MNp(H7o7^!!~Cyy;SdQ`a6ZNEd3 zq38VKM8(6=l?%T5O{vvx5tcLu0t>l;sZ=|9r40Ms%FzCr&TAOvZZ_7^fj>neJ^szX z&N`Eqx^?m3CIP#rX80{qmj?_I1%$&&!2cbOth1L+@A%}hV?dG-QO}Ao;b@1bq(-?e zko#5P-n!`Wl_g$*G9Kbx> zcEBEFIUD++^aR~TD5H8fz4O(eo7sQO?6I=uV7?)(x4ZUGurKvi8tVlw7kvu!VuUmS z+L{oo^w=K$T;ig?0CW7tEe=aa<=fQjyFO8CDk9)TAYoShtf4vfb&24&}$G1IBBW*k^v ze6`qkRi{!dKGV)nY5e2H8#wpo> z4*5(JDN|k?B#EQjcARM!E=YP@r&56+r^d1cgqresOHp;Gi93yoZH_~zvELR*8ZHb;Dd}^(2U*@ZJvRA@0n7ZJ_sz0 zmzOPE{1`0#h*x_8oDcSM(@%zT8=4^*5LT7se zcDVc{MdG>DfJaCd``Htgzg=9k3h!CDUUnU$LO(zv3gh9Jr$MA^o|r{a1oy)&zZen; z!u|G)khJBPZtL))!@nnN>!Aujt?^ z^d|p0(N`J{c%kFPTa1aQ2HK^nZDQY#4(CzDzfjw9L+zc@>y*TUJS$@|1 zArZU|s;my7G znxK58Uj{mTXyD7Uf8Cy^>}0#2NEBM@`aY^{&y&#biEx+>?=dse)NnUhHXx|*lL=*M ziV1tW`+?LX*M+DP;4e#)g_E)JE@vAC#=yV9NjPbbOUJh&Z_>{>GM5a z#l^4T5)H@3lJ`c_IHJFeYpc@poX}TE2k@TV3)F(X14a$h?bNZ>i(zEyL+@E2=uT6A z%j~ZWr(e7vb9f)ZiN@5DtFk`n;7~>z0G_S5U3eM?)sN^=Ln!kJM|9jORRsY;XIa&H zgp>iYL>et$ryfLz?=R^{Jh)(IP(tO{{lU}epC_2NUG!O#>of|c^X2|?*YDd9TdBCx8w9a6B4ah<@a3}PN)uztT6M_f|DsY zWURK3gxWNO*;zu3kyqFxI z;r!v&Jcb1=^E=8-u?Q$Wtx9u7LIfQ-z7AoF(uINQfKGs=844#-qRAr9Q z3;4}Wvr?1Wgsr2gd&7km%eU?q+_@vOQ@CFi3Yq^wePEJaWv9$+l6suYI#m!>m~B5n zd1VB{X6gO*Pi1?|v&|=}b`jEvF-B`^7+9l|sve-oYFkha9RdVLZwZv$h1lIiIFwxA zF#%NA!vfglT{;32&El~!AY>VWAVd!l!Jl7q_lX~=N<8rNuq}R>S4}5Y%KW9vhrry4 zmH*V@2ko80e&X$XU`HbR=Y zyqc`}qlk1fv!({)xS%+@=npHw?>V!my2TKnuF}^5=;tPq!&G!4T!9-Y>I&R%`$5SX@vCX zB_Art|02i-X_}rh`Q+u1qV=8M0d2ywXTZC304B-?GB|ufU}N@WM~1Z0R+gS4Cm~Ko!SP;|_z69FgDSG)s`Nod zv7DIsF10Ev7w35n)oXLzz=|LmOUl+%E~>n^m6g#(eiZHCS~X=}+@k@YN*rWBl|H2$ z6J9PD{vS?BJ9&-T3x3nK4c{lq(CX>DK;f(+JJhs?y~B&6J>q8_j-x-5g=*9pz8e zKK*PH44#)y_4Un~3!}Iu@|-tBXC-K-weR9%Z#s}&*d~a*M0;g&M3-y!PQKddPl4b| zJ7G)Z36o04X0Sn>H+e1+Qv%2aTQaDc3ploAkJYEkS2-(~oUdA715 ztV3OhGXNCX>1)mdiUt5huHClf@>=!O(2XPIvKR@2505xm0!~&Rn9gZ<*B0xC4{KW5 zmn_~RJk1UwN0GP$ttX|22{ww55?DeLVi%?%(#oJB(U=di3WM*;+tyhAD#s{%Q=7 
[git binary patch data omitted]
zv70v!T#CoQxA{-01ja3%-1|--1hXkooJQ+X!vvN}nfhTfUtixh*Q-(lhWRV+K|p!t zLZA^*+*`>K)y1~zMh>_1!66O;Av!R;$HSxOvyC2y7=F8x$pu*P+wk=eAYL$V1ka4( zM8S?70^`q=_~YCaZW-g~9cx`6X66aZt2lUXIReWk#^s`K-{Dk~?qMilhBch~HuNtB zydGq$QYpL4$#;t3l$^>;tO2fYn*dE>Ln!e2oa?SiS!wKTT*SZ|du8WFj+Ax+?+1|r z_%*mc-s;wq<&Rq!VxXO$(Jt`#P9W?A#A)}Ye{c#+(HR62XE z+k5W24!8uKJ$z6?>UY)TinJSVU%WQM=(xk;XZ%NabmvZy{t*w7v6+7Ri6bURIm z{Visg2A~LSn{!Wp6>K$gOS_I^^r7 zXYYpI*AW0Zu-p@ygr0)@p0v+|ggSmu*&g5t4u7^M!U#UTxQB9rkMsWp6GG1oME01B z&|{409t{$Dj4a!O5~0U1>4lec1lyOt3*Q$ue=)u z>tp;!&1p7HD4c+QH%bTZFc56ccL;^I@I(GC`cJ_9UyJE<=gmpz*KM59RJ-^W>;E0G ze@XqnL*%be`x7EC7xY(>|97zbRbT%dA~2!%S7-kLl)r|`A0YW_o&5n4@Yf{$115is zlRrT6*W>yVB!7*QKVkCMIQbJM|0ftH-NW3SaAv&JFGC#zgADc$p1k}=V;lL;VUyN> zHo;8(F>J#7&ta1*|Lj1Y_{UgL!av7~{x13tiv5e}KR^QhlJ}o5`ODLPz~rwW`vWB4 wuk`){Cg{H^>kpXx)zg1~1pMD>oIG(M3t0QomvyW=9>1tFT6&sA>Zlw43pV;qpa1{> literal 0 HcmV?d00001 diff --git a/docs/mermaid/IA.mmd b/docs/mermaid/IA.mmd index 4eb50bcf96a..fe9a96bcafc 100644 --- a/docs/mermaid/IA.mmd +++ b/docs/mermaid/IA.mmd @@ -1,5 +1,6 @@ flowchart parity[paritytech.github.io] --> devhub[polkadot_sdk_docs] + polkadot[polkadot.network] --> devhub[polkadot_sdk_docs] devhub --> polkadot_sdk devhub --> reference_docs @@ -10,5 +11,3 @@ flowchart polkadot_sdk --> cumulus polkadot_sdk --> polkadot polkadot_sdk --> xcm - - diff --git a/docs/sdk/src/polkadot_sdk/polkadot.rs b/docs/sdk/src/polkadot_sdk/polkadot.rs index 61a6877696c..e2dcca4dc7d 100644 --- a/docs/sdk/src/polkadot_sdk/polkadot.rs +++ b/docs/sdk/src/polkadot_sdk/polkadot.rs @@ -6,14 +6,16 @@ //! //! - [Polkadot Forum](https://forum.polkadot.network/) //! - [Polkadot Parachains](https://parachains.info/) -//! - [Polkadot (multi-chain) Explorer](https://subscan.io/) +//! - [Polkadot (multi-chain) Explorer: Subscan](https://subscan.io/) //! - Polkadot Fellowship //! - [Manifesto](https://github.com/polkadot-fellows/manifesto) //! - [Runtimes](https://github.com/polkadot-fellows/runtimes) //! - [RFCs](https://github.com/polkadot-fellows/rfcs) +//! - [Dashboard](https://polkadot-fellows.github.io/dashboard/) //! - [Polkadot Specs](spec.polkadot.network) //! - [The Polkadot Parachain Host Implementers' Guide](https://paritytech.github.io/polkadot-sdk/book/) //! - [Whitepaper](https://www.polkadot.network/whitepaper/) +//! - [JAM Graypaper](https://graypaper.com) //! //! ## Alternative Node Implementations ๐ŸŒˆ //! -- GitLab From 8949856d840c7f97c0c0c58a3786ccce5519a8fe Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Wed, 22 May 2024 21:26:33 +0200 Subject: [PATCH 051/106] Refactor Nomination Pool to support multiple staking strategies (#3905) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Third and final PR in the set, closes https://github.com/paritytech/polkadot-sdk/issues/454. Original PR: https://github.com/paritytech/polkadot-sdk/pull/2680 ## Precursors: - https://github.com/paritytech/polkadot-sdk/pull/3889. - https://github.com/paritytech/polkadot-sdk/pull/3904. ## Follow up issues/improvements - https://github.com/paritytech/polkadot-sdk/issues/4404 Overall changes are documented here (lot more visual ๐Ÿ˜): https://hackmd.io/@ak0n/454-np-governance ## Summary of various roles ๐Ÿคฏ ### Pallet Staking **Nominator**: An account that directly stakes on `pallet-staking` and nominates a set of validators. **Stakers**: Common term for nominators and validators. 
**Virtual Stakers**: Same as stakers, but they are keyless accounts whose locks are managed by a pallet external to `pallet-staking`.

### Pallet Delegated Staking

**Agent**: An account that receives delegations from other accounts (delegators) and stakes on their behalf. Agents are also Virtual Stakers in `pallet-staking`, where `pallet-delegated-staking` manages their locks.

**Delegator**: An account that delegates some funds to an agent.

### Pallet Nomination Pools

**Pool account**: Keyless account of a pool where funds are pooled. Members pledge their funds towards the pool. These accounts become `Agent` accounts in `pallet-delegated-staking`.

**Pool Members**: The individual members of the pool who contributed funds to it. They are also `Delegator`s in `pallet-delegated-staking`.

## Changes

### Multiple Stake strategies

**TransferStake**: The current nomination pool logic can be considered a staking strategy where delegators transfer funds to the pool, which then stakes them. In this scenario, funds are locked in the pool account, and users lose direct control of their funds.

**DelegateStake**: With this PR, we introduce a new staking strategy where individual delegators delegate funds to the pool. `Delegate` implies that the funds stay locked in the delegator's own account. Importantly, the pool does not hold funds of its own, but it has authorization from its members to use these funds for staking.

We extract all interaction of the pool with the staking interface into a new trait, `StakeStrategy`. This is the logic that varies between the two staking strategies, and we use the trait to implement both of them: `TransferStake` and `DelegateStake` (a simplified sketch of this split follows the TODO list below).

### NominationPool

Consumes an implementation of `StakeStrategy` instead of `StakingInterface`. I have renamed the associated type from `Staking` to `StakeAdapter` to clarify the difference from the earlier trait.

To enable delegation-based staking in pools, the nomination pool pallet can be configured as:
```
type StakeAdapter = pallet_nomination_pools::adapter::DelegateStake;
```

Note that with the following configuration, the changes in this PR are a no-op:
```
type StakeAdapter = pallet_nomination_pools::adapter::TransferStake;
```

## Deployment roadmap

We plan to enable this only on Westend. In production runtimes, we can keep pools on `TransferStake`, which is no functional change. Once we have a full audit, we can enable it on Kusama, followed by Polkadot.

## TODO

- [x] Runtime-level (Westend) migration for existing nomination pools.
- [x] Permissionless call / `pallet::tasks` for claiming delegator funds.
- [x] Add/update benches.
- [x] Migration tests.
- [x] Storage flag to mark the `DelegateStake` migration, plus integrity checks to disallow `TransferStake` for migrated runtimes.
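To make the strategy split concrete, the following is a minimal, self-contained Rust sketch of the idea — a toy model only, not the actual trait in `substrate/frame/nomination-pools/src/adapter.rs`. The `pledge`/`funds_held_in` method names and the concrete `u64`/`u128` types are illustrative assumptions; the real trait works over FRAME currency and staking types:

```rust
// Illustrative stand-ins; the real pallet is generic over FRAME types.
type AccountId = u64;
type Balance = u128;

/// The surface that varies between the two staking strategies
/// (hypothetical methods, for illustration only).
trait StakeStrategy {
    /// Put `amount` of `who`'s funds under the pool's control and stake it.
    fn pledge(pool: AccountId, who: AccountId, amount: Balance);
    /// The account in which the staked funds actually sit.
    fn funds_held_in(pool: AccountId, who: AccountId) -> AccountId;
}

/// Old behaviour: funds are transferred into the pool account and locked there.
struct TransferStake;
impl StakeStrategy for TransferStake {
    fn pledge(pool: AccountId, who: AccountId, amount: Balance) {
        println!("transfer {amount} from {who} into pool account {pool}, then bond");
    }
    fn funds_held_in(pool: AccountId, _who: AccountId) -> AccountId {
        pool // member loses direct control; balance sits in the pool account
    }
}

/// New behaviour: funds stay in the member's account; the pool only receives
/// an authorization (delegation) to stake them.
struct DelegateStake;
impl StakeStrategy for DelegateStake {
    fn pledge(pool: AccountId, who: AccountId, amount: Balance) {
        println!("hold {amount} in {who}'s own account, delegate it to agent {pool}");
    }
    fn funds_held_in(_pool: AccountId, who: AccountId) -> AccountId {
        who // funds remain in the delegator's account
    }
}

fn main() {
    TransferStake::pledge(10, 1, 500);
    DelegateStake::pledge(10, 1, 500);
    assert_eq!(TransferStake::funds_held_in(10, 1), 10); // held by the pool
    assert_eq!(DelegateStake::funds_held_in(10, 1), 1); // held by the member
}
```

The design point is that the pool pallet stays generic over this trait, so a runtime switches between the two behaviours purely through its `StakeAdapter` configuration, without touching the pool logic itself.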
--------- Signed-off-by: Matteo Muraca Signed-off-by: Alexandru Gheorghe Signed-off-by: Andrei Sandu Signed-off-by: Adrian Catangiu Signed-off-by: Alexandru Vasile Signed-off-by: Oliver Tale-Yazdi Signed-off-by: divdeploy Signed-off-by: dependabot[bot] Signed-off-by: hongkuang Co-authored-by: Bastian Kรถcher Co-authored-by: gemini132 <164285545+gemini132@users.noreply.github.com> Co-authored-by: Matteo Muraca <56828990+muraca@users.noreply.github.com> Co-authored-by: Liam Aharon Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Co-authored-by: Alessandro Siniscalchi Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Co-authored-by: Ross Bulat Co-authored-by: Serban Iorga Co-authored-by: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> Co-authored-by: Sam Johnson Co-authored-by: Adrian Catangiu Co-authored-by: Javier Viola <363911+pepoviola@users.noreply.github.com> Co-authored-by: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Co-authored-by: Niklas Adolfsson Co-authored-by: Dastan <88332432+dastansam@users.noreply.github.com> Co-authored-by: Clara van Staden Co-authored-by: Ron Co-authored-by: Vincent Geddes Co-authored-by: Svyatoslav Nikolsky Co-authored-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Co-authored-by: Dino Paฤandi <3002868+Dinonard@users.noreply.github.com> Co-authored-by: Andrei Eres Co-authored-by: Alin Dima Co-authored-by: Andrei Sandu Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Bastian Kรถcher Co-authored-by: Branislav Kontur Co-authored-by: Sebastian Kunert Co-authored-by: gupnik Co-authored-by: Vladimir Istyufeev Co-authored-by: Lulu Co-authored-by: Juan Girini Co-authored-by: Francisco Aguirre Co-authored-by: Dรณnal Murray Co-authored-by: Shawn Tabrizi Co-authored-by: Kutsal Kaan Bilgin Co-authored-by: Ermal Kaleci Co-authored-by: ordian Co-authored-by: divdeploy <166095818+divdeploy@users.noreply.github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Co-authored-by: Squirrel Co-authored-by: HongKuang <166261675+HongKuang@users.noreply.github.com> Co-authored-by: Tsvetomir Dimitrov Co-authored-by: Egor_P Co-authored-by: Aaro Altonen <48052676+altonen@users.noreply.github.com> Co-authored-by: Dmitry Markin Co-authored-by: Alexandru Vasile Co-authored-by: Lรฉa Narzis <78718413+lean-apple@users.noreply.github.com> Co-authored-by: Gonรงalo Pestana Co-authored-by: georgepisaltu <52418509+georgepisaltu@users.noreply.github.com> Co-authored-by: command-bot <> Co-authored-by: PG Herveou Co-authored-by: jimwfs Co-authored-by: jimwfs <169986508+jimwfs@users.noreply.github.com> Co-authored-by: polka.dom --- Cargo.lock | 30 +- Cargo.toml | 3 +- polkadot/runtime/westend/Cargo.toml | 4 + polkadot/runtime/westend/src/lib.rs | 45 +- .../src/weights/pallet_nomination_pools.rs | 463 +++++-- prdoc/pr_3905.prdoc | 25 + substrate/bin/node/runtime/src/lib.rs | 2 +- substrate/frame/delegated-staking/Cargo.toml | 4 + .../frame/delegated-staking/src/impls.rs | 19 +- substrate/frame/delegated-staking/src/lib.rs | 25 +- substrate/frame/delegated-staking/src/mock.rs | 45 +- .../frame/delegated-staking/src/tests.rs | 503 ++++++- .../frame/delegated-staking/src/types.rs | 1 - .../test-staking-e2e/src/lib.rs | 2 +- .../test-staking-e2e/src/mock.rs | 2 +- 
.../nomination-pools/benchmarking/Cargo.toml | 3 + .../benchmarking/src/inner.rs | 251 +++- .../nomination-pools/benchmarking/src/lib.rs | 1 + .../nomination-pools/benchmarking/src/mock.rs | 22 +- .../frame/nomination-pools/fuzzer/src/call.rs | 2 +- .../frame/nomination-pools/src/adapter.rs | 389 ++++++ substrate/frame/nomination-pools/src/lib.rs | 360 +++-- .../frame/nomination-pools/src/migration.rs | 146 ++- substrate/frame/nomination-pools/src/mock.rs | 12 +- substrate/frame/nomination-pools/src/tests.rs | 92 +- .../frame/nomination-pools/src/weights.rs | 450 ++++--- .../test-delegate-stake/Cargo.toml | 41 + .../test-delegate-stake/src/lib.rs | 1158 +++++++++++++++++ .../test-delegate-stake/src/mock.rs | 406 ++++++ .../Cargo.toml | 2 +- .../src/lib.rs | 0 .../src/mock.rs | 2 +- substrate/frame/staking/src/ledger.rs | 2 +- substrate/frame/staking/src/lib.rs | 2 +- substrate/frame/staking/src/pallet/impls.rs | 10 +- substrate/frame/staking/src/pallet/mod.rs | 1 + substrate/frame/staking/src/slashing.rs | 2 +- substrate/primitives/staking/src/lib.rs | 13 + 38 files changed, 4052 insertions(+), 488 deletions(-) create mode 100644 prdoc/pr_3905.prdoc create mode 100644 substrate/frame/nomination-pools/src/adapter.rs create mode 100644 substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml create mode 100644 substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs create mode 100644 substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs rename substrate/frame/nomination-pools/{test-staking => test-transfer-stake}/Cargo.toml (96%) rename substrate/frame/nomination-pools/{test-staking => test-transfer-stake}/src/lib.rs (100%) rename substrate/frame/nomination-pools/{test-staking => test-transfer-stake}/src/mock.rs (98%) diff --git a/Cargo.lock b/Cargo.lock index f8126787531..e3a72ca23d8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10171,6 +10171,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", + "pallet-nomination-pools", "pallet-staking", "pallet-staking-reward-curve", "pallet-timestamp", @@ -10844,6 +10845,7 @@ dependencies = [ "frame-system", "pallet-bags-list", "pallet-balances", + "pallet-delegated-staking", "pallet-nomination-pools", "pallet-staking", "pallet-staking-reward-curve", @@ -10884,7 +10886,32 @@ dependencies = [ ] [[package]] -name = "pallet-nomination-pools-test-staking" +name = "pallet-nomination-pools-test-delegate-stake" +version = "1.0.0" +dependencies = [ + "frame-election-provider-support", + "frame-support", + "frame-system", + "log", + "pallet-bags-list", + "pallet-balances", + "pallet-delegated-staking", + "pallet-nomination-pools", + "pallet-staking", + "pallet-staking-reward-curve", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-staking", + "sp-std 14.0.0", + "sp-tracing 16.0.0", +] + +[[package]] +name = "pallet-nomination-pools-test-transfer-stake" version = "1.0.0" dependencies = [ "frame-election-provider-support", @@ -22848,6 +22875,7 @@ dependencies = [ "pallet-beefy-mmr", "pallet-collective", "pallet-conviction-voting", + "pallet-delegated-staking", "pallet-democracy", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", diff --git a/Cargo.toml b/Cargo.toml index 239e1f5de99..54fa44d6654 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -374,7 +374,8 @@ members = [ "substrate/frame/nomination-pools/benchmarking", "substrate/frame/nomination-pools/fuzzer", "substrate/frame/nomination-pools/runtime-api", - 
"substrate/frame/nomination-pools/test-staking", + "substrate/frame/nomination-pools/test-delegate-stake", + "substrate/frame/nomination-pools/test-transfer-stake", "substrate/frame/offences", "substrate/frame/offences/benchmarking", "substrate/frame/paged-list", diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index 01e9dd1527f..56623272be8 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -83,6 +83,7 @@ pallet-society = { path = "../../../substrate/frame/society", default-features = pallet-staking = { path = "../../../substrate/frame/staking", default-features = false } pallet-staking-reward-curve = { package = "pallet-staking-reward-curve", path = "../../../substrate/frame/staking/reward-curve" } pallet-staking-runtime-api = { path = "../../../substrate/frame/staking/runtime-api", default-features = false } +pallet-delegated-staking = { path = "../../../substrate/frame/delegated-staking", default-features = false } pallet-state-trie-migration = { path = "../../../substrate/frame/state-trie-migration", default-features = false } pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } @@ -161,6 +162,7 @@ std = [ "pallet-beefy/std", "pallet-collective/std", "pallet-conviction-voting/std", + "pallet-delegated-staking/std", "pallet-democracy/std", "pallet-election-provider-multi-phase/std", "pallet-election-provider-support-benchmarking?/std", @@ -244,6 +246,7 @@ runtime-benchmarks = [ "pallet-balances/runtime-benchmarks", "pallet-collective/runtime-benchmarks", "pallet-conviction-voting/runtime-benchmarks", + "pallet-delegated-staking/runtime-benchmarks", "pallet-democracy/runtime-benchmarks", "pallet-election-provider-multi-phase/runtime-benchmarks", "pallet-election-provider-support-benchmarking/runtime-benchmarks", @@ -304,6 +307,7 @@ try-runtime = [ "pallet-beefy/try-runtime", "pallet-collective/try-runtime", "pallet-conviction-voting/try-runtime", + "pallet-delegated-staking/try-runtime", "pallet-democracy/try-runtime", "pallet-election-provider-multi-phase/try-runtime", "pallet-elections-phragmen/try-runtime", diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index cfe0dde0da1..4bf132d82c9 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -647,7 +647,7 @@ impl pallet_staking::Config for Runtime { type HistoryDepth = frame_support::traits::ConstU32<84>; type MaxControllersInDeprecationBatch = MaxControllersInDeprecationBatch; type BenchmarkingConfig = runtime_common::StakingBenchmarkingConfig; - type EventListeners = NominationPools; + type EventListeners = (NominationPools, DelegatedStaking); type WeightInfo = weights::pallet_staking::WeightInfo; type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } @@ -1360,7 +1360,8 @@ impl pallet_nomination_pools::Config for Runtime { type RewardCounter = FixedU128; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; - type Staking = Staking; + type StakeAdapter = + pallet_nomination_pools::adapter::DelegateStake; type PostUnbondingPoolsWindow = ConstU32<4>; type MaxMetadataLen = ConstU32<256>; // we use the same number of allowed unlocking chunks as with staking. @@ -1370,6 +1371,21 @@ impl pallet_nomination_pools::Config for Runtime { type AdminOrigin = EitherOf, StakingAdmin>; } +parameter_types! 
{ + pub const DelegatedStakingPalletId: PalletId = PalletId(*b"py/dlstk"); + pub const SlashRewardFraction: Perbill = Perbill::from_percent(1); +} + +impl pallet_delegated_staking::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type PalletId = DelegatedStakingPalletId; + type Currency = Balances; + type OnSlash = (); + type SlashRewardFraction = SlashRewardFraction; + type RuntimeHoldReason = RuntimeHoldReason; + type CoreStaking = Staking; +} + impl pallet_root_testing::Config for Runtime { type RuntimeEvent = RuntimeEvent; } @@ -1518,6 +1534,10 @@ mod runtime { #[runtime::pallet_index(37)] pub type Treasury = pallet_treasury; + // Staking extension for delegation + #[runtime::pallet_index(38)] + pub type DelegatedStaking = pallet_delegated_staking; + // Parachains pallets. Start indices at 40 to leave room. #[runtime::pallet_index(41)] pub type ParachainsOrigin = parachains_origin; @@ -1621,11 +1641,12 @@ pub type SignedExtra = ( frame_metadata_hash_extension::CheckMetadataHash, ); -pub struct NominationPoolsMigrationV4OldPallet; -impl Get for NominationPoolsMigrationV4OldPallet { - fn get() -> Perbill { - Perbill::from_percent(100) - } +parameter_types! { + // This is the max pools that will be migrated in the runtime upgrade. Westend has more pools + // than this, but we want to emulate some non migrated pools. In prod runtimes, if weight is not + // a concern, it is recommended to set to (existing pools + 10) to also account for any new + // pools getting created before the migration is actually executed. + pub const MaxPoolsToMigrate: u32 = 250; } /// All migrations that will run on the next runtime upgrade. @@ -1658,7 +1679,15 @@ pub mod migrations { } /// Unreleased migrations. Add new ones here: - pub type Unreleased = (pallet_staking::migrations::v15::MigrateV14ToV15,); + pub type Unreleased = ( + // Migrate NominationPools to `DelegateStake` adapter. This is unversioned upgrade and + // should not be applied yet in Kusama/Polkadot. + pallet_nomination_pools::migration::unversioned::DelegationStakeMigration< + Runtime, + MaxPoolsToMigrate, + >, + pallet_staking::migrations::v15::MigrateV14ToV15, + ); } /// Unchecked extrinsic type as expected by this runtime. diff --git a/polkadot/runtime/westend/src/weights/pallet_nomination_pools.rs b/polkadot/runtime/westend/src/weights/pallet_nomination_pools.rs index 6aa5ddd1ec8..35eef199fb7 100644 --- a/polkadot/runtime/westend/src/weights/pallet_nomination_pools.rs +++ b/polkadot/runtime/westend/src/weights/pallet_nomination_pools.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_nomination_pools` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-04-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-dcu62vjg-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -54,7 +54,7 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) + /// Storage: `Staking::Bonded` (r:2 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) @@ -62,7 +62,7 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:1) + /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `NominationPools::MaxPoolMembersPerPool` (r:1 w:0) /// Proof: `NominationPools::MaxPoolMembersPerPool` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -70,10 +70,16 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::MaxPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:1 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:1 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:2 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForDelegators` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForDelegators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:3 w:3) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) @@ -82,13 +88,13 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 
511, mode: `MaxEncodedLen`) fn join() -> Weight { // Proof Size summary in bytes: - // Measured: `3355` + // Measured: `3606` // Estimated: `8877` - // Minimum execution time: 173_707_000 picoseconds. - Weight::from_parts(179_920_000, 0) + // Minimum execution time: 204_877_000 picoseconds. + Weight::from_parts(210_389_000, 0) .saturating_add(Weight::from_parts(0, 8877)) - .saturating_add(T::DbWeight::get().reads(20)) - .saturating_add(T::DbWeight::get().writes(13)) + .saturating_add(T::DbWeight::get().reads(24)) + .saturating_add(T::DbWeight::get().writes(15)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) @@ -98,16 +104,20 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:3 w:2) + /// Storage: `System::Account` (r:2 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) + /// Storage: `Staking::Bonded` (r:2 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:1 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:1 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:2 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:3 w:3) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) @@ -116,13 +126,13 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `3365` + // Measured: `3762` // Estimated: `8877` - // Minimum execution time: 174_414_000 picoseconds. - Weight::from_parts(178_068_000, 0) + // Minimum execution time: 203_362_000 picoseconds. 
+ Weight::from_parts(209_899_000, 0) .saturating_add(Weight::from_parts(0, 8877)) - .saturating_add(T::DbWeight::get().reads(17)) - .saturating_add(T::DbWeight::get().writes(13)) + .saturating_add(T::DbWeight::get().reads(20)) + .saturating_add(T::DbWeight::get().writes(14)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) @@ -134,16 +144,20 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:3 w:3) + /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) + /// Storage: `Staking::Bonded` (r:2 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:1 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:1 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:2 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:2 w:2) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) @@ -152,13 +166,13 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_other() -> Weight { // Proof Size summary in bytes: - // Measured: `3312` - // Estimated: `8799` - // Minimum execution time: 198_864_000 picoseconds. - Weight::from_parts(203_783_000, 0) - .saturating_add(Weight::from_parts(0, 8799)) - .saturating_add(T::DbWeight::get().reads(17)) - .saturating_add(T::DbWeight::get().writes(13)) + // Measured: `3709` + // Estimated: `6248` + // Minimum execution time: 230_686_000 picoseconds. 
+ Weight::from_parts(237_502_000, 0) + .saturating_add(Weight::from_parts(0, 6248)) + .saturating_add(T::DbWeight::get().reads(20)) + .saturating_add(T::DbWeight::get().writes(14)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) @@ -176,8 +190,8 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `1138` // Estimated: `4182` - // Minimum execution time: 70_250_000 picoseconds. - Weight::from_parts(72_231_000, 0) + // Minimum execution time: 70_821_000 picoseconds. + Weight::from_parts(72_356_000, 0) .saturating_add(Weight::from_parts(0, 4182)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -194,7 +208,7 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:1) + /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -202,10 +216,8 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:1 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:3 w:3) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) @@ -216,13 +228,13 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::CounterForSubPoolsStorage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn unbond() -> Weight { // Proof Size summary in bytes: - // Measured: `3545` + // Measured: `3341` // Estimated: `8877` - // Minimum execution time: 155_853_000 picoseconds. - Weight::from_parts(161_032_000, 0) + // Minimum execution time: 156_714_000 picoseconds. 
+ Weight::from_parts(158_305_000, 0) .saturating_add(Weight::from_parts(0, 8877)) - .saturating_add(T::DbWeight::get().reads(20)) - .saturating_add(T::DbWeight::get().writes(13)) + .saturating_add(T::DbWeight::get().reads(18)) + .saturating_add(T::DbWeight::get().writes(11)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) @@ -232,23 +244,25 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:1 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:1 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1744` - // Estimated: `4764` - // Minimum execution time: 62_933_000 picoseconds. - Weight::from_parts(65_847_171, 0) - .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 1_476 - .saturating_add(Weight::from_parts(59_648, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `1767` + // Estimated: `4556` + // Minimum execution time: 56_836_000 picoseconds. 
+ Weight::from_parts(59_738_398, 0) + .saturating_add(Weight::from_parts(0, 4556)) + // Standard Error: 1_478 + .saturating_add(Weight::from_parts(60_085, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -259,18 +273,24 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:1) /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(261), added: 2736, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:1 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:1 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:1 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForDelegators` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForDelegators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::ClaimPermissions` (r:0 w:1) @@ -278,15 +298,15 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2134` - // Estimated: `4764` - // Minimum execution time: 123_641_000 picoseconds. 
- Weight::from_parts(127_222_589, 0) - .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 2_493 - .saturating_add(Weight::from_parts(83_361, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(11)) - .saturating_add(T::DbWeight::get().writes(9)) + // Measured: `2405` + // Estimated: `4556` + // Minimum execution time: 136_737_000 picoseconds. + Weight::from_parts(141_757_658, 0) + .saturating_add(Weight::from_parts(0, 4556)) + // Standard Error: 2_609 + .saturating_add(Weight::from_parts(84_538, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(14)) + .saturating_add(T::DbWeight::get().writes(11)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) @@ -296,28 +316,38 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:1) /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(261), added: 2736, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:1 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::SlashingSpans` (r:1 w:0) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Balances::Locks` (r:2 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:2 w:1) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForVirtualStakers` (r:1 w:1) + /// Proof: `Staking::CounterForVirtualStakers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::Validators` (r:1 w:0) /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:1) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: 
`DelegatedStaking::Delegators` (r:1 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForAgents` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForAgents` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForDelegators` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForDelegators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:1) - /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForReversePoolIdLookup` (r:1 w:1) /// Proof: `NominationPools::CounterForReversePoolIdLookup` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) @@ -326,6 +356,10 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::CounterForRewardPools` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForSubPoolsStorage` (r:1 w:1) /// Proof: `NominationPools::CounterForSubPoolsStorage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:1) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:0) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `NominationPools::Metadata` (r:1 w:1) /// Proof: `NominationPools::Metadata` (`max_values`: None, `max_size`: Some(270), added: 2745, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForBondedPools` (r:1 w:1) @@ -337,13 +371,13 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(_s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2453` - // Estimated: `8538` - // Minimum execution time: 219_469_000 picoseconds. - Weight::from_parts(227_526_000, 0) - .saturating_add(Weight::from_parts(0, 8538)) - .saturating_add(T::DbWeight::get().reads(24)) - .saturating_add(T::DbWeight::get().writes(20)) + // Measured: `2809` + // Estimated: `6274` + // Minimum execution time: 241_043_000 picoseconds. 
+ Weight::from_parts(250_578_253, 0) + .saturating_add(Weight::from_parts(0, 6274)) + .saturating_add(T::DbWeight::get().reads(29)) + .saturating_add(T::DbWeight::get().writes(26)) } /// Storage: `NominationPools::LastPoolId` (r:1 w:1) /// Proof: `NominationPools::LastPoolId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -365,16 +399,30 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::MaxPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:2 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:2 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:2 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForAgents` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForAgents` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:1) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:2 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:2 w:1) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForDelegators` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForDelegators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForVirtualStakers` (r:1 w:1) + /// Proof: `Staking::CounterForVirtualStakers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:1) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:0) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForRewardPools` (r:1 w:1) @@ -391,22 +439,28 @@ impl 
pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `1102` - // Estimated: `8538` - // Minimum execution time: 166_466_000 picoseconds. - Weight::from_parts(171_425_000, 0) - .saturating_add(Weight::from_parts(0, 8538)) - .saturating_add(T::DbWeight::get().reads(23)) - .saturating_add(T::DbWeight::get().writes(17)) + // Measured: `1168` + // Estimated: `6196` + // Minimum execution time: 180_902_000 picoseconds. + Weight::from_parts(187_769_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(31)) + .saturating_add(T::DbWeight::get().writes(23)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinCreateBond` (r:1 w:0) + /// Proof: `NominationPools::MinCreateBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinJoinBond` (r:1 w:0) + /// Proof: `NominationPools::MinJoinBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) @@ -426,14 +480,14 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1738` + // Measured: `1921` // Estimated: `4556 + n * (2520 ยฑ0)` - // Minimum execution time: 59_650_000 picoseconds. - Weight::from_parts(60_620_077, 0) + // Minimum execution time: 78_369_000 picoseconds. + Weight::from_parts(79_277_958, 0) .saturating_add(Weight::from_parts(0, 4556)) - // Standard Error: 7_316 - .saturating_add(Weight::from_parts(1_467_406, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(12)) + // Standard Error: 8_343 + .saturating_add(Weight::from_parts(1_493_255, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(15)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(5)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) @@ -446,10 +500,10 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_state() -> Weight { // Proof Size summary in bytes: - // Measured: `1363` + // Measured: `1406` // Estimated: `4556` - // Minimum execution time: 31_170_000 picoseconds. 
- Weight::from_parts(32_217_000, 0) + // Minimum execution time: 32_631_000 picoseconds. + Weight::from_parts(33_356_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -465,11 +519,11 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `498` // Estimated: `3735` - // Minimum execution time: 12_603_000 picoseconds. - Weight::from_parts(13_241_702, 0) + // Minimum execution time: 12_514_000 picoseconds. + Weight::from_parts(13_232_732, 0) .saturating_add(Weight::from_parts(0, 3735)) - // Standard Error: 116 - .saturating_add(Weight::from_parts(1_428, 0).saturating_mul(n.into())) + // Standard Error: 150 + .saturating_add(Weight::from_parts(2_371, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -489,8 +543,8 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_608_000 picoseconds. - Weight::from_parts(3_801_000, 0) + // Minimum execution time: 3_107_000 picoseconds. + Weight::from_parts(3_255_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(6)) } @@ -500,18 +554,22 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `498` // Estimated: `3719` - // Minimum execution time: 16_053_000 picoseconds. - Weight::from_parts(16_473_000, 0) + // Minimum execution time: 16_568_000 picoseconds. + Weight::from_parts(17_019_000, 0) .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::Validators` (r:1 w:0) /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -526,12 +584,12 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `1901` + // Measured: `2138` // Estimated: `4556` - // Minimum execution time: 57_251_000 picoseconds. - Weight::from_parts(59_390_000, 0) + // Minimum execution time: 73_717_000 picoseconds. 
+ Weight::from_parts(77_030_000, 0) .saturating_add(Weight::from_parts(0, 4556)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) @@ -546,8 +604,8 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `770` // Estimated: `3719` - // Minimum execution time: 29_888_000 picoseconds. - Weight::from_parts(31_056_000, 0) + // Minimum execution time: 30_770_000 picoseconds. + Weight::from_parts(31_556_000, 0) .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -560,8 +618,8 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `538` // Estimated: `3719` - // Minimum execution time: 15_769_000 picoseconds. - Weight::from_parts(16_579_000, 0) + // Minimum execution time: 16_257_000 picoseconds. + Weight::from_parts(16_891_000, 0) .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -572,8 +630,8 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `498` // Estimated: `3719` - // Minimum execution time: 15_385_000 picoseconds. - Weight::from_parts(16_402_000, 0) + // Minimum execution time: 16_548_000 picoseconds. + Weight::from_parts(18_252_000, 0) .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -584,8 +642,8 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `498` // Estimated: `3719` - // Minimum execution time: 14_965_000 picoseconds. - Weight::from_parts(15_548_000, 0) + // Minimum execution time: 16_085_000 picoseconds. + Weight::from_parts(17_218_000, 0) .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -598,8 +656,8 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `508` // Estimated: `4182` - // Minimum execution time: 13_549_000 picoseconds. - Weight::from_parts(14_307_000, 0) + // Minimum execution time: 13_648_000 picoseconds. + Weight::from_parts(13_990_000, 0) .saturating_add(Weight::from_parts(0, 4182)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -616,8 +674,8 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `968` // Estimated: `3719` - // Minimum execution time: 60_153_000 picoseconds. - Weight::from_parts(61_369_000, 0) + // Minimum execution time: 60_321_000 picoseconds. + Weight::from_parts(61_512_000, 0) .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -632,12 +690,135 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) fn adjust_pool_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `867` + // Measured: `876` // Estimated: `4764` - // Minimum execution time: 64_985_000 picoseconds. - Weight::from_parts(66_616_000, 0) + // Minimum execution time: 65_609_000 picoseconds. 
+ Weight::from_parts(67_320_000, 0) .saturating_add(Weight::from_parts(0, 4764)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:1 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:1 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::BondedPools` (r:1 w:0) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:0) + /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(261), added: 2736, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn apply_slash() -> Weight { + // Proof Size summary in bytes: + // Measured: `3328` + // Estimated: `4556` + // Minimum execution time: 99_605_000 picoseconds. + Weight::from_parts(101_986_000, 0) + .saturating_add(Weight::from_parts(0, 4556)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:1 w:0) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:1 w:0) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::BondedPools` (r:1 w:0) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:0) + /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(261), added: 2736, mode: `MaxEncodedLen`) + fn apply_slash_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `3070` + // Estimated: `4556` + // Minimum execution time: 58_103_000 picoseconds. 
+ Weight::from_parts(59_680_000, 0) + .saturating_add(Weight::from_parts(0, 4556)) + .saturating_add(T::DbWeight::get().reads(7)) + } + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:1 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:2 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForAgents` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForAgents` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForVirtualStakers` (r:1 w:1) + /// Proof: `Staking::CounterForVirtualStakers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForDelegators` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForDelegators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn pool_migrate() -> Weight { + // Proof Size summary in bytes: + // Measured: `1359` + // Estimated: `6196` + // Minimum execution time: 144_098_000 picoseconds. 
+ Weight::from_parts(146_590_000, 0)
+ .saturating_add(Weight::from_parts(0, 6196))
+ .saturating_add(T::DbWeight::get().reads(16))
+ .saturating_add(T::DbWeight::get().writes(11))
+ }
+ /// Storage: `NominationPools::PoolMembers` (r:1 w:0)
+ /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`)
+ /// Storage: `NominationPools::BondedPools` (r:1 w:0)
+ /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`)
+ /// Storage: `Staking::Bonded` (r:2 w:0)
+ /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+ /// Storage: `Staking::Ledger` (r:1 w:0)
+ /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+ /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:0)
+ /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(261), added: 2736, mode: `MaxEncodedLen`)
+ /// Storage: `NominationPools::MinJoinBond` (r:1 w:0)
+ /// Proof: `NominationPools::MinJoinBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+ /// Storage: `DelegatedStaking::Delegators` (r:2 w:2)
+ /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`)
+ /// Storage: `DelegatedStaking::Agents` (r:2 w:0)
+ /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`)
+ /// Storage: `Balances::Holds` (r:2 w:2)
+ /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`)
+ /// Storage: `DelegatedStaking::CounterForDelegators` (r:1 w:1)
+ /// Proof: `DelegatedStaking::CounterForDelegators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+ /// Storage: `System::Account` (r:1 w:1)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+ fn migrate_delegation() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `2275`
+ // Estimated: `6180`
+ // Minimum execution time: 148_594_000 picoseconds.
+ Weight::from_parts(152_119_000, 0)
+ .saturating_add(Weight::from_parts(0, 6180))
+ .saturating_add(T::DbWeight::get().reads(15))
+ .saturating_add(T::DbWeight::get().writes(6))
+ }
 }
diff --git a/prdoc/pr_3905.prdoc b/prdoc/pr_3905.prdoc
new file mode 100644
index 00000000000..d1c03650c9b
--- /dev/null
+++ b/prdoc/pr_3905.prdoc
@@ -0,0 +1,25 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Allow nomination pools to use different staking strategies, including a new `DelegateStake` strategy.
+
+doc:
+ - audience: Runtime Dev
+ description: |
+ This PR introduces a new staking strategy called `DelegateStake`. With this strategy, pool members delegate
+ their stake to the pool, that is, funds are locked in the member's own account instead of being transferred
+ to the pool account. Includes a migration of existing pools to this strategy for Westend.
+
+crates:
+ - name: pallet-nomination-pools
+ bump: major
+ - name: pallet-nomination-pools-benchmarking
+ bump: major
+ - name: sp-staking
+ bump: patch
+ - name: pallet-staking
+ bump: patch
+ - name: pallet-delegated-staking
+ bump: patch
+ - name: westend-runtime
+ bump: major
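The runtime-facing change is that `pallet-nomination-pools` now takes a `StakeAdapter` instead of a direct `Staking` implementation, as the node-runtime diff below shows. A minimal sketch of the two possible wirings, assuming the adapter generics `<Self, Staking>` and `<Self, Staking, DelegatedStaking>` that the mock runtime later in this patch uses:

```rust
// Keep the pre-existing behaviour: member funds are transferred into the
// pool's bonded account.
type StakeAdapter = pallet_nomination_pools::adapter::TransferStake<Self, Staking>;

// Or opt into the new strategy: member funds stay held in the member's own
// account and are delegated to the pool through `pallet-delegated-staking`.
// type StakeAdapter =
//     pallet_nomination_pools::adapter::DelegateStake<Self, Staking, DelegatedStaking>;
```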
diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs
index 5067085e8ed..617088ffe1f 100644
--- a/substrate/bin/node/runtime/src/lib.rs
+++ b/substrate/bin/node/runtime/src/lib.rs
@@ -910,7 +910,7 @@ impl pallet_nomination_pools::Config for Runtime {
 type RewardCounter = FixedU128;
 type BalanceToU256 = BalanceToU256;
 type U256ToBalance = U256ToBalance;
- type Staking = Staking;
+ type StakeAdapter = pallet_nomination_pools::adapter::TransferStake<Self, Staking>;
 type PostUnbondingPoolsWindow = PostUnbondPoolsWindow;
 type MaxMetadataLen = ConstU32<256>;
 type MaxUnbonding = ConstU32<8>;
diff --git a/substrate/frame/delegated-staking/Cargo.toml b/substrate/frame/delegated-staking/Cargo.toml
index 4a489882711..3b122dc2e26 100644
--- a/substrate/frame/delegated-staking/Cargo.toml
+++ b/substrate/frame/delegated-staking/Cargo.toml
@@ -26,6 +26,7 @@ sp-io = { path = "../../primitives/io" }
 substrate-test-utils = { path = "../../test-utils" }
 sp-tracing = { path = "../../primitives/tracing" }
 pallet-staking = { path = "../staking" }
+pallet-nomination-pools = { path = "../nomination-pools" }
 pallet-balances = { path = "../balances" }
 pallet-timestamp = { path = "../timestamp" }
 pallet-staking-reward-curve = { path = "../staking/reward-curve" }
@@ -39,6 +40,7 @@ std = [
 "frame-support/std",
 "frame-system/std",
 "pallet-balances/std",
+ "pallet-nomination-pools/std",
 "pallet-staking/std",
 "pallet-timestamp/std",
 "scale-info/std",
@@ -53,6 +55,7 @@ runtime-benchmarks = [
 "frame-support/runtime-benchmarks",
 "frame-system/runtime-benchmarks",
 "pallet-balances/runtime-benchmarks",
+ "pallet-nomination-pools/runtime-benchmarks",
 "pallet-staking/runtime-benchmarks",
 "pallet-timestamp/runtime-benchmarks",
 "sp-runtime/runtime-benchmarks",
@@ -63,6 +66,7 @@ try-runtime = [
 "frame-support/try-runtime",
 "frame-system/try-runtime",
 "pallet-balances/try-runtime",
+ "pallet-nomination-pools/try-runtime",
 "pallet-staking/try-runtime",
 "pallet-timestamp/try-runtime",
 "sp-runtime/try-runtime",
diff --git a/substrate/frame/delegated-staking/src/impls.rs b/substrate/frame/delegated-staking/src/impls.rs
index b1945b0ce37..032f6120642 100644
--- a/substrate/frame/delegated-staking/src/impls.rs
+++ b/substrate/frame/delegated-staking/src/impls.rs
@@ -109,7 +109,6 @@ impl<T: Config> DelegationMigrator for Pallet<T> {
 reward_account.clone(),
 )
 }
-
 fn migrate_delegation(
 agent: &Self::AccountId,
 delegator: &Self::AccountId,
@@ -121,6 +120,24 @@
 value,
 )
 }
+
+ /// Only used for testing.
+ #[cfg(feature = "runtime-benchmarks")]
+ fn drop_agent(agent: &T::AccountId) {
+ <Agents<T>>::remove(agent);
+ <Delegators<T>>::iter()
+ .filter(|(_, delegation)| delegation.agent == *agent)
+ .for_each(|(delegator, _)| {
+ let _ = T::Currency::release_all(
+ &HoldReason::StakingDelegation.into(),
+ &delegator,
+ Precision::BestEffort,
+ );
+ <Delegators<T>>::remove(&delegator);
+ });
+
+ T::CoreStaking::migrate_to_direct_staker(agent);
+ }
 }
 impl<T: Config> OnStakingUpdate<BalanceOf<T>> for Pallet<T> {
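The `generate_proxy_delegator` helper added in the next file is a thin wrapper over keyless sub-account derivation. A minimal sketch of that derivation, using the `AccountIdConversion` trait that `PalletId` implements (the free-standing function name here is illustrative only):

```rust
use sp_runtime::traits::AccountIdConversion;

// The proxy delegator is a keyless account derived deterministically from the
// pallet id and the (account type, agent) tuple: the same inputs always yield
// the same account id, truncating the hash if it is wider than AccountId.
fn proxy_delegator_account<T: Config>(agent: T::AccountId) -> T::AccountId {
    T::PalletId::get().into_sub_account_truncating((AccountType::ProxyDelegator, agent))
}
```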
diff --git a/substrate/frame/delegated-staking/src/lib.rs b/substrate/frame/delegated-staking/src/lib.rs
index 210f69d9c83..8581a4a981f 100644
--- a/substrate/frame/delegated-staking/src/lib.rs
+++ b/substrate/frame/delegated-staking/src/lib.rs
@@ -165,7 +165,10 @@ use frame_system::{ensure_signed, pallet_prelude::*, RawOrigin};
 pub mod pallet {
 use super::*;
+ /// The in-code storage version.
+ const STORAGE_VERSION: StorageVersion = StorageVersion::new(0);
 #[pallet::pallet]
+ #[pallet::storage_version(STORAGE_VERSION)]
 pub struct Pallet<T>(PhantomData<T>);
 #[pallet::config]
@@ -245,6 +248,8 @@
 Released { agent: T::AccountId, delegator: T::AccountId, amount: BalanceOf<T> },
 /// Funds slashed from a delegator.
 Slashed { agent: T::AccountId, delegator: T::AccountId, amount: BalanceOf<T> },
+ /// Unclaimed delegation funds migrated to delegator.
+ MigratedDelegation { agent: T::AccountId, delegator: T::AccountId, amount: BalanceOf<T> },
 }
 /// Map of Delegators to their `Delegation`.
@@ -371,7 +376,7 @@
 ensure!(Self::is_agent(&agent), Error::<T>::NotAgent);
 // and has enough delegated balance to migrate.
- let proxy_delegator = Self::sub_account(AccountType::ProxyDelegator, agent);
+ let proxy_delegator = Self::generate_proxy_delegator(agent);
 let balance_remaining = Self::held_balance_of(&proxy_delegator);
 ensure!(balance_remaining >= amount, Error::<T>::NotEnoughFunds);
@@ -422,6 +427,12 @@
 }
 impl<T: Config> Pallet<T> {
+ /// Derive an account from the migrating agent account where the unclaimed delegation funds
+ /// are held.
+ pub fn generate_proxy_delegator(agent: T::AccountId) -> T::AccountId {
+ Self::sub_account(AccountType::ProxyDelegator, agent)
+ }
+
 /// Derive a (keyless) pot account from the given agent account and account type.
 pub(crate) fn sub_account(account_type: AccountType, agent: T::AccountId) -> T::AccountId {
 T::PalletId::get().into_sub_account_truncating((account_type, agent.clone()))
 }
@@ -464,7 +475,7 @@
 // We create a proxy delegator that will keep all the delegation funds until funds are
 // transferred to actual delegator.
- let proxy_delegator = Self::sub_account(AccountType::ProxyDelegator, who.clone());
+ let proxy_delegator = Self::generate_proxy_delegator(who.clone());
 // Keep proxy delegator alive until all funds are migrated.
 frame_system::Pallet::<T>::inc_providers(&proxy_delegator);
@@ -646,9 +657,9 @@
 !Self::is_delegator(destination_delegator) && !Self::is_agent(destination_delegator)
 );
+ let agent = source_delegation.agent.clone();
 // update delegations
- Delegation::<T>::new(&source_delegation.agent, amount)
- .update_or_kill(destination_delegator);
+ Delegation::<T>::new(&agent, amount).update_or_kill(destination_delegator);
 source_delegation.amount = source_delegation
 .amount
@@ -684,6 +695,12 @@
 // hold the funds again in the new delegator account.
 T::Currency::hold(&HoldReason::StakingDelegation.into(), destination_delegator, amount)?;
+ Self::deposit_event(Event::<T>::MigratedDelegation {
+ agent,
+ delegator: destination_delegator.clone(),
+ amount,
+ });
+
 Ok(())
 }
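For orientation, the money movement that ends in the `hold` shown above follows a release/transfer/hold pattern on the `fungible` currency traits. A sketch only: the exact `Precision` and `Preservation` arguments are an assumption and not part of this hunk; step 3 is the literal line from the diff.

```rust
// 1. release the held funds from the proxy delegator's account,
let _released = T::Currency::release(
    &HoldReason::StakingDelegation.into(),
    &proxy_delegator,
    amount,
    Precision::Exact,
)?;
// 2. transfer them to the real delegator, keeping the proxy account alive,
T::Currency::transfer(&proxy_delegator, destination_delegator, amount, Preservation::Preserve)?;
// 3. re-apply the hold on the destination account.
T::Currency::hold(&HoldReason::StakingDelegation.into(), destination_delegator, amount)?;
```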
diff --git a/substrate/frame/delegated-staking/src/mock.rs b/substrate/frame/delegated-staking/src/mock.rs
index 21a9fe6b227..b9eaffb970e 100644
--- a/substrate/frame/delegated-staking/src/mock.rs
+++ b/substrate/frame/delegated-staking/src/mock.rs
@@ -32,6 +32,8 @@ use frame_election_provider_support::{
 };
 use frame_support::dispatch::RawOrigin;
 use pallet_staking::{ActiveEra, ActiveEraInfo, CurrentEra};
+use sp_core::U256;
+use sp_runtime::traits::Convert;
 use sp_staking::{Stake, StakingInterface};
 pub type T = Runtime;
@@ -129,7 +131,7 @@ impl pallet_staking::Config for Runtime {
 type NominationsQuota = pallet_staking::FixedNominationsQuota<16>;
 type MaxUnlockingChunks = ConstU32<10>;
 type MaxControllersInDeprecationBatch = ConstU32<100>;
- type EventListeners = DelegatedStaking;
+ type EventListeners = (Pools, DelegatedStaking);
 type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig;
 type WeightInfo = ();
 type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy;
@@ -149,8 +151,39 @@ impl delegated_staking::Config for Runtime {
 type CoreStaking = Staking;
 }
+pub struct BalanceToU256;
+impl Convert<Balance, U256> for BalanceToU256 {
+ fn convert(n: Balance) -> U256 {
+ n.into()
+ }
+}
+pub struct U256ToBalance;
+impl Convert<U256, Balance> for U256ToBalance {
+ fn convert(n: U256) -> Balance {
+ n.try_into().unwrap()
+ }
+}
+
 parameter_types! {
 pub static MaxUnbonding: u32 = 8;
+ pub const PoolsPalletId: PalletId = PalletId(*b"py/nopls");
+}
+impl pallet_nomination_pools::Config for Runtime {
+ type RuntimeEvent = RuntimeEvent;
+ type WeightInfo = ();
+ type Currency = Balances;
+ type RuntimeFreezeReason = RuntimeFreezeReason;
+ type RewardCounter = sp_runtime::FixedU128;
+ type BalanceToU256 = BalanceToU256;
+ type U256ToBalance = U256ToBalance;
+ type PostUnbondingPoolsWindow = ConstU32<2>;
+ type PalletId = PoolsPalletId;
+ type MaxMetadataLen = ConstU32<256>;
+ type MaxUnbonding = MaxUnbonding;
+ type MaxPointsToBalance = frame_support::traits::ConstU8<10>;
+ type StakeAdapter =
+ pallet_nomination_pools::adapter::DelegateStake<Self, Staking, DelegatedStaking>;
+ type AdminOrigin = frame_system::EnsureRoot<AccountId>;
 }
 frame_support::construct_runtime!(
@@ -159,6 +192,7 @@
 Timestamp: pallet_timestamp,
 Balances: pallet_balances,
 Staking: pallet_staking,
+ Pools: pallet_nomination_pools,
 DelegatedStaking: delegated_staking,
 }
 );
@@ -297,9 +331,16 @@ pub(crate) fn get_agent(agent: &AccountId) -> Agent<T> {
 parameter_types! {
 static ObservedEventsDelegatedStaking: usize = 0;
+ static ObservedEventsPools: usize = 0;
+}
+
+pub(crate) fn pool_events_since_last_call() -> Vec<pallet_nomination_pools::Event<T>> {
+ let events = System::read_events_for_pallet::<pallet_nomination_pools::Event<T>>();
+ let already_seen = ObservedEventsPools::get();
+ ObservedEventsPools::set(events.len());
+ events.into_iter().skip(already_seen).collect()
+}
-#[allow(unused)]
 pub(crate) fn events_since_last_call() -> Vec<crate::Event<T>> {
 let events = System::read_events_for_pallet::<crate::Event<T>>();
 let already_seen = ObservedEventsDelegatedStaking::get();
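The new `pool_events_since_last_call` helper mirrors the existing delegated-staking event helper, and the integration tests below rely on it for exact event assertions. An illustrative use, assuming the `PoolsEvent` alias for `pallet_nomination_pools::Event` that the tests import:

```rust
// After a pool operation, assert exactly the pool events it emitted:
System::reset_events();
assert_ok!(Pools::join(RawOrigin::Signed(300).into(), 100, 1));
assert_eq!(
    pool_events_since_last_call(),
    vec![PoolsEvent::Bonded { member: 300, pool_id: 1, bonded: 100, joined: true }],
);
```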
diff --git a/substrate/frame/delegated-staking/src/tests.rs b/substrate/frame/delegated-staking/src/tests.rs
index 1f36f655beb..6b68726b274 100644
--- a/substrate/frame/delegated-staking/src/tests.rs
+++ b/substrate/frame/delegated-staking/src/tests.rs
@@ -20,8 +20,9 @@ use super::*;
 use crate::mock::*;
 use frame_support::{assert_noop, assert_ok, traits::fungible::InspectHold};
+use pallet_nomination_pools::{Error as PoolsError, Event as PoolsEvent};
 use pallet_staking::Error as StakingError;
-use sp_staking::DelegationInterface;
+use sp_staking::{DelegationInterface, StakerStatus};
 #[test]
 fn create_an_agent_with_first_delegator() {
@@ -623,7 +624,7 @@ mod staking_integration {
 // to migrate, nominator needs to set an account as a proxy delegator where staked funds
 // will be moved and delegated back to this old nominator account. This should be funded
 // with at least ED.
- let proxy_delegator = DelegatedStaking::sub_account(AccountType::ProxyDelegator, 200);
+ let proxy_delegator = DelegatedStaking::generate_proxy_delegator(200);
 assert_ok!(DelegatedStaking::migrate_to_agent(RawOrigin::Signed(200).into(), 201));
@@ -683,3 +684,501 @@ mod staking_integration {
 });
 }
 }
+
+mod pool_integration {
+ use super::*;
+ use pallet_nomination_pools::{BondExtra, BondedPools, PoolState};
+
+ #[test]
+ fn create_pool_test() {
+ ExtBuilder::default().build_and_execute(|| {
+ let creator: AccountId = 100;
+ fund(&creator, 500);
+ let delegate_amount = 200;
+
+ // nothing held initially
+ assert_eq!(DelegatedStaking::held_balance_of(&creator), 0);
+
+ // create pool
+ assert_ok!(Pools::create(
+ RawOrigin::Signed(creator).into(),
+ delegate_amount,
+ creator,
+ creator,
+ creator
+ ));
+
+ // correct amount is locked in depositor's account.
+ assert_eq!(DelegatedStaking::held_balance_of(&creator), delegate_amount);
+
+ let pool_account = Pools::generate_bonded_account(1);
+ let agent = get_agent(&pool_account);
+
+ // verify state
+ assert_eq!(agent.ledger.effective_balance(), delegate_amount);
+ assert_eq!(agent.available_to_bond(), 0);
+ assert_eq!(agent.total_unbonded(), 0);
+ });
+ }
+
+ #[test]
+ fn join_pool() {
+ ExtBuilder::default().build_and_execute(|| {
+ // create a pool
+ let pool_id = create_pool(100, 200);
+ // keep track of staked amount.
+ let mut staked_amount: Balance = 200;
+
+ // fund delegator
+ let delegator: AccountId = 300;
+ fund(&delegator, 500);
+ // nothing held initially
+ assert_eq!(DelegatedStaking::held_balance_of(&delegator), 0);
+
+ // delegator joins pool
+ assert_ok!(Pools::join(RawOrigin::Signed(delegator).into(), 100, pool_id));
+ staked_amount += 100;
+
+ // correct amount is locked in depositor's account.
+ assert_eq!(DelegatedStaking::held_balance_of(&delegator), 100);
+
+ // delegator is not actively exposed to core staking.
+ assert_eq!(Staking::status(&delegator), Err(StakingError::<T>::NotStash.into()));
+
+ let pool_agent = get_agent(&Pools::generate_bonded_account(1));
+ // verify state
+ assert_eq!(pool_agent.ledger.effective_balance(), staked_amount);
+ assert_eq!(pool_agent.bonded_stake(), staked_amount);
+ assert_eq!(pool_agent.available_to_bond(), 0);
+ assert_eq!(pool_agent.total_unbonded(), 0);
+
+ // cannot reap agent in staking.
+ assert_noop!(
+ Staking::reap_stash(RuntimeOrigin::signed(100), pool_agent.key, 0),
+ StakingError::<T>::VirtualStakerNotAllowed
+ );
+
+ // let a bunch of delegators join this pool
+ for i in 301..350 {
+ fund(&i, 500);
+ assert_ok!(Pools::join(RawOrigin::Signed(i).into(), 100 + i, pool_id));
+ staked_amount += 100 + i;
+ assert_eq!(DelegatedStaking::held_balance_of(&i), 100 + i);
+ }
+
+ let pool_agent = pool_agent.refresh().unwrap();
+ assert_eq!(pool_agent.ledger.effective_balance(), staked_amount);
+ assert_eq!(pool_agent.bonded_stake(), staked_amount);
+ assert_eq!(pool_agent.available_to_bond(), 0);
+ assert_eq!(pool_agent.total_unbonded(), 0);
+ });
+ }
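+
+ // Note on the status checks above: with `DelegateStake`, only the pool's
+ // bonded (agent) account is known to `pallet-staking`; individual members
+ // are tracked purely as delegators, which is why `Staking::status` on a
+ // member returns `NotStash` while the agent cannot even be reaped.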
+
+ #[test]
+ fn bond_extra_to_pool() {
+ ExtBuilder::default().build_and_execute(|| {
+ let pool_id = create_pool(100, 200);
+ add_delegators_to_pool(pool_id, (300..310).collect(), 100);
+ let mut staked_amount = 200 + 100 * 10;
+ assert_eq!(get_pool_agent(pool_id).bonded_stake(), staked_amount);
+
+ // bond extra to pool
+ for i in 300..310 {
+ assert_ok!(Pools::bond_extra(
+ RawOrigin::Signed(i).into(),
+ BondExtra::FreeBalance(50)
+ ));
+ staked_amount += 50;
+ assert_eq!(get_pool_agent(pool_id).bonded_stake(), staked_amount);
+ }
+ });
+ }
+
+ #[test]
+ fn claim_pool_rewards() {
+ ExtBuilder::default().build_and_execute(|| {
+ let creator = 100;
+ let creator_stake = 1000;
+ let pool_id = create_pool(creator, creator_stake);
+ add_delegators_to_pool(pool_id, (300..310).collect(), 100);
+ add_delegators_to_pool(pool_id, (310..320).collect(), 200);
+ let total_staked = creator_stake + 100 * 10 + 200 * 10;
+
+ // give out some rewards
+ let reward_acc = Pools::generate_reward_account(pool_id);
+ let reward_amount = 1000;
+ fund(&reward_acc, reward_amount);
+
+ // claim rewards
+ for i in 300..320 {
+ let pre_balance = Balances::free_balance(i);
+ let delegator_staked_balance = DelegatedStaking::held_balance_of(&i);
+ // pay out the reward
+ assert_ok!(Pools::claim_payout(RawOrigin::Signed(i).into()));
+
+ let reward = Balances::free_balance(i) - pre_balance;
+ assert_eq!(reward, delegator_staked_balance * reward_amount / total_staked);
+ }
+
+ // pay out the creator
+ let pre_balance = Balances::free_balance(creator);
+ assert_ok!(Pools::claim_payout(RawOrigin::Signed(creator).into()));
+ // verify they are paid out correctly
+ let reward = Balances::free_balance(creator) - pre_balance;
+ assert_eq!(reward, creator_stake * reward_amount / total_staked);
+
+ // the reward account should only have the minimum balance left after paying out everyone.
+ assert_eq!(Balances::free_balance(reward_acc), ExistentialDeposit::get());
+ });
+ }
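+
+ // The payout assertions above are plain pro-rata arithmetic; with the
+ // numbers used in this test:
+ //   total_staked  = 1000 (creator) + 10 * 100 + 10 * 200 = 4000
+ //   reward_amount = 1000
+ //   member with 100 staked:   100 * 1000 / 4000 = 25
+ //   member with 200 staked:   200 * 1000 / 4000 = 50
+ //   creator with 1000 staked: 1000 * 1000 / 4000 = 250
+ //   total paid out: 10 * 25 + 10 * 50 + 250 = 1000, i.e. the full reward.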
+
+ #[test]
+ fn withdraw_from_pool() {
+ ExtBuilder::default().build_and_execute(|| {
+ // initial era
+ start_era(1);
+
+ let pool_id = create_pool(100, 1000);
+ let bond_amount = 200;
+ add_delegators_to_pool(pool_id, (300..310).collect(), bond_amount);
+ let total_staked = 1000 + bond_amount * 10;
+ let pool_acc = Pools::generate_bonded_account(pool_id);
+
+ start_era(2);
+ // nothing to release yet.
+ assert_noop!(
+ Pools::withdraw_unbonded(RawOrigin::Signed(301).into(), 301, 0),
+ PoolsError::<T>::SubPoolsNotFound
+ );
+
+ // 301 wants to unbond 50 in era 2, withdrawable in era 5.
+ assert_ok!(Pools::unbond(RawOrigin::Signed(301).into(), 301, 50));
+
+ // 302 wants to unbond 100 in era 3, withdrawable in era 6.
+ start_era(3);
+ assert_ok!(Pools::unbond(RawOrigin::Signed(302).into(), 302, 100));
+
+ // 303 wants to unbond 200 in era 4, withdrawable in era 7.
+ start_era(4);
+ assert_ok!(Pools::unbond(RawOrigin::Signed(303).into(), 303, 200));
+
+ // active stake is now reduced.
+ let expected_active = total_staked - (50 + 100 + 200);
+ assert!(eq_stake(pool_acc, total_staked, expected_active));
+
+ // nothing to withdraw at era 4
+ for i in 301..310 {
+ assert_noop!(
+ Pools::withdraw_unbonded(RawOrigin::Signed(i).into(), i, 0),
+ PoolsError::<T>::CannotWithdrawAny
+ );
+ }
+
+ assert!(eq_stake(pool_acc, total_staked, expected_active));
+
+ start_era(5);
+ // at era 5, 301 can withdraw.
+
+ System::reset_events();
+ let held_301 = DelegatedStaking::held_balance_of(&301);
+ let free_301 = Balances::free_balance(301);
+
+ assert_ok!(Pools::withdraw_unbonded(RawOrigin::Signed(301).into(), 301, 0));
+ assert_eq!(
+ events_since_last_call(),
+ vec![Event::Released { agent: pool_acc, delegator: 301, amount: 50 }]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Withdrawn { member: 301, pool_id, balance: 50, points: 50 }]
+ );
+ assert_eq!(DelegatedStaking::held_balance_of(&301), held_301 - 50);
+ assert_eq!(Balances::free_balance(301), free_301 + 50);
+
+ start_era(7);
+ // at era 7, both remaining delegators can withdraw.
+ assert_ok!(Pools::withdraw_unbonded(RawOrigin::Signed(302).into(), 302, 0));
+ assert_ok!(Pools::withdraw_unbonded(RawOrigin::Signed(303).into(), 303, 0));
+
+ assert_eq!(
+ events_since_last_call(),
+ vec![
+ Event::Released { agent: pool_acc, delegator: 302, amount: 100 },
+ Event::Released { agent: pool_acc, delegator: 303, amount: 200 },
+ ]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Withdrawn { member: 302, pool_id, balance: 100, points: 100 },
+ PoolsEvent::Withdrawn { member: 303, pool_id, balance: 200, points: 200 },
+ PoolsEvent::MemberRemoved { pool_id: 1, member: 303 },
+ ]
+ );
+
+ // 303 is killed
+ assert!(!Delegators::<T>::contains_key(303));
+ });
+ }
+
+ #[test]
+ fn pool_withdraw_unbonded() {
+ ExtBuilder::default().build_and_execute(|| {
+ // initial era
+ start_era(1);
+ let pool_id = create_pool(100, 1000);
+ add_delegators_to_pool(pool_id, (300..310).collect(), 200);
+
+ start_era(2);
+ // 1000 tokens unbonded here, withdrawable in era 5.
+ for i in 300..310 {
+ assert_ok!(Pools::unbond(RawOrigin::Signed(i).into(), i, 100));
+ }
+
+ start_era(3);
+ // 500 tokens unbonded here, withdrawable in era 6.
+ for i in 300..310 {
+ assert_ok!(Pools::unbond(RawOrigin::Signed(i).into(), i, 50));
+ }
+
+ start_era(5);
+ // withdrawing from the pool should withdraw the 1000 tokens
+ assert_ok!(Pools::pool_withdraw_unbonded(RawOrigin::Signed(100).into(), pool_id, 0));
+ assert_eq!(get_pool_agent(pool_id).total_unbonded(), 1000);
+
+ start_era(6);
+ // should withdraw 500 more
+ assert_ok!(Pools::pool_withdraw_unbonded(RawOrigin::Signed(100).into(), pool_id, 0));
+ assert_eq!(get_pool_agent(pool_id).total_unbonded(), 1000 + 500);
+
+ start_era(7);
+ // nothing left to withdraw, still at 1500.
+ assert_ok!(Pools::pool_withdraw_unbonded(RawOrigin::Signed(100).into(), pool_id, 0));
+ assert_eq!(get_pool_agent(pool_id).total_unbonded(), 1500);
+ });
+ }
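+
+ // The era offsets in the two withdraw tests above are consistent with a
+ // bonding duration of 3 eras in this mock:
+ //   unbond at era 2 -> withdrawable at era 5
+ //   unbond at era 3 -> withdrawable at era 6
+ //   unbond at era 4 -> withdrawable at era 7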
+			start_era(2);
+			assert_ok!(Pools::unbond(RawOrigin::Signed(300).into(), 300, delegator_stake));
+
+			start_era(3);
+			assert_ok!(Pools::unbond(RawOrigin::Signed(301).into(), 301, delegator_stake));
+
+			start_era(4);
+			assert_ok!(Pools::unbond(RawOrigin::Signed(302).into(), 302, delegator_stake));
+			System::reset_events();
+
+			// slash the pool at era 3
+			assert_eq!(
+				BondedPools::::get(1).unwrap().points,
+				creator_stake + delegator_stake * 6 - delegator_stake * 3
+			);
+			pallet_staking::slashing::do_slash::(
+				&pool_acc,
+				500,
+				&mut Default::default(),
+				&mut Default::default(),
+				3,
+			);
+
+			assert_eq!(
+				pool_events_since_last_call(),
+				vec![
+					// 300 did not get slashed at all as it unbonded in an era before the slash.
+					// 301 got slashed 50% of 100 = 50.
+					PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 6, balance: 50 },
+					// 302 got slashed 50% of 100 = 50.
+					PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 7, balance: 50 },
+					// Rest of the pool slashed 50% of 800 = 400.
+					PoolsEvent::PoolSlashed { pool_id: 1, balance: 400 },
+				]
+			);
+
+			// slash is lazy and balance is still locked in users' accounts.
+			assert_eq!(DelegatedStaking::held_balance_of(&creator), creator_stake);
+			for i in 300..306 {
+				assert_eq!(DelegatedStaking::held_balance_of(&i), delegator_stake);
+			}
+			assert_eq!(
+				get_pool_agent(pool_id).ledger.effective_balance(),
+				Staking::total_stake(&pool_acc).unwrap()
+			);
+
+			// pending slash is bookkept.
+			assert_eq!(get_pool_agent(pool_id).ledger.pending_slash, 500);
+
+			// go to some distant future era.
+			start_era(10);
+			System::reset_events();
+
+			// 300 is not slashed and can withdraw all balance.
+			assert_ok!(Pools::withdraw_unbonded(RawOrigin::Signed(300).into(), 300, 1));
+			assert_eq!(
+				events_since_last_call(),
+				vec![Event::Released { agent: pool_acc, delegator: 300, amount: 100 }]
+			);
+			assert_eq!(get_pool_agent(pool_id).ledger.pending_slash, 500);
+
+			// withdraw the other two delegators (301 and 302) who were unbonding.
+			for i in 301..=302 {
+				let pre_balance = Balances::free_balance(i);
+				let pre_pending_slash = get_pool_agent(pool_id).ledger.pending_slash;
+				assert_ok!(Pools::withdraw_unbonded(RawOrigin::Signed(i).into(), i, 0));
+				assert_eq!(
+					events_since_last_call(),
+					vec![
+						Event::Slashed { agent: pool_acc, delegator: i, amount: 50 },
+						Event::Released { agent: pool_acc, delegator: i, amount: 50 },
+					]
+				);
+				assert_eq!(get_pool_agent(pool_id).ledger.pending_slash, pre_pending_slash - 50);
+				assert_eq!(DelegatedStaking::held_balance_of(&i), 0);
+				assert_eq!(Balances::free_balance(i) - pre_balance, 50);
+			}
+
+			// let's apply the remaining slashes
+			let slash_reporter = 99;
+			// give our reporter some balance.
+			fund(&slash_reporter, 100);
+
+			for i in 303..306 {
+				let pre_pending_slash = get_pool_agent(pool_id).ledger.pending_slash;
+				assert_ok!(Pools::apply_slash(RawOrigin::Signed(slash_reporter).into(), i));
+
+				// each member is slashed 50% of 100 = 50.
+				assert_eq!(get_pool_agent(pool_id).ledger.pending_slash, pre_pending_slash - 50);
+				// left with 50.
+				assert_eq!(DelegatedStaking::held_balance_of(&i), 50);
+			}
+			// reporter is paid SlashRewardFraction of the slash, i.e. 10% of 50 = 5
+			assert_eq!(Balances::free_balance(slash_reporter), 100 + 5 * 3);
+			// slash creator
+			assert_ok!(Pools::apply_slash(RawOrigin::Signed(slash_reporter).into(), creator));
+			// all slashes should be applied now.
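+			// Worked through from the numbers above: the creator's pending slash is 50% of
+			// 500 = 250, so the reporter's cut is 10% of that = 25, on top of the 3 * 5 = 15
+			// earned from the three delegator slashes (100 + 15 + 25 = 140 in total).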
+ assert_eq!(get_pool_agent(pool_id).ledger.pending_slash, 0); + // for creator, 50% of stake should be slashed (250), 10% of which should go to reporter + // (25). + assert_eq!(Balances::free_balance(slash_reporter), 115 + 25); + }); + } + + fn create_pool(creator: AccountId, amount: Balance) -> u32 { + fund(&creator, amount * 2); + assert_ok!(Pools::create( + RawOrigin::Signed(creator).into(), + amount, + creator, + creator, + creator + )); + + pallet_nomination_pools::LastPoolId::::get() + } + + fn add_delegators_to_pool(pool_id: u32, delegators: Vec, amount: Balance) { + for delegator in delegators { + fund(&delegator, amount * 2); + assert_ok!(Pools::join(RawOrigin::Signed(delegator).into(), amount, pool_id)); + } + } + + fn get_pool_agent(pool_id: u32) -> Agent { + get_agent(&Pools::generate_bonded_account(pool_id)) + } +} diff --git a/substrate/frame/delegated-staking/src/types.rs b/substrate/frame/delegated-staking/src/types.rs index 0bfc23281df..958d81c294a 100644 --- a/substrate/frame/delegated-staking/src/types.rs +++ b/substrate/frame/delegated-staking/src/types.rs @@ -279,7 +279,6 @@ impl Agent { /// This is similar to [Self::available_to_bond] except it also includes `unclaimed_withdrawals` /// of `Agent`. #[cfg(test)] - #[allow(unused)] pub(crate) fn total_unbonded(&self) -> BalanceOf { let bonded_stake = self.bonded_stake(); diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs index c00bb66ea13..2b1f1335c6f 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs @@ -322,7 +322,7 @@ fn automatic_unbonding_pools() { let init_free_balance_2 = Balances::free_balance(2); let init_free_balance_3 = Balances::free_balance(3); - let pool_bonded_account = Pools::create_bonded_account(1); + let pool_bonded_account = Pools::generate_bonded_account(1); // creates a pool with 5 bonded, owned by 1. 
assert_ok!(Pools::create(RuntimeOrigin::signed(1), 5, 1, 1, 1)); diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index 8f1775a7e59..a9512bef2d5 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -274,7 +274,7 @@ impl pallet_nomination_pools::Config for Runtime { type RewardCounter = sp_runtime::FixedU128; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; - type Staking = Staking; + type StakeAdapter = pallet_nomination_pools::adapter::TransferStake; type PostUnbondingPoolsWindow = ConstU32<2>; type PalletId = PoolsPalletId; type MaxMetadataLen = ConstU32<256>; diff --git a/substrate/frame/nomination-pools/benchmarking/Cargo.toml b/substrate/frame/nomination-pools/benchmarking/Cargo.toml index 3186bce5164..3f9463a9c42 100644 --- a/substrate/frame/nomination-pools/benchmarking/Cargo.toml +++ b/substrate/frame/nomination-pools/benchmarking/Cargo.toml @@ -27,6 +27,7 @@ frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } pallet-bags-list = { path = "../../bags-list", default-features = false } pallet-staking = { path = "../../staking", default-features = false } +pallet-delegated-staking = { path = "../../delegated-staking", default-features = false } pallet-nomination-pools = { path = "..", default-features = false } # Substrate Primitives @@ -53,6 +54,7 @@ std = [ "frame-system/std", "pallet-bags-list/std", "pallet-balances/std", + "pallet-delegated-staking/std", "pallet-nomination-pools/std", "pallet-staking/std", "pallet-timestamp/std", @@ -72,6 +74,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-bags-list/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-delegated-staking/runtime-benchmarks", "pallet-nomination-pools/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", diff --git a/substrate/frame/nomination-pools/benchmarking/src/inner.rs b/substrate/frame/nomination-pools/benchmarking/src/inner.rs index 277060e7f64..43de0fddb8b 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/inner.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/inner.rs @@ -23,22 +23,24 @@ use frame_support::{ assert_ok, ensure, traits::{ fungible::{Inspect, Mutate, Unbalanced}, - Get, + tokens::Preservation, + Get, Imbalance, }, }; use frame_system::RawOrigin as RuntimeOrigin; use pallet_nomination_pools::{ + adapter::{StakeStrategy, StakeStrategyType}, BalanceOf, BondExtra, BondedPoolInner, BondedPools, ClaimPermission, ClaimPermissions, Commission, CommissionChangeRate, CommissionClaimPermission, ConfigOp, GlobalMaxCommission, MaxPoolMembers, MaxPoolMembersPerPool, MaxPools, Metadata, MinCreateBond, MinJoinBond, - Pallet as Pools, PoolMembers, PoolRoles, PoolState, RewardPools, SubPoolsStorage, + Pallet as Pools, PoolId, PoolMembers, PoolRoles, PoolState, RewardPools, SubPoolsStorage, }; use pallet_staking::MaxNominationsOf; use sp_runtime::{ traits::{Bounded, StaticLookup, Zero}, Perbill, }; -use sp_staking::{EraIndex, StakingInterface}; +use sp_staking::EraIndex; use sp_std::{vec, vec::Vec}; // `frame_benchmarking::benchmarks!` macro needs this use pallet_nomination_pools::Call; @@ -101,18 +103,46 @@ fn create_pool_account( let pool_account = 
pallet_nomination_pools::BondedPools::::iter()
 			.find(|(_, bonded_pool)| bonded_pool.roles.depositor == pool_creator)
-			.map(|(pool_id, _)| Pools::::create_bonded_account(pool_id))
+			.map(|(pool_id, _)| Pools::::generate_bonded_account(pool_id))
 			.expect("pool_creator created a pool above");
 
 	(pool_creator, pool_account)
 }
 
+fn migrate_to_transfer_stake(pool_id: PoolId) {
+	if T::StakeAdapter::strategy_type() == StakeStrategyType::Transfer {
+		// should already be in the correct strategy
+		return;
+	}
+	let pool_acc = Pools::::generate_bonded_account(pool_id);
+	// drop the agent and its associated delegators.
+	T::StakeAdapter::remove_as_agent(&pool_acc);
+
+	// transfer funds from all members to the pool account.
+	PoolMembers::::iter()
+		.filter(|(_, member)| member.pool_id == pool_id)
+		.for_each(|(member_acc, member)| {
+			let member_balance = member.total_balance();
+			::Currency::transfer(
+				&member_acc,
+				&pool_acc,
+				member_balance,
+				Preservation::Preserve,
+			)
+			.expect("member should have enough balance to transfer");
+		});
+}
+
 fn vote_to_balance(
 	vote: u64,
 ) -> Result, &'static str> {
 	vote.try_into().map_err(|_| "could not convert u64 to Balance")
 }
 
+fn is_transfer_stake_strategy() -> bool {
+	T::StakeAdapter::strategy_type() == StakeStrategyType::Transfer
+}
+
 #[allow(unused)]
 struct ListScenario {
 	/// Stash/Controller that is expected to be moved.
@@ -151,7 +181,7 @@ impl ListScenario {
 		let (pool_creator1, pool_origin1) =
 			create_pool_account::(USER_SEED + 1, origin_weight, Some(Perbill::from_percent(50)));
 
-		T::Staking::nominate(
+		T::StakeAdapter::nominate(
 			&pool_origin1,
 			// NOTE: these don't really need to be validators.
 			vec![account("random_validator", 0, USER_SEED)],
@@ -160,7 +190,7 @@ impl ListScenario {
 		let (_, pool_origin2) =
 			create_pool_account::(USER_SEED + 2, origin_weight, Some(Perbill::from_percent(50)));
 
-		T::Staking::nominate(
+		T::StakeAdapter::nominate(
 			&pool_origin2,
 			vec![account("random_validator", 0, USER_SEED)].clone(),
 		)?;
@@ -178,7 +208,7 @@ impl ListScenario {
 		let (_, pool_dest1) =
 			create_pool_account::(USER_SEED + 3, dest_weight, Some(Perbill::from_percent(50)));
 
-		T::Staking::nominate(&pool_dest1, vec![account("random_validator", 0, USER_SEED)])?;
+		T::StakeAdapter::nominate(&pool_dest1, vec![account("random_validator", 0, USER_SEED)])?;
 
 		let weight_of = pallet_staking::Pallet::::weight_of_fn();
 		assert_eq!(vote_to_balance::(weight_of(&pool_origin1)).unwrap(), origin_weight);
@@ -204,11 +234,12 @@ impl ListScenario {
 		self.origin1_member = Some(joiner.clone());
 		CurrencyOf::::set_balance(&joiner, amount * 2u32.into());
 
-		let original_bonded = T::Staking::active_stake(&self.origin1).unwrap();
+		let original_bonded = T::StakeAdapter::active_stake(&self.origin1);
 
 		// Unbond `amount` from the underlying pool account so when the member joins
 		// we will maintain `current_bonded`.
-		T::Staking::unbond(&self.origin1, amount).expect("the pool was created in `Self::new`.");
+		T::StakeAdapter::unbond(&self.origin1, amount)
+			.expect("the pool was created in `Self::new`.");
 
 		// Account pool points for the unbonded balance.
 		BondedPools::::mutate(&1, |maybe_pool| {
@@ -231,13 +262,20 @@ impl ListScenario {
 }
 
 frame_benchmarking::benchmarks! {
+	where_clause {
+		where
+			T: pallet_staking::Config,
+			pallet_staking::BalanceOf: From,
+			BalanceOf: Into,
+	}
+
 	join {
 		let origin_weight = Pools::::depositor_min_bond() * 2u32.into();
 
 		// setup the worst case list scenario.
let scenario = ListScenario::::new(origin_weight, true)?; assert_eq!( - T::Staking::active_stake(&scenario.origin1).unwrap(), + T::StakeAdapter::active_stake(&scenario.origin1), origin_weight ); @@ -252,7 +290,7 @@ frame_benchmarking::benchmarks! { verify { assert_eq!(CurrencyOf::::balance(&joiner), joiner_free - max_additional); assert_eq!( - T::Staking::active_stake(&scenario.origin1).unwrap(), + T::StakeAdapter::active_stake(&scenario.origin1), scenario.dest_weight ); } @@ -267,7 +305,7 @@ frame_benchmarking::benchmarks! { }: bond_extra(RuntimeOrigin::Signed(scenario.creator1.clone()), BondExtra::FreeBalance(extra)) verify { assert!( - T::Staking::active_stake(&scenario.origin1).unwrap() >= + T::StakeAdapter::active_stake(&scenario.origin1) >= scenario.dest_weight ); } @@ -283,7 +321,7 @@ frame_benchmarking::benchmarks! { let _ = Pools::::set_claim_permission(RuntimeOrigin::Signed(scenario.creator1.clone()).into(), ClaimPermission::PermissionlessAll); // transfer exactly `extra` to the depositor of the src pool (1), - let reward_account1 = Pools::::create_reward_account(1); + let reward_account1 = Pools::::generate_reward_account(1); assert!(extra >= CurrencyOf::::minimum_balance()); let _ = CurrencyOf::::mint_into(&reward_account1, extra); @@ -291,7 +329,7 @@ frame_benchmarking::benchmarks! { verify { // commission of 50% deducted here. assert!( - T::Staking::active_stake(&scenario.origin1).unwrap() >= + T::StakeAdapter::active_stake(&scenario.origin1) >= scenario.dest_weight / 2u32.into() ); } @@ -302,7 +340,7 @@ frame_benchmarking::benchmarks! { let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); let ed = CurrencyOf::::minimum_balance(); let (depositor, pool_account) = create_pool_account::(0, origin_weight, Some(commission)); - let reward_account = Pools::::create_reward_account(1); + let reward_account = Pools::::generate_reward_account(1); // Send funds to the reward account of the pool CurrencyOf::::set_balance(&reward_account, ed + origin_weight); @@ -345,7 +383,7 @@ frame_benchmarking::benchmarks! { whitelist_account!(member_id); }: _(RuntimeOrigin::Signed(member_id.clone()), member_id_lookup, all_points) verify { - let bonded_after = T::Staking::active_stake(&scenario.origin1).unwrap(); + let bonded_after = T::StakeAdapter::active_stake(&scenario.origin1); // We at least went down to the destination bag assert!(bonded_after <= scenario.dest_weight); let member = PoolMembers::::get( @@ -354,7 +392,7 @@ frame_benchmarking::benchmarks! { .unwrap(); assert_eq!( member.unbonding_eras.keys().cloned().collect::>(), - vec![0 + T::Staking::bonding_duration()] + vec![0 + T::StakeAdapter::bonding_duration()] ); assert_eq!( member.unbonding_eras.values().cloned().collect::>(), @@ -376,7 +414,7 @@ frame_benchmarking::benchmarks! { // Sanity check join worked assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), + T::StakeAdapter::active_stake(&pool_account), min_create_bond + min_join_bond ); assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); @@ -386,7 +424,7 @@ frame_benchmarking::benchmarks! { // Sanity check that unbond worked assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), + T::StakeAdapter::active_stake(&pool_account), min_create_bond ); assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); @@ -419,7 +457,7 @@ frame_benchmarking::benchmarks! 
{ // Sanity check join worked assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), + T::StakeAdapter::active_stake(&pool_account), min_create_bond + min_join_bond ); assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); @@ -430,7 +468,7 @@ frame_benchmarking::benchmarks! { // Sanity check that unbond worked assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), + T::StakeAdapter::active_stake(&pool_account), min_create_bond ); assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); @@ -470,17 +508,17 @@ frame_benchmarking::benchmarks! { // here to ensure the complete flow for destroying a pool works - the reward pool account // should never exist by time the depositor withdraws so we test that it gets cleaned // up when unbonding. - let reward_account = Pools::::create_reward_account(1); + let reward_account = Pools::::generate_reward_account(1); assert!(frame_system::Account::::contains_key(&reward_account)); Pools::::fully_unbond(RuntimeOrigin::Signed(depositor.clone()).into(), depositor.clone()).unwrap(); // Sanity check that unbond worked assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), + T::StakeAdapter::active_stake(&pool_account), Zero::zero() ); assert_eq!( - CurrencyOf::::balance(&pool_account), + T::StakeAdapter::total_balance(&pool_account), min_create_bond ); assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); @@ -522,8 +560,8 @@ frame_benchmarking::benchmarks! { let depositor_lookup = T::Lookup::unlookup(depositor.clone()); // Give the depositor some balance to bond - CurrencyOf::::set_balance(&depositor, min_create_bond * 2u32.into()); - + // it needs to transfer min balance to reward account as well so give additional min balance. + CurrencyOf::::set_balance(&depositor, min_create_bond + CurrencyOf::::minimum_balance() * 2u32.into()); // Make sure no Pools exist at a pre-condition for our verify checks assert_eq!(RewardPools::::count(), 0); assert_eq!(BondedPools::::count(), 0); @@ -556,8 +594,8 @@ frame_benchmarking::benchmarks! { } ); assert_eq!( - T::Staking::active_stake(&Pools::::create_bonded_account(1)), - Ok(min_create_bond) + T::StakeAdapter::active_stake(&Pools::::generate_bonded_account(1)), + min_create_bond ); } @@ -596,8 +634,8 @@ frame_benchmarking::benchmarks! { } ); assert_eq!( - T::Staking::active_stake(&Pools::::create_bonded_account(1)), - Ok(min_create_bond) + T::StakeAdapter::active_stake(&Pools::::generate_bonded_account(1)), + min_create_bond ); } @@ -681,13 +719,13 @@ frame_benchmarking::benchmarks! { .map(|i| account("stash", USER_SEED, i)) .collect(); - assert_ok!(T::Staking::nominate(&pool_account, validators)); - assert!(T::Staking::nominations(&Pools::::create_bonded_account(1)).is_some()); + assert_ok!(T::StakeAdapter::nominate(&pool_account, validators)); + assert!(T::StakeAdapter::nominations(&Pools::::generate_bonded_account(1)).is_some()); whitelist_account!(depositor); }:_(RuntimeOrigin::Signed(depositor.clone()), 1) verify { - assert!(T::Staking::nominations(&Pools::::create_bonded_account(1)).is_none()); + assert!(T::StakeAdapter::nominations(&Pools::::generate_bonded_account(1)).is_none()); } set_commission { @@ -786,7 +824,7 @@ frame_benchmarking::benchmarks! 
{ // Sanity check join worked assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), + T::StakeAdapter::active_stake(&pool_account), min_create_bond + min_join_bond ); }:_(RuntimeOrigin::Signed(joiner.clone()), ClaimPermission::Permissioned) @@ -800,7 +838,7 @@ frame_benchmarking::benchmarks! { let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); let ed = CurrencyOf::::minimum_balance(); let (depositor, pool_account) = create_pool_account::(0, origin_weight, Some(commission)); - let reward_account = Pools::::create_reward_account(1); + let reward_account = Pools::::generate_reward_account(1); CurrencyOf::::set_balance(&reward_account, ed + origin_weight); // member claims a payout to make some commission available. @@ -829,7 +867,7 @@ frame_benchmarking::benchmarks! { let (depositor, _) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); // Remove ed freeze to create a scenario where the ed deposit needs to be adjusted. - let _ = Pools::::unfreeze_pool_deposit(&Pools::::create_reward_account(1)); + let _ = Pools::::unfreeze_pool_deposit(&Pools::::generate_reward_account(1)); assert!(&Pools::::check_ed_imbalance().is_err()); whitelist_account!(depositor); @@ -838,6 +876,147 @@ frame_benchmarking::benchmarks! { assert!(&Pools::::check_ed_imbalance().is_ok()); } + apply_slash { + // Note: With older `TransferStake` strategy, slashing is greedy and apply_slash should + // always fail. + + // We want to fill member's unbonding pools. So let's bond with big enough amount. + let deposit_amount = Pools::::depositor_min_bond() * T::MaxUnbonding::get().into() * 4u32.into(); + let (depositor, pool_account) = create_pool_account::(0, deposit_amount, None); + let depositor_lookup = T::Lookup::unlookup(depositor.clone()); + + // verify user balance in the pool. + assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount); + // verify delegated balance. + assert!(is_transfer_stake_strategy::() || T::StakeAdapter::member_delegation_balance(&depositor) == deposit_amount); + + // ugly type conversion between balances of pallet staking and pools (which really are same + // type). Maybe there is a better way? + let slash_amount: u128 = deposit_amount.into()/2; + + // slash pool by half + pallet_staking::slashing::do_slash::( + &pool_account, + slash_amount.into(), + &mut pallet_staking::BalanceOf::::zero(), + &mut pallet_staking::NegativeImbalanceOf::::zero(), + EraIndex::zero() + ); + + // verify user balance is slashed in the pool. + assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount/2u32.into()); + // verify delegated balance are not yet slashed. + assert!(is_transfer_stake_strategy::() || T::StakeAdapter::member_delegation_balance(&depositor) == deposit_amount); + + // Fill member's sub pools for the worst case. + for i in 1..(T::MaxUnbonding::get() + 1) { + pallet_staking::CurrentEra::::put(i); + assert!(Pools::::unbond(RuntimeOrigin::Signed(depositor.clone()).into(), depositor_lookup.clone(), Pools::::depositor_min_bond()).is_ok()); + } + + pallet_staking::CurrentEra::::put(T::MaxUnbonding::get() + 2); + + let slash_reporter = create_funded_user_with_balance::("slasher", 0, CurrencyOf::::minimum_balance()); + whitelist_account!(depositor); + }: + { + let res = Pools::::apply_slash(RuntimeOrigin::Signed(slash_reporter.clone()).into(), depositor_lookup.clone()); + // for transfer stake strategy, apply slash would error, otherwise success. 
+ assert!(is_transfer_stake_strategy::() ^ res.is_ok()); + } + verify { + // verify balances are correct and slash applied. + assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount/2u32.into()); + assert!(is_transfer_stake_strategy::() || T::StakeAdapter::member_delegation_balance(&depositor) == deposit_amount/2u32.into()); + } + + apply_slash_fail { + // Bench the scenario where pool has some unapplied slash but the member does not have any + // slash to be applied. + let deposit_amount = Pools::::depositor_min_bond() * 10u32.into(); + // Create pool. + let (depositor, pool_account) = create_pool_account::(0, deposit_amount, None); + + // slash pool by half + let slash_amount: u128 = deposit_amount.into()/2; + pallet_staking::slashing::do_slash::( + &pool_account, + slash_amount.into(), + &mut pallet_staking::BalanceOf::::zero(), + &mut pallet_staking::NegativeImbalanceOf::::zero(), + EraIndex::zero() + ); + + pallet_staking::CurrentEra::::put(1); + + // new member joins the pool who should not be affected by slash. + let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); + let join_amount = min_join_bond * T::MaxUnbonding::get().into() * 2u32.into(); + let joiner = create_funded_user_with_balance::("joiner", 0, join_amount * 2u32.into()); + let joiner_lookup = T::Lookup::unlookup(joiner.clone()); + assert!(Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), join_amount, 1).is_ok()); + + // Fill member's sub pools for the worst case. + for i in 0..T::MaxUnbonding::get() { + pallet_staking::CurrentEra::::put(i + 2); // +2 because we already set the current era to 1. + assert!(Pools::::unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner_lookup.clone(), min_join_bond).is_ok()); + } + + pallet_staking::CurrentEra::::put(T::MaxUnbonding::get() + 3); + whitelist_account!(joiner); + + }: { + // Since the StakeAdapter can be different based on the runtime config, the errors could be different as well. + assert!(Pools::::apply_slash(RuntimeOrigin::Signed(joiner.clone()).into(), joiner_lookup.clone()).is_err()); + } + + + pool_migrate { + // create a pool. + let deposit_amount = Pools::::depositor_min_bond() * 2u32.into(); + let (depositor, pool_account) = create_pool_account::(0, deposit_amount, None); + + // migrate pool to transfer stake. + let _ = migrate_to_transfer_stake::(1); + }: { + // Try migrate to `DelegateStake`. Would succeed only if `DelegateStake` strategy is used. + let res = Pools::::migrate_pool_to_delegate_stake(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into()); + assert!(is_transfer_stake_strategy::() ^ res.is_ok()); + } + verify { + // this queries agent balance if `DelegateStake` strategy. + assert!(T::StakeAdapter::total_balance(&pool_account) == deposit_amount); + } + + migrate_delegation { + // create a pool. + let deposit_amount = Pools::::depositor_min_bond() * 2u32.into(); + let (depositor, pool_account) = create_pool_account::(0, deposit_amount, None); + let depositor_lookup = T::Lookup::unlookup(depositor.clone()); + + // migrate pool to transfer stake. + let _ = migrate_to_transfer_stake::(1); + + // Now migrate pool to delegate stake keeping delegators unmigrated. + let migration_res = Pools::::migrate_pool_to_delegate_stake(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into()); + assert!(is_transfer_stake_strategy::() ^ migration_res.is_ok()); + + // verify balances that we will check again later. 
+		assert!(T::StakeAdapter::member_delegation_balance(&depositor) == Zero::zero());
+		assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount);
+
+		whitelist_account!(depositor);
+	}: {
+		let res = Pools::::migrate_delegation(RuntimeOrigin::Signed(depositor.clone()).into(), depositor_lookup.clone());
+		// for the transfer stake strategy, migrate_delegation would error; otherwise it succeeds.
+		assert!(is_transfer_stake_strategy::() ^ res.is_ok());
+	}
+	verify {
+		// verify balances once more.
+		assert!(is_transfer_stake_strategy::() || T::StakeAdapter::member_delegation_balance(&depositor) == deposit_amount);
+		assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount);
+	}
+
 	impl_benchmark_test_suite!(
 		Pallet,
 		crate::mock::new_test_ext(),
diff --git a/substrate/frame/nomination-pools/benchmarking/src/lib.rs b/substrate/frame/nomination-pools/benchmarking/src/lib.rs
index 45e8f1f27e9..910cdf2e3df 100644
--- a/substrate/frame/nomination-pools/benchmarking/src/lib.rs
+++ b/substrate/frame/nomination-pools/benchmarking/src/lib.rs
@@ -18,6 +18,7 @@
 //! Benchmarks for the nomination pools coupled with the staking and bags list pallets.
 
 #![cfg_attr(not(feature = "std"), no_std)]
+#![recursion_limit = "256"]
 
 #[cfg(feature = "runtime-benchmarks")]
 pub mod inner;
diff --git a/substrate/frame/nomination-pools/benchmarking/src/mock.rs b/substrate/frame/nomination-pools/benchmarking/src/mock.rs
index 2752d53a6b9..def98b4d294 100644
--- a/substrate/frame/nomination-pools/benchmarking/src/mock.rs
+++ b/substrate/frame/nomination-pools/benchmarking/src/mock.rs
@@ -77,7 +77,7 @@ impl pallet_balances::Config for Runtime {
 	type WeightInfo = ();
 	type FreezeIdentifier = RuntimeFreezeReason;
 	type MaxFreezes = ConstU32<1>;
-	type RuntimeHoldReason = ();
+	type RuntimeHoldReason = RuntimeHoldReason;
 	type RuntimeFreezeReason = ();
 }
 
@@ -120,7 +120,7 @@ impl pallet_staking::Config for Runtime {
 	type MaxControllersInDeprecationBatch = ConstU32<100>;
 	type MaxUnlockingChunks = ConstU32<32>;
 	type HistoryDepth = ConstU32<84>;
-	type EventListeners = Pools;
+	type EventListeners = (Pools, DelegatedStaking);
 	type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig;
 	type WeightInfo = ();
 	type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy;
@@ -166,7 +166,8 @@ impl pallet_nomination_pools::Config for Runtime {
 	type RewardCounter = FixedU128;
 	type BalanceToU256 = BalanceToU256;
 	type U256ToBalance = U256ToBalance;
-	type Staking = Staking;
+	type StakeAdapter =
+		pallet_nomination_pools::adapter::DelegateStake;
 	type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow;
 	type MaxMetadataLen = ConstU32<256>;
 	type MaxUnbonding = ConstU32<8>;
@@ -175,6 +176,20 @@ impl pallet_nomination_pools::Config for Runtime {
 	type AdminOrigin = frame_system::EnsureRoot;
 }
 
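The hunk above swaps the pools pallet's `type Staking` for the new `StakeAdapter` associated type, wired to `DelegateStake`. A runtime that wants to keep the legacy transfer-based behaviour would differ in that one line only. A minimal sketch of the two possible wirings, using this mock's type names (`Runtime`, `Staking`, `DelegatedStaking`); the generic parameters written out here are assumptions, not taken verbatim from this patch:

```rust
// Hedged sketch: the two `StakeAdapter` choices for `pallet_nomination_pools::Config`.
impl pallet_nomination_pools::Config for Runtime {
	// ...all other associated types as in the mock above...

	// Legacy strategy: member funds are transferred into the pool account and staked.
	// type StakeAdapter = pallet_nomination_pools::adapter::TransferStake<Self, Staking>;

	// New strategy: member funds stay held in the member's own account as a delegation.
	type StakeAdapter =
		pallet_nomination_pools::adapter::DelegateStake<Self, Staking, DelegatedStaking>;
}
```
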
+parameter_types! {
+	pub const DelegatedStakingPalletId: PalletId = PalletId(*b"py/dlstk");
+	pub const SlashRewardFraction: Perbill = Perbill::from_percent(1);
+}
+impl pallet_delegated_staking::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type PalletId = DelegatedStakingPalletId;
+	type Currency = Balances;
+	type OnSlash = ();
+	type SlashRewardFraction = SlashRewardFraction;
+	type RuntimeHoldReason = RuntimeHoldReason;
+	type CoreStaking = Staking;
+}
+
 impl crate::Config for Runtime {}
 
 type Block = frame_system::mocking::MockBlock;
@@ -187,6 +202,7 @@ frame_support::construct_runtime!(
 		Staking: pallet_staking,
 		VoterList: pallet_bags_list::,
 		Pools: pallet_nomination_pools,
+		DelegatedStaking: pallet_delegated_staking,
 	}
 );
 
diff --git a/substrate/frame/nomination-pools/fuzzer/src/call.rs b/substrate/frame/nomination-pools/fuzzer/src/call.rs
index 027fb2b6913..9e10d87da67 100644
--- a/substrate/frame/nomination-pools/fuzzer/src/call.rs
+++ b/substrate/frame/nomination-pools/fuzzer/src/call.rs
@@ -306,7 +306,7 @@ fn main() {
 			BondedPools::::iter().for_each(|(id, _)| {
 				let amount = random_ed_multiple(&mut rng);
 				let _ =
-					Balances::deposit_creating(&Pools::create_reward_account(id), amount);
+					Balances::deposit_creating(&Pools::generate_reward_account(id), amount);
 				// if we just paid out the reward agent, let's calculate how much we expect
 				// our reward agent to have earned.
 				if reward_agent.pool_id.map_or(false, |mid| mid == id) {
diff --git a/substrate/frame/nomination-pools/src/adapter.rs b/substrate/frame/nomination-pools/src/adapter.rs
new file mode 100644
index 00000000000..caf4671191d
--- /dev/null
+++ b/substrate/frame/nomination-pools/src/adapter.rs
@@ -0,0 +1,389 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::*;
+use sp_staking::{DelegationInterface, DelegationMigrator};
+
+/// Types of stake strategies.
+///
+/// Useful for determining the current staking strategy of a runtime and for enforcing integrity
+/// tests.
+#[derive(Encode, Decode, MaxEncodedLen, TypeInfo, RuntimeDebugNoBound, PartialEq)]
+pub enum StakeStrategyType {
+	/// Member funds are transferred to pool account and staked.
+	///
+	/// This is the older staking strategy used by pools. For a new runtime, it is recommended to
+	/// use the [`StakeStrategyType::Delegate`] strategy instead.
+	Transfer,
+	/// Member funds are delegated to pool account and staked.
+	Delegate,
+}
+
+/// An adapter trait that can support multiple staking strategies.
+///
+/// Depending on which staking strategy we want to use, the staking logic can be slightly
+/// different. Refer to the two currently possible strategies, [`TransferStake`] and
+/// [`DelegateStake`], for more detail.
+pub trait StakeStrategy {
+	type Balance: frame_support::traits::tokens::Balance;
+	type AccountId: Clone + sp_std::fmt::Debug;
+	type CoreStaking: StakingInterface;
+
+	/// The type of staking strategy of the current adapter.
+ fn strategy_type() -> StakeStrategyType; + + /// See [`StakingInterface::bonding_duration`]. + fn bonding_duration() -> EraIndex { + Self::CoreStaking::bonding_duration() + } + + /// See [`StakingInterface::current_era`]. + fn current_era() -> EraIndex { + Self::CoreStaking::current_era() + } + + /// See [`StakingInterface::minimum_nominator_bond`]. + fn minimum_nominator_bond() -> Self::Balance { + Self::CoreStaking::minimum_nominator_bond() + } + + /// Balance that can be transferred from pool account to member. + /// + /// This is part of the pool balance that is not actively staked. That is, tokens that are + /// in unbonding period or unbonded. + fn transferable_balance(pool_account: &Self::AccountId) -> Self::Balance; + + /// Total balance of the pool including amount that is actively staked. + fn total_balance(pool_account: &Self::AccountId) -> Self::Balance; + + /// Amount of tokens delegated by the member. + fn member_delegation_balance(member_account: &Self::AccountId) -> Self::Balance; + + /// See [`StakingInterface::active_stake`]. + fn active_stake(pool_account: &Self::AccountId) -> Self::Balance { + Self::CoreStaking::active_stake(pool_account).unwrap_or_default() + } + + /// See [`StakingInterface::total_stake`]. + fn total_stake(pool_account: &Self::AccountId) -> Self::Balance { + Self::CoreStaking::total_stake(pool_account).unwrap_or_default() + } + + /// Which strategy the pool account is using. + /// + /// This can be different from the [`Self::strategy_type`] of the adapter if the pool has not + /// migrated to the new strategy yet. + fn pool_strategy(pool_account: &Self::AccountId) -> StakeStrategyType { + match Self::CoreStaking::is_virtual_staker(pool_account) { + true => StakeStrategyType::Delegate, + false => StakeStrategyType::Transfer, + } + } + + /// See [`StakingInterface::nominate`]. + fn nominate( + pool_account: &Self::AccountId, + validators: Vec, + ) -> DispatchResult { + Self::CoreStaking::nominate(pool_account, validators) + } + + /// See [`StakingInterface::chill`]. + fn chill(pool_account: &Self::AccountId) -> DispatchResult { + Self::CoreStaking::chill(pool_account) + } + + /// Pledge `amount` towards `pool_account` and update the pool bond. Also see + /// [`StakingInterface::bond`]. + fn pledge_bond( + who: &Self::AccountId, + pool_account: &Self::AccountId, + reward_account: &Self::AccountId, + amount: Self::Balance, + bond_type: BondType, + ) -> DispatchResult; + + /// See [`StakingInterface::unbond`]. + fn unbond(pool_account: &Self::AccountId, amount: Self::Balance) -> DispatchResult { + Self::CoreStaking::unbond(pool_account, amount) + } + + /// See [`StakingInterface::withdraw_unbonded`]. + fn withdraw_unbonded( + pool_account: &Self::AccountId, + num_slashing_spans: u32, + ) -> Result { + Self::CoreStaking::withdraw_unbonded(pool_account.clone(), num_slashing_spans) + } + + /// Withdraw funds from pool account to member account. + fn member_withdraw( + who: &Self::AccountId, + pool_account: &Self::AccountId, + amount: Self::Balance, + num_slashing_spans: u32, + ) -> DispatchResult; + + /// Check if there is any pending slash for the pool. + fn has_pending_slash(pool_account: &Self::AccountId) -> bool; + + /// Slash the member account with `amount` against pending slashes for the pool. + fn member_slash( + who: &Self::AccountId, + pool_account: &Self::AccountId, + amount: Self::Balance, + maybe_reporter: Option, + ) -> DispatchResult; + + /// Migrate pool account from being a direct nominator to a delegated agent. 
+ /// + /// This is useful for migrating a pool account from [`StakeStrategyType::Transfer`] to + /// [`StakeStrategyType::Delegate`]. + fn migrate_nominator_to_agent( + pool_account: &Self::AccountId, + reward_account: &Self::AccountId, + ) -> DispatchResult; + + /// Migrate member balance from pool account to member account. + /// + /// This is useful for a pool account that migrated from [`StakeStrategyType::Transfer`] to + /// [`StakeStrategyType::Delegate`]. Its members can then migrate their delegated balance + /// back to their account. + /// + /// Internally, the member funds that are locked in the pool account are transferred back and + /// locked in the member account. + fn migrate_delegation( + pool: &Self::AccountId, + delegator: &Self::AccountId, + value: Self::Balance, + ) -> DispatchResult; + + /// List of validators nominated by the pool account. + #[cfg(feature = "runtime-benchmarks")] + fn nominations(pool_account: &Self::AccountId) -> Option> { + Self::CoreStaking::nominations(pool_account) + } + + /// Remove the pool account as agent. + /// + /// Useful for migrating pool account from a delegated agent to a direct nominator. Only used + /// in tests and benchmarks. + #[cfg(feature = "runtime-benchmarks")] + fn remove_as_agent(_pool: &Self::AccountId) { + // noop by default + } +} + +/// A staking strategy implementation that supports transfer based staking. +/// +/// In order to stake, this adapter transfers the funds from the member/delegator account to the +/// pool account and stakes through the pool account on `Staking`. +/// +/// This is the older Staking strategy used by pools. To switch to the newer [`DelegateStake`] +/// strategy in an existing runtime, storage migration is required. See +/// [`migration::unversioned::DelegationStakeMigration`]. For new runtimes, it is highly recommended +/// to use the [`DelegateStake`] strategy. +pub struct TransferStake(PhantomData<(T, Staking)>); + +impl, AccountId = T::AccountId>> + StakeStrategy for TransferStake +{ + type Balance = BalanceOf; + type AccountId = T::AccountId; + type CoreStaking = Staking; + + fn strategy_type() -> StakeStrategyType { + StakeStrategyType::Transfer + } + + fn transferable_balance(pool_account: &Self::AccountId) -> BalanceOf { + T::Currency::balance(pool_account).saturating_sub(Self::active_stake(pool_account)) + } + + fn total_balance(pool_account: &Self::AccountId) -> BalanceOf { + T::Currency::total_balance(pool_account) + } + + fn member_delegation_balance(_member_account: &T::AccountId) -> Staking::Balance { + // for transfer stake, delegation balance is always zero. 
+		Zero::zero()
+	}
+
+	fn pledge_bond(
+		who: &T::AccountId,
+		pool_account: &Self::AccountId,
+		reward_account: &Self::AccountId,
+		amount: BalanceOf,
+		bond_type: BondType,
+	) -> DispatchResult {
+		match bond_type {
+			BondType::Create => {
+				// first bond
+				T::Currency::transfer(who, pool_account, amount, Preservation::Expendable)?;
+				Staking::bond(pool_account, amount, &reward_account)
+			},
+			BondType::Extra => {
+				// additional bond
+				T::Currency::transfer(who, pool_account, amount, Preservation::Preserve)?;
+				Staking::bond_extra(pool_account, amount)
+			},
+		}
+	}
+
+	fn member_withdraw(
+		who: &T::AccountId,
+		pool_account: &Self::AccountId,
+		amount: BalanceOf,
+		_num_slashing_spans: u32,
+	) -> DispatchResult {
+		T::Currency::transfer(pool_account, &who, amount, Preservation::Expendable)?;
+
+		Ok(())
+	}
+
+	fn has_pending_slash(_: &Self::AccountId) -> bool {
+		// for the transfer stake strategy, slashing is greedy and never deferred.
+		false
+	}
+
+	fn member_slash(
+		_who: &T::AccountId,
+		_pool: &Self::AccountId,
+		_amount: Staking::Balance,
+		_maybe_reporter: Option,
+	) -> DispatchResult {
+		Err(Error::::Defensive(DefensiveError::DelegationUnsupported).into())
+	}
+
+	fn migrate_nominator_to_agent(
+		_pool: &Self::AccountId,
+		_reward_account: &Self::AccountId,
+	) -> DispatchResult {
+		Err(Error::::Defensive(DefensiveError::DelegationUnsupported).into())
+	}
+
+	fn migrate_delegation(
+		_pool: &Self::AccountId,
+		_delegator: &Self::AccountId,
+		_value: Self::Balance,
+	) -> DispatchResult {
+		Err(Error::::Defensive(DefensiveError::DelegationUnsupported).into())
+	}
+}
+
+/// A staking strategy implementation that supports delegation based staking.
+///
+/// In this approach, the funds are first delegated from the delegator to the pool account and
+/// later staked with `Staking`. The advantage of this approach is that the funds are held in the
+/// user account itself and not in the pool account.
+///
+/// This is the newer staking strategy used by pools. Once a runtime has switched to this strategy
+/// and migrated, it should ideally not go back to the `TransferStake` strategy; doing so would
+/// require a separate migration that this pallet does not provide.
+///
+/// Use [`migration::unversioned::DelegationStakeMigration`] to migrate to this strategy.
+pub struct DelegateStake( + PhantomData<(T, Staking, Delegation)>, +); + +impl< + T: Config, + Staking: StakingInterface, AccountId = T::AccountId>, + Delegation: DelegationInterface, AccountId = T::AccountId> + + DelegationMigrator, AccountId = T::AccountId>, + > StakeStrategy for DelegateStake +{ + type Balance = BalanceOf; + type AccountId = T::AccountId; + type CoreStaking = Staking; + + fn strategy_type() -> StakeStrategyType { + StakeStrategyType::Delegate + } + + fn transferable_balance(pool_account: &Self::AccountId) -> BalanceOf { + Delegation::agent_balance(pool_account).saturating_sub(Self::active_stake(pool_account)) + } + + fn total_balance(pool_account: &Self::AccountId) -> BalanceOf { + Delegation::agent_balance(pool_account) + } + + fn member_delegation_balance(member_account: &T::AccountId) -> BalanceOf { + Delegation::delegator_balance(member_account) + } + + fn pledge_bond( + who: &T::AccountId, + pool_account: &Self::AccountId, + reward_account: &Self::AccountId, + amount: BalanceOf, + bond_type: BondType, + ) -> DispatchResult { + match bond_type { + BondType::Create => { + // first delegation + Delegation::delegate(who, pool_account, reward_account, amount) + }, + BondType::Extra => { + // additional delegation + Delegation::delegate_extra(who, pool_account, amount) + }, + } + } + + fn member_withdraw( + who: &T::AccountId, + pool_account: &Self::AccountId, + amount: BalanceOf, + num_slashing_spans: u32, + ) -> DispatchResult { + Delegation::withdraw_delegation(&who, pool_account, amount, num_slashing_spans) + } + + fn has_pending_slash(pool_account: &Self::AccountId) -> bool { + Delegation::has_pending_slash(pool_account) + } + + fn member_slash( + who: &T::AccountId, + pool_account: &Self::AccountId, + amount: BalanceOf, + maybe_reporter: Option, + ) -> DispatchResult { + Delegation::delegator_slash(pool_account, who, amount, maybe_reporter) + } + + fn migrate_nominator_to_agent( + pool: &Self::AccountId, + reward_account: &Self::AccountId, + ) -> DispatchResult { + Delegation::migrate_nominator_to_agent(pool, reward_account) + } + + fn migrate_delegation( + pool: &Self::AccountId, + delegator: &Self::AccountId, + value: Self::Balance, + ) -> DispatchResult { + Delegation::migrate_delegation(pool, delegator, value) + } + + #[cfg(feature = "runtime-benchmarks")] + fn remove_as_agent(pool: &Self::AccountId) { + Delegation::drop_agent(pool) + } +} diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index 95d23f2280a..816334c1a08 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -351,6 +351,7 @@ #![cfg_attr(not(feature = "std"), no_std)] +use adapter::StakeStrategy; use codec::Codec; use frame_support::{ defensive, defensive_assert, ensure, @@ -397,6 +398,7 @@ pub mod mock; #[cfg(test)] mod tests; +pub mod adapter; pub mod migration; pub mod weights; @@ -425,11 +427,11 @@ pub enum ConfigOp { } /// The type of bonding that can happen to a pool. -enum BondType { +pub enum BondType { /// Someone is bonding into the pool upon creation. Create, /// Someone is adding more funds later to this pool. - Later, + Extra, } /// How to increase the bond of a member. @@ -549,9 +551,19 @@ impl PoolMember { /// Total balance of the member, both active and unbonding. /// Doesn't mutate state. 
- #[cfg(any(feature = "try-runtime", feature = "fuzzing", test, debug_assertions))] - fn total_balance(&self) -> BalanceOf { - let pool = BondedPool::::get(self.pool_id).unwrap(); + /// + /// Worst case, iterates over [`TotalUnbondingPools`] member unbonding pools to calculate member + /// balance. + pub fn total_balance(&self) -> BalanceOf { + let pool = match BondedPool::::get(self.pool_id) { + Some(pool) => pool, + None => { + // this internal function is always called with a valid pool id. + defensive!("pool should exist; qed"); + return Zero::zero(); + }, + }; + let active_balance = pool.points_to_balance(self.active_points()); let sub_pools = match SubPoolsStorage::::get(self.pool_id) { @@ -973,12 +985,12 @@ impl BondedPool { /// Get the bonded account id of this pool. fn bonded_account(&self) -> T::AccountId { - Pallet::::create_bonded_account(self.id) + Pallet::::generate_bonded_account(self.id) } /// Get the reward account id of this pool. fn reward_account(&self) -> T::AccountId { - Pallet::::create_reward_account(self.id) + Pallet::::generate_reward_account(self.id) } /// Consume self and put into storage. @@ -995,8 +1007,7 @@ impl BondedPool { /// /// This is often used for bonding and issuing new funds into the pool. fn balance_to_point(&self, new_funds: BalanceOf) -> BalanceOf { - let bonded_balance = - T::Staking::active_stake(&self.bonded_account()).unwrap_or(Zero::zero()); + let bonded_balance = T::StakeAdapter::active_stake(&self.bonded_account()); Pallet::::balance_to_point(bonded_balance, self.points, new_funds) } @@ -1004,8 +1015,7 @@ impl BondedPool { /// /// This is often used for unbonding. fn points_to_balance(&self, points: BalanceOf) -> BalanceOf { - let bonded_balance = - T::Staking::active_stake(&self.bonded_account()).unwrap_or(Zero::zero()); + let bonded_balance = T::StakeAdapter::active_stake(&self.bonded_account()); Pallet::::point_to_balance(bonded_balance, self.points, points) } @@ -1052,18 +1062,6 @@ impl BondedPool { self } - /// The pools balance that is transferable provided it is expendable by staking pallet. - fn transferable_balance(&self) -> BalanceOf { - let account = self.bonded_account(); - // Note on why we can't use `Currency::reducible_balance`: Since pooled account has a - // provider (staking pallet), the account can not be set expendable by - // `pallet-nomination-pool`. This means reducible balance always returns balance preserving - // ED in the account. What we want though is transferable balance given the account can be - // dusted. 
- T::Currency::balance(&account) - .saturating_sub(T::Staking::active_stake(&account).unwrap_or_default()) - } - fn is_root(&self, who: &T::AccountId) -> bool { self.roles.root.as_ref().map_or(false, |root| root == who) } @@ -1127,8 +1125,7 @@ impl BondedPool { fn ok_to_be_open(&self) -> Result<(), DispatchError> { ensure!(!self.is_destroying(), Error::::CanNotChangeState); - let bonded_balance = - T::Staking::active_stake(&self.bonded_account()).unwrap_or(Zero::zero()); + let bonded_balance = T::StakeAdapter::active_stake(&self.bonded_account()); ensure!(!bonded_balance.is_zero(), Error::::OverflowRisk); let points_to_balance_ratio_floor = self @@ -1257,28 +1254,17 @@ impl BondedPool { amount: BalanceOf, ty: BondType, ) -> Result, DispatchError> { - // Cache the value - let bonded_account = self.bonded_account(); - T::Currency::transfer( - who, - &bonded_account, - amount, - match ty { - BondType::Create => Preservation::Expendable, - BondType::Later => Preservation::Preserve, - }, - )?; // We must calculate the points issued *before* we bond who's funds, else points:balance // ratio will be wrong. let points_issued = self.issue(amount); - match ty { - BondType::Create => T::Staking::bond(&bonded_account, amount, &self.reward_account())?, - // The pool should always be created in such a way its in a state to bond extra, but if - // the active balance is slashed below the minimum bonded or the account cannot be - // found, we exit early. - BondType::Later => T::Staking::bond_extra(&bonded_account, amount)?, - } + T::StakeAdapter::pledge_bond( + who, + &self.bonded_account(), + &self.reward_account(), + amount, + ty, + )?; TotalValueLocked::::mutate(|tvl| { tvl.saturating_accrue(amount); }); @@ -1456,7 +1442,7 @@ impl RewardPool { /// This is sum of all the rewards that are claimable by pool members. fn current_balance(id: PoolId) -> BalanceOf { T::Currency::reducible_balance( - &Pallet::::create_reward_account(id), + &Pallet::::generate_reward_account(id), Preservation::Expendable, Fortitude::Polite, ) @@ -1569,7 +1555,7 @@ impl Get for TotalUnbondingPools { // NOTE: this may be dangerous in the scenario bonding_duration gets decreased because // we would no longer be able to decode `BoundedBTreeMap::, // TotalUnbondingPools>`, which uses `TotalUnbondingPools` as the bound - T::Staking::bonding_duration() + T::PostUnbondingPoolsWindow::get() + T::StakeAdapter::bonding_duration() + T::PostUnbondingPoolsWindow::get() } } @@ -1646,7 +1632,9 @@ pub mod pallet { type U256ToBalance: Convert>; /// The interface for nominating. - type Staking: StakingInterface, AccountId = Self::AccountId>; + /// + /// Note: Switching to a new [`StakeStrategy`] might require a migration of the storage. + type StakeAdapter: StakeStrategy>; /// The amount of eras a `SubPools::with_era` pool can exist before it gets merged into the /// `SubPools::no_era` pool. In other words, this is the amount of eras a member will be @@ -1950,6 +1938,16 @@ pub mod pallet { BondExtraRestricted, /// No imbalance in the ED deposit for the pool. NothingToAdjust, + /// No slash pending that can be applied to the member. + NothingToSlash, + /// No delegation to migrate. + NoDelegationToMigrate, + /// The pool has already migrated to enable delegation. + PoolAlreadyMigrated, + /// The pool has not migrated yet to enable delegation. + PoolNotMigrated, + /// This call is not allowed in the current state of the pallet. 
+			NotSupported,
 	}
 
 	#[derive(Encode, Decode, PartialEq, TypeInfo, PalletError, RuntimeDebug)]
@@ -1965,6 +1963,10 @@ pub mod pallet {
 		/// The bonded account should only be killed by the staking system when the depositor is
 		/// withdrawing
 		BondedStashKilledPrematurely,
+		/// The delegation feature is unsupported.
+		DelegationUnsupported,
+		/// Unable to apply a slash to the member of the pool.
+		SlashNotApplied,
 	}
 
 	impl From for Error {
@@ -2019,7 +2021,7 @@ pub mod pallet {
 			)?;
 			bonded_pool.try_inc_members()?;
-			let points_issued = bonded_pool.try_bond_funds(&who, amount, BondType::Later)?;
+			let points_issued = bonded_pool.try_bond_funds(&who, amount, BondType::Extra)?;
 
 			PoolMembers::insert(
 				who.clone(),
@@ -2141,12 +2143,12 @@ pub mod pallet {
 				&mut reward_pool,
 			)?;
 
-			let current_era = T::Staking::current_era();
-			let unbond_era = T::Staking::bonding_duration().saturating_add(current_era);
+			let current_era = T::StakeAdapter::current_era();
+			let unbond_era = T::StakeAdapter::bonding_duration().saturating_add(current_era);
 
 			// Unbond in the actual underlying nominator.
 			let unbonding_balance = bonded_pool.dissolve(unbonding_points);
-			T::Staking::unbond(&bonded_pool.bonded_account(), unbonding_balance)?;
+			T::StakeAdapter::unbond(&bonded_pool.bonded_account(), unbonding_balance)?;
 
 			// Note that we lazily create the unbonding pools here if they don't already exist
 			let mut sub_pools = SubPoolsStorage::::get(member.pool_id)
@@ -2209,7 +2211,7 @@ pub mod pallet {
 			// For now we only allow a pool to withdraw unbonded if its not destroying. If the pool
 			// is destroying then `withdraw_unbonded` can be used.
 			ensure!(pool.state != PoolState::Destroying, Error::::NotDestroying);
-			T::Staking::withdraw_unbonded(pool.bonded_account(), num_slashing_spans)?;
+			T::StakeAdapter::withdraw_unbonded(&pool.bonded_account(), num_slashing_spans)?;
 			Ok(())
 		}
 
@@ -2232,7 +2234,10 @@ pub mod pallet {
 		///
 		/// # Note
 		///
-		/// If the target is the depositor, the pool will be destroyed.
+		/// - If the target is the depositor, the pool will be destroyed.
+		/// - If the pool has any pending slash, we also try to slash the member before letting them
+		/// withdraw. This calculation adds some weight overhead and is only defensive. In reality,
+		/// pool slashes must have been already applied via permissionless [`Call::apply_slash`].
 		#[pallet::call_index(5)]
 		#[pallet::weight(
 			T::WeightInfo::withdraw_unbonded_kill(*num_slashing_spans)
@@ -2246,13 +2251,30 @@ pub mod pallet {
 			let member_account = T::Lookup::lookup(member_account)?;
 			let mut member =
 				PoolMembers::::get(&member_account).ok_or(Error::::PoolMemberNotFound)?;
-			let current_era = T::Staking::current_era();
+			let current_era = T::StakeAdapter::current_era();
 
 			let bonded_pool = BondedPool::::get(member.pool_id)
 				.defensive_ok_or::>(DefensiveError::PoolNotFound.into())?;
 			let mut sub_pools =
 				SubPoolsStorage::::get(member.pool_id).ok_or(Error::::SubPoolsNotFound)?;
 
+			let slash_weight =
+				// apply any pending slash before withdrawing.
+				match Self::do_apply_slash(&member_account, None) {
+					Ok(_) => T::WeightInfo::apply_slash(),
+					Err(e) => {
+						let no_pending_slash: DispatchResult = Err(Error::::NothingToSlash.into());
+						// This is an expected error. We add appropriate fees and continue withdrawal.
+						if Err(e) == no_pending_slash {
+							T::WeightInfo::apply_slash_fail()
+						} else {
+							// defensive: if we can't apply the slash for some reason, we abort.
+ return Err(Error::::Defensive(DefensiveError::SlashNotApplied).into()); + } + } + + }; + bonded_pool.ok_to_withdraw_unbonded_with(&caller, &member_account)?; let pool_account = bonded_pool.bonded_account(); @@ -2261,9 +2283,11 @@ pub mod pallet { ensure!(!withdrawn_points.is_empty(), Error::::CannotWithdrawAny); // Before calculating the `balance_to_unbond`, we call withdraw unbonded to ensure the - // `transferrable_balance` is correct. - let stash_killed = - T::Staking::withdraw_unbonded(pool_account.clone(), num_slashing_spans)?; + // `transferable_balance` is correct. + let stash_killed = T::StakeAdapter::withdraw_unbonded( + &bonded_pool.bonded_account(), + num_slashing_spans, + )?; // defensive-only: the depositor puts enough funds into the stash so that it will only // be destroyed when they are leaving. @@ -2310,15 +2334,16 @@ pub mod pallet { // don't exist. This check is also defensive in cases where the unbond pool does not // update its balance (e.g. a bug in the slashing hook.) We gracefully proceed in // order to ensure members can leave the pool and it can be destroyed. - .min(bonded_pool.transferable_balance()); + .min(T::StakeAdapter::transferable_balance(&bonded_pool.bonded_account())); - T::Currency::transfer( - &bonded_pool.bonded_account(), + // this can fail if the pool uses `DelegateStake` strategy and the member delegation + // is not claimed yet. See `Call::migrate_delegation()`. + T::StakeAdapter::member_withdraw( &member_account, + &bonded_pool.bonded_account(), balance_to_unbond, - Preservation::Expendable, - ) - .defensive()?; + num_slashing_spans, + )?; Self::deposit_event(Event::::Withdrawn { member: member_account.clone(), @@ -2340,20 +2365,20 @@ pub mod pallet { if member_account == bonded_pool.roles.depositor { Pallet::::dissolve_pool(bonded_pool); - None + Weight::default() } else { bonded_pool.dec_members().put(); SubPoolsStorage::::insert(member.pool_id, sub_pools); - Some(T::WeightInfo::withdraw_unbonded_update(num_slashing_spans)) + T::WeightInfo::withdraw_unbonded_update(num_slashing_spans) } } else { // we certainly don't need to delete any pools, because no one is being removed. SubPoolsStorage::::insert(member.pool_id, sub_pools); PoolMembers::::insert(&member_account, member); - Some(T::WeightInfo::withdraw_unbonded_update(num_slashing_spans)) + T::WeightInfo::withdraw_unbonded_update(num_slashing_spans) }; - Ok(post_info_weight.into()) + Ok(Some(post_info_weight.saturating_add(slash_weight)).into()) } /// Create a new delegation pool. @@ -2448,7 +2473,7 @@ pub mod pallet { Error::::MinimumBondNotMet ); - T::Staking::nominate(&bonded_pool.bonded_account(), validators) + T::StakeAdapter::nominate(&bonded_pool.bonded_account(), validators) } /// Set a new state for the pool. @@ -2636,12 +2661,12 @@ pub mod pallet { .active_points(); if bonded_pool.points_to_balance(depositor_points) >= - T::Staking::minimum_nominator_bond() + T::StakeAdapter::minimum_nominator_bond() { ensure!(bonded_pool.can_nominate(&who), Error::::NotNominator); } - T::Staking::chill(&bonded_pool.bonded_account()) + T::StakeAdapter::chill(&bonded_pool.bonded_account()) } /// `origin` bonds funds from `extra` for some pool member `member` into their respective @@ -2838,6 +2863,119 @@ pub mod pallet { Ok(()) } + + /// Apply a pending slash on a member. + /// + /// Fails unless [`crate::pallet::Config::StakeAdapter`] is of strategy type: + /// [`adapter::StakeStrategyType::Delegate`]. + /// + /// This call can be dispatched permissionlessly (i.e. by any account). 
If the member has a
+ /// pending slash to be applied, the caller may be rewarded with a part of the slash.
+ #[pallet::call_index(23)]
+ #[pallet::weight(T::WeightInfo::apply_slash())]
+ pub fn apply_slash(
+ origin: OriginFor,
+ member_account: AccountIdLookupOf,
+ ) -> DispatchResultWithPostInfo {
+ ensure!(
+ T::StakeAdapter::strategy_type() == adapter::StakeStrategyType::Delegate,
+ Error::::NotSupported
+ );
+
+ let who = ensure_signed(origin)?;
+ let member_account = T::Lookup::lookup(member_account)?;
+ Self::do_apply_slash(&member_account, Some(who))?;
+
+ // If successful, refund the fees.
+ Ok(Pays::No.into())
+ }
+
+ /// Migrates delegated funds from the pool account to the `member_account`.
+ ///
+ /// Fails unless [`crate::pallet::Config::StakeAdapter`] is of strategy type:
+ /// [`adapter::StakeStrategyType::Delegate`].
+ ///
+ /// This is a permissionless call and refunds any fee if the claim is successful.
+ ///
+ /// If the pool has migrated to delegation-based staking, the staked tokens of pool members
+ /// can be moved and held in their own account. See [`adapter::DelegateStake`].
+ #[pallet::call_index(24)]
+ #[pallet::weight(T::WeightInfo::migrate_delegation())]
+ pub fn migrate_delegation(
+ origin: OriginFor,
+ member_account: AccountIdLookupOf,
+ ) -> DispatchResultWithPostInfo {
+ let _caller = ensure_signed(origin)?;
+
+ ensure!(
+ T::StakeAdapter::strategy_type() == adapter::StakeStrategyType::Delegate,
+ Error::::NotSupported
+ );
+
+ let member_account = T::Lookup::lookup(member_account)?;
+ let member =
+ PoolMembers::::get(&member_account).ok_or(Error::::PoolMemberNotFound)?;
+
+ // ensure pool is migrated.
+ ensure!(
+ T::StakeAdapter::pool_strategy(&Self::generate_bonded_account(member.pool_id)) ==
+ adapter::StakeStrategyType::Delegate,
+ Error::::PoolNotMigrated
+ );
+
+ let pool_contribution = member.total_balance();
+ ensure!(pool_contribution >= MinJoinBond::::get(), Error::::MinimumBondNotMet);
+ // the member must have some contribution to be migrated.
+ ensure!(pool_contribution > Zero::zero(), Error::::NoDelegationToMigrate);
+
+ let delegation = T::StakeAdapter::member_delegation_balance(&member_account);
+ // delegation can be claimed only once.
+ ensure!(delegation == Zero::zero(), Error::::NoDelegationToMigrate);
+
+ let diff = pool_contribution.defensive_saturating_sub(delegation);
+ T::StakeAdapter::migrate_delegation(
+ &Pallet::::generate_bonded_account(member.pool_id),
+ &member_account,
+ diff,
+ )?;
+
+ // if successful, we refund the fee.
+ Ok(Pays::No.into())
+ }
+
+ /// Migrate pool from [`adapter::StakeStrategyType::Transfer`] to
+ /// [`adapter::StakeStrategyType::Delegate`].
+ ///
+ /// Fails unless [`crate::pallet::Config::StakeAdapter`] is of strategy type:
+ /// [`adapter::StakeStrategyType::Delegate`].
+ ///
+ /// This call can be dispatched permissionlessly, and refunds any fee if successful.
+ ///
+ /// If the pool has already migrated to delegation-based staking, this call will fail.
+ #[pallet::call_index(25)]
+ #[pallet::weight(T::WeightInfo::pool_migrate())]
+ pub fn migrate_pool_to_delegate_stake(
+ origin: OriginFor,
+ pool_id: PoolId,
+ ) -> DispatchResultWithPostInfo {
+ // gate this call so it is only available when the `DelegateStake` strategy is used.
+ ensure!(
+ T::StakeAdapter::strategy_type() == adapter::StakeStrategyType::Delegate,
+ Error::::NotSupported
+ );
+
+ let _caller = ensure_signed(origin)?;
+ // ensure pool exists.
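+ // (a missing pool id should fail before any migration work is attempted).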
+ let bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; + ensure!( + T::StakeAdapter::pool_strategy(&bonded_pool.bonded_account()) == + adapter::StakeStrategyType::Transfer, + Error::::PoolAlreadyMigrated + ); + + Self::migrate_to_delegate_stake(pool_id)?; + Ok(Pays::No.into()) + } } #[pallet::hooks] @@ -2853,7 +2991,7 @@ pub mod pallet { "Minimum points to balance ratio must be greater than 0" ); assert!( - T::Staking::bonding_duration() < TotalUnbondingPools::::get(), + T::StakeAdapter::bonding_duration() < TotalUnbondingPools::::get(), "There must be more unbonding pools then the bonding duration / so a slash can be applied to relevant unbonding pools. (We assume / the bonding duration > slash deffer duration.", @@ -2871,7 +3009,7 @@ impl Pallet { /// It is essentially `max { MinNominatorBond, MinCreateBond, MinJoinBond }`, where the former /// is coming from the staking pallet and the latter two are configured in this pallet. pub fn depositor_min_bond() -> BalanceOf { - T::Staking::minimum_nominator_bond() + T::StakeAdapter::minimum_nominator_bond() .max(MinCreateBond::::get()) .max(MinJoinBond::::get()) .max(T::Currency::minimum_balance()) @@ -2907,7 +3045,7 @@ impl Pallet { "bonded account of dissolving pool should have no consumers" ); defensive_assert!( - T::Staking::total_stake(&bonded_account).unwrap_or_default() == Zero::zero(), + T::StakeAdapter::total_stake(&bonded_pool.bonded_account()) == Zero::zero(), "dissolving pool should not have any stake in the staking pallet" ); @@ -2930,11 +3068,12 @@ impl Pallet { "could not transfer all amount to depositor while dissolving pool" ); defensive_assert!( - T::Currency::total_balance(&bonded_pool.bonded_account()) == Zero::zero(), + T::StakeAdapter::total_balance(&bonded_pool.bonded_account()) == Zero::zero(), "dissolving pool should not have any balance" ); // NOTE: Defensively force set balance to zero. T::Currency::set_balance(&reward_account, Zero::zero()); + // With `DelegateStake` strategy, this won't do anything. T::Currency::set_balance(&bonded_pool.bonded_account(), Zero::zero()); Self::deposit_event(Event::::Destroyed { pool_id: bonded_pool.id }); @@ -2945,12 +3084,19 @@ impl Pallet { } /// Create the main, bonded account of a pool with the given id. - pub fn create_bonded_account(id: PoolId) -> T::AccountId { + pub fn generate_bonded_account(id: PoolId) -> T::AccountId { T::PalletId::get().into_sub_account_truncating((AccountType::Bonded, id)) } + fn migrate_to_delegate_stake(id: PoolId) -> DispatchResult { + T::StakeAdapter::migrate_nominator_to_agent( + &Self::generate_bonded_account(id), + &Self::generate_reward_account(id), + ) + } + /// Create the reward account of a pool with the given id. - pub fn create_reward_account(id: PoolId) -> T::AccountId { + pub fn generate_reward_account(id: PoolId) -> T::AccountId { // NOTE: in order to have a distinction in the test account id type (u128), we put // account_type first so it does not get truncated out. 
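// Like the bonded account, the result is derived purely from the pallet id and
// (account type, pool id), so it can always be re-computed without a storage read.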
T::PalletId::get().into_sub_account_truncating((AccountType::Reward, id)) @@ -3194,9 +3340,9 @@ impl Pallet { let (points_issued, bonded) = match extra { BondExtra::FreeBalance(amount) => - (bonded_pool.try_bond_funds(&member_account, amount, BondType::Later)?, amount), + (bonded_pool.try_bond_funds(&member_account, amount, BondType::Extra)?, amount), BondExtra::Rewards => - (bonded_pool.try_bond_funds(&member_account, claimed, BondType::Later)?, claimed), + (bonded_pool.try_bond_funds(&member_account, claimed, BondType::Extra)?, claimed), }; bonded_pool.ok_to_be_open()?; @@ -3317,6 +3463,36 @@ impl Pallet { Ok(()) } + /// Slash member against the pending slash for the pool. + fn do_apply_slash( + member_account: &T::AccountId, + reporter: Option, + ) -> DispatchResult { + // calculate points to be slashed. + let member = + PoolMembers::::get(&member_account).ok_or(Error::::PoolMemberNotFound)?; + + let pool_account = Pallet::::generate_bonded_account(member.pool_id); + ensure!(T::StakeAdapter::has_pending_slash(&pool_account), Error::::NothingToSlash); + + let unslashed_balance = T::StakeAdapter::member_delegation_balance(&member_account); + let slashed_balance = member.total_balance(); + defensive_assert!( + unslashed_balance >= slashed_balance, + "unslashed balance should always be greater or equal to the slashed" + ); + + // if nothing to slash, return error. + ensure!(unslashed_balance > slashed_balance, Error::::NothingToSlash); + + T::StakeAdapter::member_slash( + &member_account, + &pool_account, + unslashed_balance.defensive_saturating_sub(slashed_balance), + reporter, + ) + } + /// Apply freeze on reward account to restrict it from going below ED. pub(crate) fn freeze_pool_deposit(reward_acc: &T::AccountId) -> DispatchResult { T::Currency::set_freeze( @@ -3395,7 +3571,7 @@ impl Pallet { ); for id in reward_pools { - let account = Self::create_reward_account(id); + let account = Self::generate_reward_account(id); if T::Currency::reducible_balance(&account, Preservation::Expendable, Fortitude::Polite) < T::Currency::minimum_balance() { @@ -3480,8 +3656,7 @@ impl Pallet { pool is being destroyed and the depositor is the last member", ); - expected_tvl += - T::Staking::total_stake(&bonded_pool.bonded_account()).unwrap_or_default(); + expected_tvl += T::StakeAdapter::total_stake(&bonded_pool.bonded_account()); Ok(()) })?; @@ -3506,19 +3681,28 @@ impl Pallet { } for (pool_id, _pool) in BondedPools::::iter() { - let pool_account = Pallet::::create_bonded_account(pool_id); + let pool_account = Pallet::::generate_bonded_account(pool_id); let subs = SubPoolsStorage::::get(pool_id).unwrap_or_default(); let sum_unbonding_balance = subs.sum_unbonding_balance(); - let bonded_balance = T::Staking::active_stake(&pool_account).unwrap_or_default(); - let total_balance = T::Currency::total_balance(&pool_account); + let bonded_balance = T::StakeAdapter::active_stake(&pool_account); + let total_balance = T::StakeAdapter::total_balance(&pool_account); + + // At the time when StakeAdapter is changed but migration is not yet done, the new + // adapter would return zero balance (as it is not an agent yet). We handle that by + // falling back to reading actual balance of the pool account. 
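+ // (`TransferStake` keeps funds in the pool account itself, so its raw currency
+ // balance is still the correct source of truth before migration).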
+ let pool_balance = if total_balance.is_zero() { + T::Currency::total_balance(&pool_account) + } else { + total_balance + }; assert!( - total_balance >= bonded_balance + sum_unbonding_balance, - "faulty pool: {:?} / {:?}, total_balance {:?} >= bonded_balance {:?} + sum_unbonding_balance {:?}", + pool_balance >= bonded_balance + sum_unbonding_balance, + "faulty pool: {:?} / {:?}, pool_balance {:?} >= bonded_balance {:?} + sum_unbonding_balance {:?}", pool_id, _pool, - total_balance, + pool_balance, bonded_balance, sum_unbonding_balance ); @@ -3544,7 +3728,7 @@ impl Pallet { pub fn check_ed_imbalance() -> Result<(), DispatchError> { let mut failed: u32 = 0; BondedPools::::iter_keys().for_each(|id| { - let reward_acc = Self::create_reward_account(id); + let reward_acc = Self::generate_reward_account(id); let frozen_balance = T::Currency::balance_frozen(&FreezeReason::PoolMinBalance.into(), &reward_acc); @@ -3615,7 +3799,7 @@ impl Pallet { pub fn api_balance_to_points(pool_id: PoolId, new_funds: BalanceOf) -> BalanceOf { if let Some(pool) = BondedPool::::get(pool_id) { let bonded_balance = - T::Staking::active_stake(&pool.bonded_account()).unwrap_or(Zero::zero()); + T::StakeAdapter::active_stake(&Self::generate_bonded_account(pool_id)); Pallet::::balance_to_point(bonded_balance, pool.points, new_funds) } else { Zero::zero() diff --git a/substrate/frame/nomination-pools/src/migration.rs b/substrate/frame/nomination-pools/src/migration.rs index 796b310862a..a3989559dfb 100644 --- a/substrate/frame/nomination-pools/src/migration.rs +++ b/substrate/frame/nomination-pools/src/migration.rs @@ -107,6 +107,137 @@ pub mod unversioned { Ok(()) } } + + /// Migrate existing pools from [`adapter::StakeStrategyType::Transfer`] to + /// [`adapter::StakeStrategyType::Delegate`]. + /// + /// Note: This only migrates the pools, the members are not migrated. They can use the + /// permission-less [`Pallet::migrate_delegation()`] to migrate their funds. + /// + /// This migration does not break any existing pool storage item, does not need to happen in any + /// sequence and hence can be applied unversioned on a production runtime. + /// + /// Takes `MaxPools` as type parameter to limit the number of pools that should be migrated in a + /// single block. It should be set such that migration weight does not exceed the block weight + /// limit. If all pools can be safely migrated, it is good to keep this number a little higher + /// than the actual number of pools to handle any extra pools created while the migration is + /// proposed, and before it is executed. + /// + /// If there are pools that fail to migrate or did not fit in the bounds, the remaining pools + /// can be migrated via the permission-less extrinsic [`Call::migrate_pool_to_delegate_stake`]. + pub struct DelegationStakeMigration(sp_std::marker::PhantomData<(T, MaxPools)>); + + impl> OnRuntimeUpgrade for DelegationStakeMigration { + fn on_runtime_upgrade() -> Weight { + let mut count: u32 = 0; + + BondedPools::::iter_keys().take(MaxPools::get() as usize).for_each(|id| { + let pool_acc = Pallet::::generate_bonded_account(id); + + // only migrate if the pool is in Transfer Strategy. 
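+ // This also keeps the upgrade idempotent: pools already using `DelegateStake`
+ // are simply skipped.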
+ if T::StakeAdapter::pool_strategy(&pool_acc) == adapter::StakeStrategyType::Transfer
+ {
+ let _ = Pallet::::migrate_to_delegate_stake(id).map_err(|err| {
+ log!(
+ warn,
+ "failed to migrate pool {:?} to delegate stake strategy with err: {:?}",
+ id,
+ err
+ )
+ });
+ count.saturating_inc();
+ }
+ });
+
+ log!(info, "migrated {:?} pools to delegate stake strategy", count);
+
+ // reads: (bonded pool key + current pool strategy) * MaxPools (worst case)
+ T::DbWeight::get()
+ .reads_writes(2, 0)
+ .saturating_mul(MaxPools::get() as u64)
+ // migration weight: `pool_migrate` weight * count
+ .saturating_add(T::WeightInfo::pool_migrate().saturating_mul(count.into()))
+ }
+
+ #[cfg(feature = "try-runtime")]
+ fn pre_upgrade() -> Result, TryRuntimeError> {
+ // ensure the runtime is configured with the `Delegate` stake adapter.
+ ensure!(
+ T::StakeAdapter::strategy_type() == adapter::StakeStrategyType::Delegate,
+ "Current strategy is not `Delegate`"
+ );
+
+ if BondedPools::::count() > MaxPools::get() {
+ // we log a warning if the number of pools exceeds the bound.
+ log!(
+ warn,
+ "Number of pools {} exceeds the maximum bound {}. This would leave some pools unmigrated.", BondedPools::::count(), MaxPools::get()
+ );
+ }
+
+ let mut pool_balances: Vec> = Vec::new();
+ BondedPools::::iter_keys().take(MaxPools::get() as usize).for_each(|id| {
+ let pool_account = Pallet::::generate_bonded_account(id);
+ let current_strategy = T::StakeAdapter::pool_strategy(&pool_account);
+
+ // read the balance from whichever source is valid for the pool's current
+ // strategy, so that this check stays idempotent.
+ let pool_balance = if current_strategy == adapter::StakeStrategyType::Transfer {
+ T::Currency::total_balance(&pool_account)
+ } else {
+ T::StakeAdapter::total_balance(&pool_account)
+ };
+
+ pool_balances.push(pool_balance);
+ });
+
+ Ok(pool_balances.encode())
+ }
+
+ #[cfg(feature = "try-runtime")]
+ fn post_upgrade(data: Vec) -> Result<(), TryRuntimeError> {
+ let expected_pool_balances: Vec> = Decode::decode(&mut &data[..]).unwrap();
+
+ for (index, id) in
+ BondedPools::::iter_keys().take(MaxPools::get() as usize).enumerate()
+ {
+ let pool_account = Pallet::::generate_bonded_account(id);
+ if T::StakeAdapter::pool_strategy(&pool_account) ==
+ adapter::StakeStrategyType::Transfer
+ {
+ log!(error, "Pool {} failed to migrate", id);
+ return Err(TryRuntimeError::Other("Pool failed to migrate"));
+ }
+
+ let actual_balance = T::StakeAdapter::total_balance(&pool_account);
+ let expected_balance = expected_pool_balances.get(index).unwrap();
+
+ if actual_balance != *expected_balance {
+ log!(
+ error,
+ "Pool {} balance mismatch. Expected: {:?}, Actual: {:?}",
+ id,
+ expected_balance,
+ actual_balance
+ );
+ return Err(TryRuntimeError::Other("Pool balance mismatch"));
+ }
+
+ // account balance should be zero.
+ let pool_account_balance = T::Currency::total_balance(&pool_account);
+ if pool_account_balance != Zero::zero() {
+ log!(
+ error,
+ "Pool account balance was expected to be zero.
Pool: {}, Balance: {:?}", + id, + pool_account_balance + ); + return Err(TryRuntimeError::Other("Pool account balance not migrated")); + } + } + + Ok(()) + } + } } pub mod v8 { @@ -201,7 +332,7 @@ pub(crate) mod v7 { impl V7BondedPool { #[allow(dead_code)] fn bonded_account(&self) -> T::AccountId { - Pallet::::create_bonded_account(self.id) + Pallet::::generate_bonded_account(self.id) } } @@ -275,7 +406,7 @@ mod v6 { impl MigrateToV6 { fn freeze_ed(pool_id: PoolId) -> Result<(), ()> { - let reward_acc = Pallet::::create_reward_account(pool_id); + let reward_acc = Pallet::::generate_reward_account(pool_id); Pallet::::freeze_pool_deposit(&reward_acc).map_err(|e| { log!(error, "Failed to freeze ED for pool {} with error: {:?}", pool_id, e); () @@ -760,7 +891,7 @@ pub mod v2 { }; let accumulated_reward = RewardPool::::current_balance(id); - let reward_account = Pallet::::create_reward_account(id); + let reward_account = Pallet::::generate_reward_account(id); let mut sum_paid_out = BalanceOf::::zero(); members @@ -882,7 +1013,7 @@ pub mod v2 { // all reward accounts must have more than ED. RewardPools::::iter().try_for_each(|(id, _)| -> Result<(), TryRuntimeError> { ensure!( - >::balance(&Pallet::::create_reward_account(id)) >= + >::balance(&Pallet::::generate_reward_account(id)) >= T::Currency::minimum_balance(), "Reward accounts must have greater balance than ED." ); @@ -1022,11 +1153,8 @@ mod helpers { use super::*; pub(crate) fn calculate_tvl_by_total_stake() -> BalanceOf { - BondedPools::::iter() - .map(|(id, inner)| { - T::Staking::total_stake(&BondedPool { id, inner: inner.clone() }.bonded_account()) - .unwrap_or_default() - }) + BondedPools::::iter_keys() + .map(|id| T::StakeAdapter::total_stake(&Pallet::::generate_bonded_account(id))) .reduce(|acc, total_balance| acc + total_balance) .unwrap_or_default() } diff --git a/substrate/frame/nomination-pools/src/mock.rs b/substrate/frame/nomination-pools/src/mock.rs index e34719a7b80..b659c975a83 100644 --- a/substrate/frame/nomination-pools/src/mock.rs +++ b/substrate/frame/nomination-pools/src/mock.rs @@ -36,12 +36,12 @@ pub type Currency = ::Currency; // Ext builder creates a pool with id 1. pub fn default_bonded_account() -> AccountId { - Pools::create_bonded_account(1) + Pools::generate_bonded_account(1) } // Ext builder creates a pool with id 1. pub fn default_reward_account() -> AccountId { - Pools::create_reward_account(1) + Pools::generate_reward_account(1) } parameter_types! { @@ -71,7 +71,7 @@ impl StakingMock { /// Does not modify any [`SubPools`] of the pool as [`Default::default`] is passed for /// `slashed_unlocking`. 
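/// The slash is applied greedily: the pool's bonded balance is reduced immediately
/// and in full.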
pub fn slash_by(pool_id: PoolId, amount: Balance) { - let acc = Pools::create_bonded_account(pool_id); + let acc = Pools::generate_bonded_account(pool_id); let bonded = BondedBalanceMap::get(); let pre_total = bonded.get(&acc).unwrap(); Self::set_bonded_balance(acc, pre_total - amount); @@ -111,6 +111,10 @@ impl sp_staking::StakingInterface for StakingMock { .ok_or(DispatchError::Other("NotStash")) } + fn is_virtual_staker(_who: &Self::AccountId) -> bool { + false + } + fn bond_extra(who: &Self::AccountId, extra: Self::Balance) -> DispatchResult { let mut x = BondedBalanceMap::get(); x.get_mut(who).map(|v| *v += extra); @@ -314,7 +318,7 @@ impl pools::Config for Runtime { type RewardCounter = RewardCounter; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; - type Staking = StakingMock; + type StakeAdapter = adapter::TransferStake; type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow; type PalletId = PoolsPalletId; type MaxMetadataLen = MaxMetadataLen; diff --git a/substrate/frame/nomination-pools/src/tests.rs b/substrate/frame/nomination-pools/src/tests.rs index 535e7537469..8fc339c695b 100644 --- a/substrate/frame/nomination-pools/src/tests.rs +++ b/substrate/frame/nomination-pools/src/tests.rs @@ -95,8 +95,8 @@ fn test_setup_works() { PoolMember:: { pool_id: last_pool, points: 10, ..Default::default() } ); - let bonded_account = Pools::create_bonded_account(last_pool); - let reward_account = Pools::create_reward_account(last_pool); + let bonded_account = Pools::generate_bonded_account(last_pool); + let reward_account = Pools::generate_reward_account(last_pool); // the bonded_account should be bonded by the depositor's funds. assert_eq!(StakingMock::active_stake(&bonded_account).unwrap(), 10); @@ -728,7 +728,7 @@ mod join { ); // Force the pools bonded balance to 0, simulating a 100% slash - StakingMock::set_bonded_balance(Pools::create_bonded_account(1), 0); + StakingMock::set_bonded_balance(Pools::generate_bonded_account(1), 0); assert_noop!( Pools::join(RuntimeOrigin::signed(11), 420, 1), Error::::OverflowRisk @@ -755,7 +755,7 @@ mod join { <::MaxPointsToBalance as Get>::get().into(); StakingMock::set_bonded_balance( - Pools::create_bonded_account(123), + Pools::generate_bonded_account(123), max_points_to_balance, ); assert_noop!( @@ -764,7 +764,7 @@ mod join { ); StakingMock::set_bonded_balance( - Pools::create_bonded_account(123), + Pools::generate_bonded_account(123), Balance::MAX / max_points_to_balance, ); // Balance needs to be gt Balance::MAX / `MaxPointsToBalance` @@ -773,7 +773,10 @@ mod join { TokenError::FundsUnavailable, ); - StakingMock::set_bonded_balance(Pools::create_bonded_account(1), max_points_to_balance); + StakingMock::set_bonded_balance( + Pools::generate_bonded_account(1), + max_points_to_balance, + ); // Cannot join a pool that isn't open unsafe_set_state(123, PoolState::Blocked); @@ -804,7 +807,7 @@ mod join { #[cfg_attr(not(debug_assertions), should_panic)] fn join_panics_when_reward_pool_not_found() { ExtBuilder::default().build_and_execute(|| { - StakingMock::set_bonded_balance(Pools::create_bonded_account(123), 100); + StakingMock::set_bonded_balance(Pools::generate_bonded_account(123), 100); BondedPool:: { id: 123, inner: BondedPoolInner { @@ -1979,7 +1982,7 @@ mod claim_payout { assert_eq!(member_20.last_recorded_reward_counter, 0.into()); // pre-fund the reward account of pool id 3 with some funds. 
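// (checks that funds already in the reward account do not distort the
// initial reward counter).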
- Currency::set_balance(&Pools::create_reward_account(3), 10); + Currency::set_balance(&Pools::generate_reward_account(3), 10); // create pool 3 Currency::set_balance(&30, 100); @@ -1988,7 +1991,7 @@ mod claim_payout { // reward counter is still the same. let (member_30, _, reward_pool_30) = Pools::get_member_with_pools(&30).unwrap(); assert_eq!( - Currency::free_balance(&Pools::create_reward_account(3)), + Currency::free_balance(&Pools::generate_reward_account(3)), 10 + Currency::minimum_balance() ); @@ -4631,7 +4634,7 @@ mod withdraw_unbonded { // pool is destroyed. assert!(!Metadata::::contains_key(1)); // ensure the pool account is reaped. - assert!(!frame_system::Account::::contains_key(&Pools::create_bonded_account(1))); + assert!(!frame_system::Account::::contains_key(&Pools::generate_bonded_account(1))); }) } @@ -4639,7 +4642,7 @@ mod withdraw_unbonded { fn destroy_works_with_erroneous_extra_consumer() { ExtBuilder::default().ed(1).build_and_execute(|| { // 10 is the depositor for pool 1, with min join bond 10. - let pool_one = Pools::create_bonded_account(1); + let pool_one = Pools::generate_bonded_account(1); // set pool to destroying. unsafe_set_state(1, PoolState::Destroying); @@ -4690,7 +4693,7 @@ mod create { fn create_works() { ExtBuilder::default().build_and_execute(|| { // next pool id is 2. - let next_pool_stash = Pools::create_bonded_account(2); + let next_pool_stash = Pools::generate_bonded_account(2); let ed = Currency::minimum_balance(); assert_eq!(TotalValueLocked::::get(), 10); @@ -5011,6 +5014,13 @@ mod set_state { // surpassed. Making this pool destroyable by anyone. StakingMock::slash_by(1, 10); + // in mock we are using transfer stake which implies slash is greedy. Extrinsic to + // apply pending slash should fail. + assert_noop!( + Pools::apply_slash(RuntimeOrigin::signed(11), 10), + Error::::NotSupported + ); + // When assert_ok!(Pools::set_state(RuntimeOrigin::signed(11), 1, PoolState::Destroying)); // Then @@ -7473,3 +7483,61 @@ mod chill { }) } } + +// the test mock is using `TransferStake` and so `DelegateStake` is not tested here. Extrinsics +// meant for `DelegateStake` should be gated. +// +// `DelegateStake` tests are in `pallet-nomination-pools-test-delegate-stake`. Since we support both +// strategies currently, we keep these tests as it is but in future we may remove `TransferStake` +// completely. +mod delegate_stake { + use super::*; + #[test] + fn delegation_specific_calls_are_gated() { + ExtBuilder::default().with_check(0).build_and_execute(|| { + // Given + Currency::set_balance(&11, ExistentialDeposit::get() + 2); + assert!(!PoolMembers::::contains_key(11)); + + // When + assert_ok!(Pools::join(RuntimeOrigin::signed(11), 2, 1)); + + // Then + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::Created { depositor: 10, pool_id: 1 }, + Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, + Event::Bonded { member: 11, pool_id: 1, bonded: 2, joined: true }, + ] + ); + + assert_eq!( + PoolMembers::::get(11).unwrap(), + PoolMember:: { pool_id: 1, points: 2, ..Default::default() } + ); + + // ensure pool 1 cannot be migrated. + assert_noop!( + Pools::migrate_pool_to_delegate_stake(RuntimeOrigin::signed(10), 1), + Error::::NotSupported + ); + + // members cannot be migrated either. 
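+ // (member migration is gated by the same `NotSupported` check as pool
+ // migration).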
+ assert_noop!( + Pools::migrate_delegation(RuntimeOrigin::signed(10), 11), + Error::::NotSupported + ); + + // Given + // The bonded balance is slashed in half + StakingMock::slash_by(1, 6); + + // since slash is greedy with `TransferStake`, `apply_slash` should not work either. + assert_noop!( + Pools::apply_slash(RuntimeOrigin::signed(10), 11), + Error::::NotSupported + ); + }); + } +} diff --git a/substrate/frame/nomination-pools/src/weights.rs b/substrate/frame/nomination-pools/src/weights.rs index 57ea8dc388f..21711a499b6 100644 --- a/substrate/frame/nomination-pools/src/weights.rs +++ b/substrate/frame/nomination-pools/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_nomination_pools` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-dcu62vjg-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_nomination_pools -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/nomination-pools/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_nomination_pools +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/nomination-pools/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -73,6 +71,10 @@ pub trait WeightInfo { fn set_claim_permission() -> Weight; fn claim_commission() -> Weight; fn adjust_pool_deposit() -> Weight; + fn apply_slash() -> Weight; + fn apply_slash_fail() -> Weight; + fn pool_migrate() -> Weight; + fn migrate_delegation() -> Weight; } /// Weights for `pallet_nomination_pools` using the Substrate node and recommended hardware. @@ -100,6 +102,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `NominationPools::MaxPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -112,11 +116,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn join() -> Weight { // Proof Size summary in bytes: - // Measured: `3425` + // Measured: `3458` // Estimated: `8877` - // Minimum execution time: 201_783_000 picoseconds. 
- Weight::from_parts(206_014_000, 8877) - .saturating_add(T::DbWeight::get().reads(20_u64)) + // Minimum execution time: 195_962_000 picoseconds. + Weight::from_parts(201_682_000, 8877) + .saturating_add(T::DbWeight::get().reads(21_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -133,6 +137,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -145,11 +151,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `3435` + // Measured: `3468` // Estimated: `8877` - // Minimum execution time: 204_124_000 picoseconds. - Weight::from_parts(207_910_000, 8877) - .saturating_add(T::DbWeight::get().reads(17_u64)) + // Minimum execution time: 197_466_000 picoseconds. + Weight::from_parts(201_356_000, 8877) + .saturating_add(T::DbWeight::get().reads(18_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) @@ -168,6 +174,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -180,11 +188,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_other() -> Weight { // Proof Size summary in bytes: - // Measured: `3500` + // Measured: `3533` // Estimated: `8877` - // Minimum execution time: 240_342_000 picoseconds. - Weight::from_parts(245_735_000, 8877) - .saturating_add(T::DbWeight::get().reads(18_u64)) + // Minimum execution time: 232_623_000 picoseconds. + Weight::from_parts(236_970_000, 8877) + .saturating_add(T::DbWeight::get().reads(19_u64)) .saturating_add(T::DbWeight::get().writes(14_u64)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) @@ -203,8 +211,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1172` // Estimated: `3719` - // Minimum execution time: 81_054_000 picoseconds. - Weight::from_parts(83_324_000, 3719) + // Minimum execution time: 77_992_000 picoseconds. 
+ Weight::from_parts(79_927_000, 3719) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -228,6 +236,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -242,11 +252,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `NominationPools::CounterForSubPoolsStorage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn unbond() -> Weight { // Proof Size summary in bytes: - // Measured: `3622` + // Measured: `3655` // Estimated: `27847` - // Minimum execution time: 188_835_000 picoseconds. - Weight::from_parts(192_565_000, 27847) - .saturating_add(T::DbWeight::get().reads(20_u64)) + // Minimum execution time: 182_368_000 picoseconds. + Weight::from_parts(185_387_000, 27847) + .saturating_add(T::DbWeight::get().reads(21_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) @@ -257,6 +267,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -268,13 +280,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1848` + // Measured: `1881` // Estimated: `4764` - // Minimum execution time: 73_556_000 picoseconds. - Weight::from_parts(76_075_881, 4764) - // Standard Error: 1_419 - .saturating_add(Weight::from_parts(54_476, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Minimum execution time: 72_179_000 picoseconds. 
+ Weight::from_parts(75_031_092, 4764) + // Standard Error: 1_487 + .saturating_add(Weight::from_parts(56_741, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -289,6 +301,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -306,13 +320,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2238` + // Measured: `2271` // Estimated: `27847` - // Minimum execution time: 144_177_000 picoseconds. - Weight::from_parts(148_686_524, 27847) - // Standard Error: 2_475 - .saturating_add(Weight::from_parts(77_460, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(12_u64)) + // Minimum execution time: 137_277_000 picoseconds. + Weight::from_parts(143_537_793, 27847) + // Standard Error: 3_049 + .saturating_add(Weight::from_parts(71_178, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(9_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -329,6 +343,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::SlashingSpans` (r:1 w:0) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:2 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:2 w:1) @@ -364,14 +380,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2525` + // Measured: `2558` // Estimated: `27847` - // Minimum execution time: 255_957_000 picoseconds. - Weight::from_parts(264_206_788, 27847) - // Standard Error: 4_229 - .saturating_add(Weight::from_parts(3_064, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(24_u64)) - .saturating_add(T::DbWeight::get().writes(20_u64)) + // Minimum execution time: 242_522_000 picoseconds. 
+ Weight::from_parts(250_740_608, 27847) + // Standard Error: 4_517 + .saturating_add(Weight::from_parts(13_231, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(25_u64)) + .saturating_add(T::DbWeight::get().writes(21_u64)) } /// Storage: `NominationPools::LastPoolId` (r:1 w:1) /// Proof: `NominationPools::LastPoolId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -393,12 +409,14 @@ impl WeightInfo for SubstrateWeight { /// Proof: `NominationPools::MaxPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:2 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:2 w:1) @@ -419,11 +437,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `1284` + // Measured: `1317` // Estimated: `8538` - // Minimum execution time: 193_527_000 picoseconds. - Weight::from_parts(197_140_000, 8538) - .saturating_add(T::DbWeight::get().reads(24_u64)) + // Minimum execution time: 182_740_000 picoseconds. + Weight::from_parts(188_820_000, 8538) + .saturating_add(T::DbWeight::get().reads(25_u64)) .saturating_add(T::DbWeight::get().writes(17_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) @@ -459,12 +477,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1976` + // Measured: `2009` // Estimated: `4556 + n * (2520 ยฑ0)` - // Minimum execution time: 86_054_000 picoseconds. - Weight::from_parts(88_743_932, 4556) - // Standard Error: 12_699 - .saturating_add(Weight::from_parts(1_829_097, 0).saturating_mul(n.into())) + // Minimum execution time: 83_649_000 picoseconds. 
+ Weight::from_parts(85_754_306, 4556) + // Standard Error: 12_757 + .saturating_add(Weight::from_parts(1_616_356, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) @@ -478,10 +496,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_state() -> Weight { // Proof Size summary in bytes: - // Measured: `1434` + // Measured: `1467` // Estimated: `4556` - // Minimum execution time: 34_544_000 picoseconds. - Weight::from_parts(35_910_000, 4556) + // Minimum execution time: 34_594_000 picoseconds. + Weight::from_parts(36_173_000, 4556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -496,10 +514,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3735` - // Minimum execution time: 14_111_000 picoseconds. - Weight::from_parts(15_204_218, 3735) - // Standard Error: 226 - .saturating_add(Weight::from_parts(1_291, 0).saturating_mul(n.into())) + // Minimum execution time: 13_945_000 picoseconds. + Weight::from_parts(14_764_062, 3735) + // Standard Error: 127 + .saturating_add(Weight::from_parts(1_406, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -519,8 +537,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_524_000 picoseconds. - Weight::from_parts(4_882_000, 0) + // Minimum execution time: 4_523_000 picoseconds. + Weight::from_parts(4_727_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) @@ -529,8 +547,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 17_975_000 picoseconds. - Weight::from_parts(18_549_000, 3719) + // Minimum execution time: 17_124_000 picoseconds. + Weight::from_parts(17_718_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -558,10 +576,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `2143` + // Measured: `2176` // Estimated: `4556` - // Minimum execution time: 81_574_000 picoseconds. - Weight::from_parts(83_519_000, 4556) + // Minimum execution time: 78_293_000 picoseconds. + Weight::from_parts(81_177_000, 4556) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -577,8 +595,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `804` // Estimated: `3719` - // Minimum execution time: 35_015_000 picoseconds. - Weight::from_parts(36_159_000, 3719) + // Minimum execution time: 33_105_000 picoseconds. + Weight::from_parts(34_106_000, 3719) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -590,8 +608,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `572` // Estimated: `3719` - // Minimum execution time: 17_775_000 picoseconds. 
- Weight::from_parts(18_358_000, 3719) + // Minimum execution time: 16_710_000 picoseconds. + Weight::from_parts(17_269_000, 3719) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -601,8 +619,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_997_000 picoseconds. - Weight::from_parts(18_041_000, 3719) + // Minimum execution time: 16_557_000 picoseconds. + Weight::from_parts(17_431_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -612,8 +630,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 17_000_000 picoseconds. - Weight::from_parts(17_807_000, 3719) + // Minimum execution time: 16_723_000 picoseconds. + Weight::from_parts(17_155_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -625,8 +643,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `542` // Estimated: `3702` - // Minimum execution time: 14_803_000 picoseconds. - Weight::from_parts(15_401_000, 3702) + // Minimum execution time: 14_667_000 picoseconds. + Weight::from_parts(15_242_000, 3702) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -642,8 +660,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1002` // Estimated: `3719` - // Minimum execution time: 69_759_000 picoseconds. - Weight::from_parts(71_985_000, 3719) + // Minimum execution time: 64_219_000 picoseconds. + Weight::from_parts(66_718_000, 3719) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -659,11 +677,58 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `901` // Estimated: `4764` - // Minimum execution time: 73_829_000 picoseconds. - Weight::from_parts(75_966_000, 4764) + // Minimum execution time: 70_284_000 picoseconds. + Weight::from_parts(71_375_000, 4764) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) + fn apply_slash() -> Weight { + // Proof Size summary in bytes: + // Measured: `694` + // Estimated: `3702` + // Minimum execution time: 13_403_000 picoseconds. + Weight::from_parts(14_064_000, 3702) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) + fn apply_slash_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `732` + // Estimated: `3702` + // Minimum execution time: 14_419_000 picoseconds. + Weight::from_parts(15_004_000, 3702) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + fn pool_migrate() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 759_000 picoseconds. 
+ Weight::from_parts(819_000, 0) + } + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::BondedPools` (r:1 w:0) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:0) + /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(24382), added: 26857, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinJoinBond` (r:1 w:0) + /// Proof: `NominationPools::MinJoinBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn migrate_delegation() -> Weight { + // Proof Size summary in bytes: + // Measured: `1648` + // Estimated: `27847` + // Minimum execution time: 36_192_000 picoseconds. + Weight::from_parts(37_038_000, 27847) + .saturating_add(T::DbWeight::get().reads(6_u64)) + } } // For backwards compatibility and tests. @@ -690,6 +755,8 @@ impl WeightInfo for () { /// Proof: `NominationPools::MaxPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -702,11 +769,11 @@ impl WeightInfo for () { /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn join() -> Weight { // Proof Size summary in bytes: - // Measured: `3425` + // Measured: `3458` // Estimated: `8877` - // Minimum execution time: 201_783_000 picoseconds. - Weight::from_parts(206_014_000, 8877) - .saturating_add(RocksDbWeight::get().reads(20_u64)) + // Minimum execution time: 195_962_000 picoseconds. 
+ Weight::from_parts(201_682_000, 8877) + .saturating_add(RocksDbWeight::get().reads(21_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -723,6 +790,8 @@ impl WeightInfo for () { /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -735,11 +804,11 @@ impl WeightInfo for () { /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `3435` + // Measured: `3468` // Estimated: `8877` - // Minimum execution time: 204_124_000 picoseconds. - Weight::from_parts(207_910_000, 8877) - .saturating_add(RocksDbWeight::get().reads(17_u64)) + // Minimum execution time: 197_466_000 picoseconds. + Weight::from_parts(201_356_000, 8877) + .saturating_add(RocksDbWeight::get().reads(18_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) @@ -758,6 +827,8 @@ impl WeightInfo for () { /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -770,11 +841,11 @@ impl WeightInfo for () { /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_other() -> Weight { // Proof Size summary in bytes: - // Measured: `3500` + // Measured: `3533` // Estimated: `8877` - // Minimum execution time: 240_342_000 picoseconds. - Weight::from_parts(245_735_000, 8877) - .saturating_add(RocksDbWeight::get().reads(18_u64)) + // Minimum execution time: 232_623_000 picoseconds. + Weight::from_parts(236_970_000, 8877) + .saturating_add(RocksDbWeight::get().reads(19_u64)) .saturating_add(RocksDbWeight::get().writes(14_u64)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) @@ -793,8 +864,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1172` // Estimated: `3719` - // Minimum execution time: 81_054_000 picoseconds. - Weight::from_parts(83_324_000, 3719) + // Minimum execution time: 77_992_000 picoseconds. 
+ Weight::from_parts(79_927_000, 3719) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -818,6 +889,8 @@ impl WeightInfo for () { /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -832,11 +905,11 @@ impl WeightInfo for () { /// Proof: `NominationPools::CounterForSubPoolsStorage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn unbond() -> Weight { // Proof Size summary in bytes: - // Measured: `3622` + // Measured: `3655` // Estimated: `27847` - // Minimum execution time: 188_835_000 picoseconds. - Weight::from_parts(192_565_000, 27847) - .saturating_add(RocksDbWeight::get().reads(20_u64)) + // Minimum execution time: 182_368_000 picoseconds. + Weight::from_parts(185_387_000, 27847) + .saturating_add(RocksDbWeight::get().reads(21_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) @@ -847,6 +920,8 @@ impl WeightInfo for () { /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -858,13 +933,13 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1848` + // Measured: `1881` // Estimated: `4764` - // Minimum execution time: 73_556_000 picoseconds. - Weight::from_parts(76_075_881, 4764) - // Standard Error: 1_419 - .saturating_add(Weight::from_parts(54_476, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Minimum execution time: 72_179_000 picoseconds. 
+ Weight::from_parts(75_031_092, 4764) + // Standard Error: 1_487 + .saturating_add(Weight::from_parts(56_741, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -879,6 +954,8 @@ impl WeightInfo for () { /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -896,13 +973,13 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2238` + // Measured: `2271` // Estimated: `27847` - // Minimum execution time: 144_177_000 picoseconds. - Weight::from_parts(148_686_524, 27847) - // Standard Error: 2_475 - .saturating_add(Weight::from_parts(77_460, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(12_u64)) + // Minimum execution time: 137_277_000 picoseconds. + Weight::from_parts(143_537_793, 27847) + // Standard Error: 3_049 + .saturating_add(Weight::from_parts(71_178, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(9_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -919,6 +996,8 @@ impl WeightInfo for () { /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::SlashingSpans` (r:1 w:0) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:2 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:2 w:1) @@ -954,14 +1033,14 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2525` + // Measured: `2558` // Estimated: `27847` - // Minimum execution time: 255_957_000 picoseconds. - Weight::from_parts(264_206_788, 27847) - // Standard Error: 4_229 - .saturating_add(Weight::from_parts(3_064, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(24_u64)) - .saturating_add(RocksDbWeight::get().writes(20_u64)) + // Minimum execution time: 242_522_000 picoseconds. 
+ Weight::from_parts(250_740_608, 27847) + // Standard Error: 4_517 + .saturating_add(Weight::from_parts(13_231, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(25_u64)) + .saturating_add(RocksDbWeight::get().writes(21_u64)) } /// Storage: `NominationPools::LastPoolId` (r:1 w:1) /// Proof: `NominationPools::LastPoolId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -983,12 +1062,14 @@ impl WeightInfo for () { /// Proof: `NominationPools::MaxPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:2 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:2 w:1) @@ -1009,11 +1090,11 @@ impl WeightInfo for () { /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `1284` + // Measured: `1317` // Estimated: `8538` - // Minimum execution time: 193_527_000 picoseconds. - Weight::from_parts(197_140_000, 8538) - .saturating_add(RocksDbWeight::get().reads(24_u64)) + // Minimum execution time: 182_740_000 picoseconds. + Weight::from_parts(188_820_000, 8538) + .saturating_add(RocksDbWeight::get().reads(25_u64)) .saturating_add(RocksDbWeight::get().writes(17_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) @@ -1049,12 +1130,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1976` + // Measured: `2009` // Estimated: `4556 + n * (2520 ยฑ0)` - // Minimum execution time: 86_054_000 picoseconds. - Weight::from_parts(88_743_932, 4556) - // Standard Error: 12_699 - .saturating_add(Weight::from_parts(1_829_097, 0).saturating_mul(n.into())) + // Minimum execution time: 83_649_000 picoseconds. 
+ Weight::from_parts(85_754_306, 4556) + // Standard Error: 12_757 + .saturating_add(Weight::from_parts(1_616_356, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(15_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(5_u64)) @@ -1068,10 +1149,10 @@ impl WeightInfo for () { /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_state() -> Weight { // Proof Size summary in bytes: - // Measured: `1434` + // Measured: `1467` // Estimated: `4556` - // Minimum execution time: 34_544_000 picoseconds. - Weight::from_parts(35_910_000, 4556) + // Minimum execution time: 34_594_000 picoseconds. + Weight::from_parts(36_173_000, 4556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1086,10 +1167,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3735` - // Minimum execution time: 14_111_000 picoseconds. - Weight::from_parts(15_204_218, 3735) - // Standard Error: 226 - .saturating_add(Weight::from_parts(1_291, 0).saturating_mul(n.into())) + // Minimum execution time: 13_945_000 picoseconds. + Weight::from_parts(14_764_062, 3735) + // Standard Error: 127 + .saturating_add(Weight::from_parts(1_406, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1109,8 +1190,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_524_000 picoseconds. - Weight::from_parts(4_882_000, 0) + // Minimum execution time: 4_523_000 picoseconds. + Weight::from_parts(4_727_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) @@ -1119,8 +1200,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 17_975_000 picoseconds. - Weight::from_parts(18_549_000, 3719) + // Minimum execution time: 17_124_000 picoseconds. + Weight::from_parts(17_718_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1148,10 +1229,10 @@ impl WeightInfo for () { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `2143` + // Measured: `2176` // Estimated: `4556` - // Minimum execution time: 81_574_000 picoseconds. - Weight::from_parts(83_519_000, 4556) + // Minimum execution time: 78_293_000 picoseconds. + Weight::from_parts(81_177_000, 4556) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -1167,8 +1248,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `804` // Estimated: `3719` - // Minimum execution time: 35_015_000 picoseconds. - Weight::from_parts(36_159_000, 3719) + // Minimum execution time: 33_105_000 picoseconds. + Weight::from_parts(34_106_000, 3719) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1180,8 +1261,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `572` // Estimated: `3719` - // Minimum execution time: 17_775_000 picoseconds. 
- Weight::from_parts(18_358_000, 3719) + // Minimum execution time: 16_710_000 picoseconds. + Weight::from_parts(17_269_000, 3719) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1191,8 +1272,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_997_000 picoseconds. - Weight::from_parts(18_041_000, 3719) + // Minimum execution time: 16_557_000 picoseconds. + Weight::from_parts(17_431_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1202,8 +1283,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 17_000_000 picoseconds. - Weight::from_parts(17_807_000, 3719) + // Minimum execution time: 16_723_000 picoseconds. + Weight::from_parts(17_155_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1215,8 +1296,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `542` // Estimated: `3702` - // Minimum execution time: 14_803_000 picoseconds. - Weight::from_parts(15_401_000, 3702) + // Minimum execution time: 14_667_000 picoseconds. + Weight::from_parts(15_242_000, 3702) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1232,8 +1313,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1002` // Estimated: `3719` - // Minimum execution time: 69_759_000 picoseconds. - Weight::from_parts(71_985_000, 3719) + // Minimum execution time: 64_219_000 picoseconds. + Weight::from_parts(66_718_000, 3719) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1249,9 +1330,56 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `901` // Estimated: `4764` - // Minimum execution time: 73_829_000 picoseconds. - Weight::from_parts(75_966_000, 4764) + // Minimum execution time: 70_284_000 picoseconds. + Weight::from_parts(71_375_000, 4764) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) + fn apply_slash() -> Weight { + // Proof Size summary in bytes: + // Measured: `694` + // Estimated: `3702` + // Minimum execution time: 13_403_000 picoseconds. + Weight::from_parts(14_064_000, 3702) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) + fn apply_slash_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `732` + // Estimated: `3702` + // Minimum execution time: 14_419_000 picoseconds. + Weight::from_parts(15_004_000, 3702) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + fn pool_migrate() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 759_000 picoseconds. 
+ Weight::from_parts(819_000, 0) + } + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::BondedPools` (r:1 w:0) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:0) + /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(24382), added: 26857, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinJoinBond` (r:1 w:0) + /// Proof: `NominationPools::MinJoinBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn migrate_delegation() -> Weight { + // Proof Size summary in bytes: + // Measured: `1648` + // Estimated: `27847` + // Minimum execution time: 36_192_000 picoseconds. + Weight::from_parts(37_038_000, 27847) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + } } diff --git a/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml new file mode 100644 index 00000000000..ea8eb206969 --- /dev/null +++ b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "pallet-nomination-pools-test-delegate-stake" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +homepage = "https://substrate.io" +repository.workspace = true +description = "FRAME nomination pools pallet tests with the staking pallet" +publish = false + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dev-dependencies] +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } +scale-info = { version = "2.11.1", features = ["derive"] } + +sp-runtime = { path = "../../../primitives/runtime" } +sp-io = { path = "../../../primitives/io" } +sp-std = { path = "../../../primitives/std" } +sp-staking = { path = "../../../primitives/staking" } +sp-core = { path = "../../../primitives/core" } + +frame-system = { path = "../../system" } +frame-support = { path = "../../support" } +frame-election-provider-support = { path = "../../election-provider-support" } + +pallet-timestamp = { path = "../../timestamp" } +pallet-balances = { path = "../../balances" } +pallet-staking = { path = "../../staking" } +pallet-delegated-staking = { path = "../../delegated-staking" } +pallet-bags-list = { path = "../../bags-list" } +pallet-staking-reward-curve = { path = "../../staking/reward-curve" } +pallet-nomination-pools = { path = ".." } + +sp-tracing = { path = "../../../primitives/tracing" } +log = { workspace = true, default-features = true } diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs new file mode 100644 index 00000000000..d3235760ed2 --- /dev/null +++ b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs @@ -0,0 +1,1158 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg(test)] + +mod mock; + +use frame_support::{ + assert_noop, assert_ok, + traits::{fungible::InspectHold, Currency}, +}; +use mock::*; +use pallet_nomination_pools::{ + BondExtra, BondedPools, Error as PoolsError, Event as PoolsEvent, LastPoolId, PoolMember, + PoolMembers, PoolState, +}; +use pallet_staking::{ + CurrentEra, Error as StakingError, Event as StakingEvent, Payee, RewardDestination, +}; + +use pallet_delegated_staking::{Error as DelegatedStakingError, Event as DelegatedStakingEvent}; + +use sp_runtime::{bounded_btree_map, traits::Zero}; + +#[test] +fn pool_lifecycle_e2e() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::minimum_balance(), 5); + assert_eq!(Staking::current_era(), None); + + // create the pool, we know this has id 1. + assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10)); + assert_eq!(LastPoolId::::get(), 1); + + // have the pool nominate. + assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3])); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 50 }] + ); + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::Created { depositor: 10, pool_id: 1 }, + PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 50, joined: true }, + ] + ); + + // have two members join + assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(21), 10, 1)); + + assert_eq!( + staking_events_since_last_call(), + vec![ + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, + ] + ); + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: 10, joined: true }, + PoolsEvent::Bonded { member: 21, pool_id: 1, bonded: 10, joined: true }, + ] + ); + + // pool goes into destroying + assert_ok!(Pools::set_state(RuntimeOrigin::signed(10), 1, PoolState::Destroying)); + + // depositor cannot unbond yet. + assert_noop!( + Pools::unbond(RuntimeOrigin::signed(10), 10, 50), + PoolsError::::MinimumBondNotMet, + ); + + // now the members want to unbond. 
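+ // with `BondingDuration = 3` in the mock and no active era yet (treated as era 0),
+ // the unlock chunks created below are scheduled for era 0 + 3 = 3, matching the
+ // `era: 3` in the `Unbonded` events asserted underneath.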
+ assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10));
+ assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10));
+
+ assert_eq!(PoolMembers::::get(20).unwrap().unbonding_eras.len(), 1);
+ assert_eq!(PoolMembers::::get(20).unwrap().points, 0);
+ assert_eq!(PoolMembers::::get(21).unwrap().unbonding_eras.len(), 1);
+ assert_eq!(PoolMembers::::get(21).unwrap().points, 0);
+
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![
+ StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
+ StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
+ ]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::StateChanged { pool_id: 1, new_state: PoolState::Destroying },
+ PoolsEvent::Unbonded { member: 20, pool_id: 1, points: 10, balance: 10, era: 3 },
+ PoolsEvent::Unbonded { member: 21, pool_id: 1, points: 10, balance: 10, era: 3 },
+ ]
+ );
+
+ // the depositor still cannot unbond
+ assert_noop!(
+ Pools::unbond(RuntimeOrigin::signed(10), 10, 50),
+ PoolsError::::MinimumBondNotMet,
+ );
+
+ for e in 1..BondingDuration::get() {
+ CurrentEra::::set(Some(e));
+ assert_noop!(
+ Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0),
+ PoolsError::::CannotWithdrawAny
+ );
+ }
+
+ // members are now unlocked.
+ CurrentEra::::set(Some(BondingDuration::get()));
+
+ // the depositor still cannot unbond
+ assert_noop!(
+ Pools::unbond(RuntimeOrigin::signed(10), 10, 50),
+ PoolsError::::MinimumBondNotMet,
+ );
+
+ // but members can now withdraw.
+ assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0));
+ assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(21), 21, 0));
+ assert!(PoolMembers::::get(20).is_none());
+ assert!(PoolMembers::::get(21).is_none());
+
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 20 },]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Withdrawn { member: 20, pool_id: 1, points: 10, balance: 10 },
+ PoolsEvent::MemberRemoved { pool_id: 1, member: 20 },
+ PoolsEvent::Withdrawn { member: 21, pool_id: 1, points: 10, balance: 10 },
+ PoolsEvent::MemberRemoved { pool_id: 1, member: 21 },
+ ]
+ );
+
+ // as soon as all members have left, the depositor can try to unbond, but since they
+ // still have an active nomination intention, they must chill first.
+ assert_noop!(
+ Pools::unbond(RuntimeOrigin::signed(10), 10, 50),
+ pallet_staking::Error::::InsufficientBond
+ );
+
+ assert_ok!(Pools::chill(RuntimeOrigin::signed(10), 1));
+ assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 50));
+
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![
+ StakingEvent::Chilled { stash: POOL1_BONDED },
+ StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 50 },
+ ]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Unbonded { member: 10, pool_id: 1, points: 50, balance: 50, era: 6 }]
+ );
+
+ // waiting another bonding duration:
+ CurrentEra::::set(Some(BondingDuration::get() * 2));
+ assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 1));
+
+ // the pool is fully destroyed now.
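+ // once the depositor, the last remaining member, withdraws, the pool account and all
+ // of its storage are removed, hence the final `Destroyed` event asserted below.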
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 50 },]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Withdrawn { member: 10, pool_id: 1, points: 50, balance: 50 },
+ PoolsEvent::MemberRemoved { pool_id: 1, member: 10 },
+ PoolsEvent::Destroyed { pool_id: 1 }
+ ]
+ );
+ })
+}
+
+#[test]
+fn pool_chill_e2e() {
+ new_test_ext().execute_with(|| {
+ assert_eq!(Balances::minimum_balance(), 5);
+ assert_eq!(Staking::current_era(), None);
+
+ // create the pool, we know this has id 1.
+ assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10));
+ assert_eq!(LastPoolId::::get(), 1);
+
+ // have the pool nominate.
+ assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]));
+
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 50 }]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Created { depositor: 10, pool_id: 1 },
+ PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 50, joined: true },
+ ]
+ );
+
+ // have two members join
+ assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1));
+ assert_ok!(Pools::join(RuntimeOrigin::signed(21), 10, 1));
+
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![
+ StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 },
+ StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 },
+ ]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: 10, joined: true },
+ PoolsEvent::Bonded { member: 21, pool_id: 1, bonded: 10, joined: true },
+ ]
+ );
+
+ // in case the depositor does not have more than `MinNominatorBond` staked, we can end
+ // up in a situation where a member unbonding would cause the pool balance to drop below
+ // `MinNominatorBond`, and hence would not be allowed. This can happen if the
+ // `MinNominatorBond` is increased after the pool is created.
+ assert_ok!(Staking::set_staking_configs(
+ RuntimeOrigin::root(),
+ pallet_staking::ConfigOp::Set(55), // minimum nominator bond
+ pallet_staking::ConfigOp::Noop,
+ pallet_staking::ConfigOp::Noop,
+ pallet_staking::ConfigOp::Noop,
+ pallet_staking::ConfigOp::Noop,
+ pallet_staking::ConfigOp::Noop,
+ pallet_staking::ConfigOp::Noop,
+ ));
+
+ // members can unbond as long as the total stake of the pool stays above the min
+ // nominator bond
+ assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10),);
+ assert_eq!(PoolMembers::::get(20).unwrap().unbonding_eras.len(), 1);
+ assert_eq!(PoolMembers::::get(20).unwrap().points, 0);
+
+ // this member cannot unbond since it will cause `pool stake < MinNominatorBond`
+ assert_noop!(
+ Pools::unbond(RuntimeOrigin::signed(21), 21, 10),
+ StakingError::::InsufficientBond,
+ );
+
+ // members can call `chill` permissionlessly now
+ assert_ok!(Pools::chill(RuntimeOrigin::signed(20), 1));
+
+ // now another member can unbond.
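+ // the pool chilled above, so staking's `MinNominatorBond` (55) no longer constrains
+ // unbonding from it.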
+ assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10));
+ assert_eq!(PoolMembers::::get(21).unwrap().unbonding_eras.len(), 1);
+ assert_eq!(PoolMembers::::get(21).unwrap().points, 0);
+
+ // the nominator cannot resume nominating until the depositor has enough stake
+ assert_noop!(
+ Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]),
+ PoolsError::::MinimumBondNotMet,
+ );
+
+ // other members joining the pool does not affect the depositor's ability to resume
+ // nomination
+ assert_ok!(Pools::join(RuntimeOrigin::signed(22), 10, 1));
+
+ assert_noop!(
+ Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]),
+ PoolsError::::MinimumBondNotMet,
+ );
+
+ // depositor can bond extra stake
+ assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(10)));
+
+ // `chill` cannot be called permissionlessly anymore
+ assert_noop!(
+ Pools::chill(RuntimeOrigin::signed(20), 1),
+ PoolsError::::NotNominator,
+ );
+
+ // now the nominator can resume nomination
+ assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]));
+
+ // skip to make the unbonding period end.
+ CurrentEra::::set(Some(BondingDuration::get()));
+
+ // members can now withdraw.
+ assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0));
+ assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(21), 21, 0));
+
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![
+ StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
+ StakingEvent::Chilled { stash: POOL1_BONDED },
+ StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
+ StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, // other member bonding
+ StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, // depositor bond extra
+ StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 20 },
+ ]
+ );
+ })
+}
+
+#[test]
+fn pool_slash_e2e() {
+ new_test_ext().execute_with(|| {
+ ExistentialDeposit::set(1);
+ assert_eq!(Balances::minimum_balance(), 1);
+ assert_eq!(Staking::current_era(), None);
+
+ // create the pool, we know this has id 1.
+ assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10));
+ assert_eq!(LastPoolId::::get(), 1);
+
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Created { depositor: 10, pool_id: 1 },
+ PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 40, joined: true },
+ ]
+ );
+
+ assert_eq!(
+ Payee::::get(POOL1_BONDED),
+ Some(RewardDestination::Account(POOL1_REWARD))
+ );
+
+ // have two members join
+ assert_ok!(Pools::join(RuntimeOrigin::signed(20), 20, 1));
+ assert_ok!(Pools::join(RuntimeOrigin::signed(21), 20, 1));
+
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![
+ StakingEvent::Bonded { stash: POOL1_BONDED, amount: 20 },
+ StakingEvent::Bonded { stash: POOL1_BONDED, amount: 20 }
+ ]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true },
+ PoolsEvent::Bonded { member: 21, pool_id: 1, bonded: 20, joined: true },
+ ]
+ );
+
+ // now let's progress a bit.
+ CurrentEra::::set(Some(1));
+
+ // 20 / 80 of the total funds are unlocked, and safe from any further slash.
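+ // pool funds are 40 (depositor) + 20 + 20 = 80; the two 10-unbonds below form era-4
+ // chunks, which unlock before the slash era used later and thus stay untouched.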
+ assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 10));
+ assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10));
+
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![
+ StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
+ StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }
+ ]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10, era: 4 },
+ PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10, era: 4 }
+ ]
+ );
+
+ CurrentEra::::set(Some(2));
+
+ // note: the depositor cannot fully unbond at this point.
+ // these funds will still get slashed.
+ assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 10));
+ assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10));
+ assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10));
+
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![
+ StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
+ StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
+ StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
+ ]
+ );
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10, era: 5 },
+ PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10, era: 5 },
+ PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: 10, points: 10, era: 5 },
+ ]
+ );
+
+ // At this point, 20 are safe from slash, 30 are unlocking but vulnerable to slash, and
+ // another 30 are active and vulnerable to slash. Let's slash half of them.
+ pallet_staking::slashing::do_slash::(
+ &POOL1_BONDED,
+ 30,
+ &mut Default::default(),
+ &mut Default::default(),
+ 2, // slash era 2, affects chunks at era 5 onwards.
+ );
+
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 30 }]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ // 30 has been slashed to 15 (15 slash)
+ PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 5, balance: 15 },
+ // 30 has been slashed to 15 (15 slash)
+ PoolsEvent::PoolSlashed { pool_id: 1, balance: 15 }
+ ]
+ );
+
+ CurrentEra::::set(Some(3));
+ assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10));
+
+ assert_eq!(
+ PoolMembers::::get(21).unwrap(),
+ PoolMember {
+ pool_id: 1,
+ points: 0,
+ last_recorded_reward_counter: Zero::zero(),
+ // the 10 points unlocked just now correspond to 5 points in the unbond pool.
+ unbonding_eras: bounded_btree_map!(5 => 10, 6 => 5)
+ }
+ );
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 5 }]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: 5, points: 5, era: 6 }]
+ );
+
+ // now we start withdrawing. We do it all at once, at era 6, where 20 and 21 are fully free.
+ CurrentEra::::set(Some(6));
+ assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0));
+ assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(21), 21, 0));
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ // 20 had unbonded 10 safely, and 10 got slashed by half.
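+ // i.e. 10 (era-4 chunk, untouched) + 10 / 2 (era-5 chunk, slashed by half) = 15.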
+ PoolsEvent::Withdrawn { member: 20, pool_id: 1, balance: 10 + 5, points: 20 }, + PoolsEvent::MemberRemoved { pool_id: 1, member: 20 }, + // 21 unbonded all of it after the slash + PoolsEvent::Withdrawn { member: 21, pool_id: 1, balance: 5 + 5, points: 15 }, + PoolsEvent::MemberRemoved { pool_id: 1, member: 21 } + ] + ); + assert_eq!( + staking_events_since_last_call(), + // a 10 (un-slashed) + 10/2 (slashed) balance from 10 has also been unlocked + vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 15 + 10 + 15 }] + ); + + // now, finally, we can unbond the depositor further than their current limit. + assert_ok!(Pools::set_state(RuntimeOrigin::signed(10), 1, PoolState::Destroying)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 20)); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }] + ); + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::StateChanged { pool_id: 1, new_state: PoolState::Destroying }, + PoolsEvent::Unbonded { member: 10, pool_id: 1, points: 10, balance: 10, era: 9 } + ] + ); + + CurrentEra::::set(Some(9)); + assert_eq!( + PoolMembers::::get(10).unwrap(), + PoolMember { + pool_id: 1, + points: 0, + last_recorded_reward_counter: Zero::zero(), + unbonding_eras: bounded_btree_map!(4 => 10, 5 => 10, 9 => 10) + } + ); + // withdraw the depositor, they should lose 12 balance in total due to slash. + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0)); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 10 }] + ); + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::Withdrawn { member: 10, pool_id: 1, balance: 10 + 15, points: 30 }, + PoolsEvent::MemberRemoved { pool_id: 1, member: 10 }, + PoolsEvent::Destroyed { pool_id: 1 } + ] + ); + }); +} + +#[test] +fn pool_slash_proportional() { + // a typical example where 3 pool members unbond in era 99, 100, and 101, and a slash that + // happened in era 100 should only affect the latter two. + new_test_ext().execute_with(|| { + ExistentialDeposit::set(1); + BondingDuration::set(28); + assert_eq!(Balances::minimum_balance(), 1); + assert_eq!(Staking::current_era(), None); + + // create the pool, we know this has id 1. 
+ assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); + assert_eq!(LastPoolId::::get(), 1); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }] + ); + assert_eq!( + delegated_staking_events_since_last_call(), + vec![DelegatedStakingEvent::Delegated { + agent: POOL1_BONDED, + delegator: 10, + amount: 40 + }] + ); + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::Created { depositor: 10, pool_id: 1 }, + PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 40, joined: true }, + ] + ); + + // have two members join + let bond = 20; + assert_ok!(Pools::join(RuntimeOrigin::signed(20), bond, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(21), bond, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(22), bond, 1)); + + assert_eq!( + staking_events_since_last_call(), + vec![ + StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }, + StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }, + StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }, + ] + ); + assert_eq!( + delegated_staking_events_since_last_call(), + vec![ + DelegatedStakingEvent::Delegated { + agent: POOL1_BONDED, + delegator: 20, + amount: bond + }, + DelegatedStakingEvent::Delegated { + agent: POOL1_BONDED, + delegator: 21, + amount: bond + }, + DelegatedStakingEvent::Delegated { + agent: POOL1_BONDED, + delegator: 22, + amount: bond + } + ] + ); + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: bond, joined: true }, + PoolsEvent::Bonded { member: 21, pool_id: 1, bonded: bond, joined: true }, + PoolsEvent::Bonded { member: 22, pool_id: 1, bonded: bond, joined: true }, + ] + ); + + // now let's progress a lot. + CurrentEra::::set(Some(99)); + + // and unbond + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, bond)); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond },] + ); + assert_eq!( + pool_events_since_last_call(), + vec![PoolsEvent::Unbonded { + member: 20, + pool_id: 1, + balance: bond, + points: bond, + era: 127 + }] + ); + + CurrentEra::::set(Some(100)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, bond)); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond },] + ); + assert_eq!( + pool_events_since_last_call(), + vec![PoolsEvent::Unbonded { + member: 21, + pool_id: 1, + balance: bond, + points: bond, + era: 128 + }] + ); + + CurrentEra::::set(Some(101)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(22), 22, bond)); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond },] + ); + assert_eq!( + pool_events_since_last_call(), + vec![PoolsEvent::Unbonded { + member: 22, + pool_id: 1, + balance: bond, + points: bond, + era: 129 + }] + ); + + // Apply a slash that happened in era 100. This is typically applied with a delay. + // Of the total 100, 50 is slashed. + assert_eq!(BondedPools::::get(1).unwrap().points, 40); + pallet_staking::slashing::do_slash::( + &POOL1_BONDED, + 50, + &mut Default::default(), + &mut Default::default(), + 100, + ); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 50 }] + ); + assert_eq!( + pool_events_since_last_call(), + vec![ + // This era got slashed 12.5, which rounded up to 13. 
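+ // the 50 slash is spread over 20 (era-128 chunk) + 20 (era-129 chunk) + 40 (bonded
+ // pool) = 80 at risk, so this chunk's share is 20 * 50 / 80 = 12.5 -> 13, leaving 7.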
+ PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 128, balance: 7 },
+ // This era got slashed 12 instead of 12.5 because an earlier chunk got 0.5 more
+ // slashed, and 12 is all the remaining slash
+ PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 129, balance: 8 },
+ // Bonded pool got slashed for 25, remaining 15 in it.
+ PoolsEvent::PoolSlashed { pool_id: 1, balance: 15 }
+ ]
+ );
+
+ // 21's balance in the pool is slashed.
+ assert_eq!(PoolMembers::::get(21).unwrap().total_balance(), 7);
+ // But their actual balance is still unslashed.
+ assert_eq!(Balances::total_balance_on_hold(&21), bond);
+ // apply the slash permissionlessly.
+ assert_ok!(Pools::apply_slash(RuntimeOrigin::signed(10), 21));
+ // the member's balance is now slashed.
+ assert_eq!(Balances::total_balance_on_hold(&21), 7);
+
+ assert_eq!(
+ delegated_staking_events_since_last_call(),
+ vec![DelegatedStakingEvent::Slashed {
+ agent: POOL1_BONDED,
+ delegator: 21,
+ amount: bond - 7
+ }]
+ );
+
+ // 22's balance isn't slashed yet either.
+ assert_eq!(PoolMembers::::get(22).unwrap().total_balance(), 8);
+ assert_eq!(Balances::total_balance_on_hold(&22), bond);
+
+ // they try to withdraw. This should apply the pending slash to them.
+ CurrentEra::::set(Some(129));
+ let pre_balance = Balances::free_balance(&22);
+ assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(22), 22, 0));
+ // all balance should be released.
+ assert_eq!(Balances::total_balance_on_hold(&22), 0);
+ assert_eq!(Balances::free_balance(&22), pre_balance + 8);
+
+ assert_eq!(
+ delegated_staking_events_since_last_call(),
+ vec![
+ DelegatedStakingEvent::Slashed {
+ agent: POOL1_BONDED,
+ delegator: 22,
+ amount: bond - 8
+ },
+ DelegatedStakingEvent::Released { agent: POOL1_BONDED, delegator: 22, amount: 8 },
+ ]
+ );
+ });
+}
+
+#[test]
+fn pool_slash_non_proportional_only_bonded_pool() {
+ // A typical example where a pool member unbonds in era 99, and they can get away with a slash
+ // that happened in era 100, as long as the pool has enough active bond to cover the slash. If
+ // everything else in the slashing/staking system works, this should always be the case.
+ // Nonetheless, `ledger.slash` has been written such that it will slash greedily from any chunk
+ // if it runs out of chunks that it thinks should be affected by the slash.
+ new_test_ext().execute_with(|| {
+ ExistentialDeposit::set(1);
+ BondingDuration::set(28);
+ assert_eq!(Balances::minimum_balance(), 1);
+ assert_eq!(Staking::current_era(), None);
+
+ // create the pool, we know this has id 1.
+ assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10));
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Created { depositor: 10, pool_id: 1 },
+ PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 40, joined: true },
+ ]
+ );
+
+ // have a member join
+ let bond = 20;
+ assert_ok!(Pools::join(RuntimeOrigin::signed(20), bond, 1));
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: bond, joined: true }]
+ );
+
+ // progress and unbond.
+ CurrentEra::::set(Some(99));
+ assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, bond));
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond }]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Unbonded {
+ member: 20,
+ pool_id: 1,
+ balance: bond,
+ points: bond,
+ era: 127
+ }]
+ );
+
+ // slash for 30. This will be deducted only from the bonded pool.
+ CurrentEra::::set(Some(100));
+ assert_eq!(BondedPools::::get(1).unwrap().points, 40);
+ pallet_staking::slashing::do_slash::(
+ &POOL1_BONDED,
+ 30,
+ &mut Default::default(),
+ &mut Default::default(),
+ 100,
+ );
+
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 30 }]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::PoolSlashed { pool_id: 1, balance: 10 }]
+ );
+ });
+}
+
+#[test]
+fn pool_slash_non_proportional_bonded_pool_and_chunks() {
+ // An uncommon example where even though some funds are unlocked such that they should not be
+ // affected by a slash, we still slash out of them. This should not happen at all. If a
+ // nominator has unbonded, their exposure will drop from the next era onwards, so if a slash
+ // happens in that era, their share of it should naturally be smaller, such that their
+ // active ledger stake alone is enough to cover it.
+ new_test_ext().execute_with(|| {
+ ExistentialDeposit::set(1);
+ BondingDuration::set(28);
+ assert_eq!(Balances::minimum_balance(), 1);
+ assert_eq!(Staking::current_era(), None);
+
+ // create the pool, we know this has id 1.
+ assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10));
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Created { depositor: 10, pool_id: 1 },
+ PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 40, joined: true },
+ ]
+ );
+
+ // have a member join
+ let bond = 20;
+ assert_ok!(Pools::join(RuntimeOrigin::signed(20), bond, 1));
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: bond, joined: true }]
+ );
+
+ // progress and unbond.
+ CurrentEra::::set(Some(99));
+ assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, bond));
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond }]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![PoolsEvent::Unbonded {
+ member: 20,
+ pool_id: 1,
+ balance: bond,
+ points: bond,
+ era: 127
+ }]
+ );
+
+ // slash 50. This will be deducted only from the bonded pool and one of the unbonding pools.
+ CurrentEra::::set(Some(100));
+ assert_eq!(BondedPools::::get(1).unwrap().points, 40);
+ pallet_staking::slashing::do_slash::(
+ &POOL1_BONDED,
+ 50,
+ &mut Default::default(),
+ &mut Default::default(),
+ 100,
+ );
+
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 50 }]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ // out of 20, 10 was taken.
+ PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 127, balance: 10 },
+ // out of 40, all was taken.
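+ // the era-127 chunk contributed 10, so the bonded pool must cover the remaining
+ // 40 of the 50 slash, wiping it out entirely: 10 + 40 = 50.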
+ PoolsEvent::PoolSlashed { pool_id: 1, balance: 0 } + ] + ); + }); +} +#[test] +fn pool_migration_e2e() { + new_test_ext().execute_with(|| { + LegacyAdapter::set(true); + assert_eq!(Balances::minimum_balance(), 5); + assert_eq!(Staking::current_era(), None); + + // create the pool with TransferStake strategy. + assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10)); + assert_eq!(LastPoolId::::get(), 1); + + // have the pool nominate. + assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3])); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 50 }] + ); + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::Created { depositor: 10, pool_id: 1 }, + PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 50, joined: true }, + ] + ); + + // have three members join + let pre_20 = Balances::free_balance(20); + assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1)); + let pre_21 = Balances::free_balance(21); + assert_ok!(Pools::join(RuntimeOrigin::signed(21), 10, 1)); + let pre_22 = Balances::free_balance(22); + assert_ok!(Pools::join(RuntimeOrigin::signed(22), 10, 1)); + + // verify members balance is moved to pool. + assert_eq!(Balances::free_balance(20), pre_20 - 10); + assert_eq!(Balances::free_balance(21), pre_21 - 10); + assert_eq!(Balances::free_balance(22), pre_22 - 10); + + assert_eq!( + staking_events_since_last_call(), + vec![ + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, + ] + ); + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: 10, joined: true }, + PoolsEvent::Bonded { member: 21, pool_id: 1, bonded: 10, joined: true }, + PoolsEvent::Bonded { member: 22, pool_id: 1, bonded: 10, joined: true }, + ] + ); + + CurrentEra::::set(Some(2)); + // 20 is partially unbonding + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 5)); + + CurrentEra::::set(Some(3)); + // 21 is fully unbonding + assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10)); + + assert_eq!( + staking_events_since_last_call(), + vec![ + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 5 }, + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, + ] + ); + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: 5, points: 5, era: 5 }, + PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: 10, points: 10, era: 6 }, + ] + ); + + // with `TransferStake`, we can't migrate. + assert_noop!( + Pools::migrate_pool_to_delegate_stake(RuntimeOrigin::signed(10), 1), + PoolsError::::NotSupported + ); + + // we reset the adapter to `DelegateStake`. + LegacyAdapter::set(false); + + // cannot migrate the member delegation unless pool is migrated first. + assert_noop!( + Pools::migrate_delegation(RuntimeOrigin::signed(10), 20), + PoolsError::::PoolNotMigrated + ); + + // migrate the pool. + assert_ok!(Pools::migrate_pool_to_delegate_stake(RuntimeOrigin::signed(10), 1)); + + // migrate again does not work. + assert_noop!( + Pools::migrate_pool_to_delegate_stake(RuntimeOrigin::signed(10), 1), + PoolsError::::PoolAlreadyMigrated + ); + + // unclaimed delegations to the pool are stored in this account. 
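+ // the entire pre-migration pool balance (50 from the depositor plus 3 * 10 from the
+ // members, matching the `Delegated` amount below) is parked under this proxy
+ // delegator until each member claims it via `migrate_delegation`.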
+ let proxy_delegator_1 = DelegatedStaking::generate_proxy_delegator(POOL1_BONDED);
+
+ assert_eq!(
+ delegated_staking_events_since_last_call(),
+ vec![DelegatedStakingEvent::Delegated {
+ agent: POOL1_BONDED,
+ delegator: proxy_delegator_1,
+ amount: 50 + 10 * 3
+ }]
+ );
+
+ // move to era 5 when 20 can withdraw unbonded funds.
+ CurrentEra::::set(Some(5));
+ // Unbond works even without claiming delegation. Let's unbond 22.
+ assert_ok!(Pools::unbond(RuntimeOrigin::signed(22), 22, 5));
+
+ // withdraw fails for 20 before claiming delegation
+ assert_noop!(
+ Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 10),
+ DelegatedStakingError::::NotDelegator
+ );
+
+ let pre_claim_balance_20 = Balances::total_balance(&20);
+ assert_eq!(Balances::total_balance_on_hold(&20), 0);
+
+ // migrate delegation for 20. This is permissionless and can be called by anyone.
+ assert_ok!(Pools::migrate_delegation(RuntimeOrigin::signed(10), 20));
+
+ // tokens moved to 20's account and held there.
+ assert_eq!(Balances::total_balance(&20), pre_claim_balance_20 + 10);
+ assert_eq!(Balances::total_balance_on_hold(&20), 10);
+
+ // withdraw works now
+ assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 5));
+
+ // balance unlocked in 20's account
+ assert_eq!(Balances::total_balance_on_hold(&20), 5);
+ assert_eq!(Balances::total_balance(&20), pre_claim_balance_20 + 10);
+
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![
+ StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 5 },
+ StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 5 }
+ ]
+ );
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Unbonded { member: 22, pool_id: 1, balance: 5, points: 5, era: 8 },
+ PoolsEvent::Withdrawn { member: 20, pool_id: 1, balance: 5, points: 5 },
+ ]
+ );
+ assert_eq!(
+ delegated_staking_events_since_last_call(),
+ vec![
+ DelegatedStakingEvent::MigratedDelegation {
+ agent: POOL1_BONDED,
+ delegator: 20,
+ amount: 10
+ },
+ DelegatedStakingEvent::Released { agent: POOL1_BONDED, delegator: 20, amount: 5 }
+ ]
+ );
+
+ // MIGRATE 21
+ let pre_migrate_balance_21 = Balances::total_balance(&21);
+ assert_eq!(Balances::total_balance_on_hold(&21), 0);
+
+ // migrate delegation for 21.
+ assert_ok!(Pools::migrate_delegation(RuntimeOrigin::signed(10), 21));
+
+ // tokens moved to 21's account and held there.
+ assert_eq!(Balances::total_balance(&21), pre_migrate_balance_21 + 10);
+ assert_eq!(Balances::total_balance_on_hold(&21), 10);
+
+ // withdraw fails since 21's funds only unlock at era 6.
+ assert_noop!(
+ Pools::withdraw_unbonded(RuntimeOrigin::signed(21), 21, 10),
+ PoolsError::::CannotWithdrawAny
+ );
+
+ // go to the era when 21 can withdraw
+ CurrentEra::::set(Some(6));
+
+ // withdraw works now
+ assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(21), 21, 10));
+
+ // all balance unlocked in 21's account
+ assert_eq!(Balances::total_balance_on_hold(&21), 0);
+ assert_eq!(Balances::total_balance(&21), pre_migrate_balance_21 + 10);
+
+ // MIGRATE 22
+ let pre_migrate_balance_22 = Balances::total_balance(&22);
+ assert_eq!(Balances::total_balance_on_hold(&22), 0);
+
+ // migrate delegation for 22.
+ assert_ok!(Pools::migrate_delegation(RuntimeOrigin::signed(10), 22));
+
+ // tokens moved to 22's account and held there.
+ assert_eq!(Balances::total_balance(&22), pre_migrate_balance_22 + 10);
+ assert_eq!(Balances::total_balance_on_hold(&22), 10);
+
+ // withdraw fails since 22's funds only unlock at era 8.
+ assert_noop!(
+ Pools::withdraw_unbonded(RuntimeOrigin::signed(22), 22, 5),
+ PoolsError::::CannotWithdrawAny
+ );
+
+ // go to the era when 22 can withdraw
+ CurrentEra::::set(Some(10));
+
+ // withdraw works now
+ assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(22), 22, 10));
+
+ // balance of 5 unlocked in 22's account
+ assert_eq!(Balances::total_balance_on_hold(&22), 10 - 5);
+
+ // assert events for 21 and 22.
+ assert_eq!(
+ staking_events_since_last_call(),
+ vec![
+ StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 10 },
+ StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 5 }
+ ]
+ );
+
+ assert_eq!(
+ pool_events_since_last_call(),
+ vec![
+ PoolsEvent::Withdrawn { member: 21, pool_id: 1, balance: 10, points: 10 },
+ // 21 was fully unbonded and removed from the pool.
+ PoolsEvent::MemberRemoved { member: 21, pool_id: 1 },
+ PoolsEvent::Withdrawn { member: 22, pool_id: 1, balance: 5, points: 5 },
+ ]
+ );
+ assert_eq!(
+ delegated_staking_events_since_last_call(),
+ vec![
+ DelegatedStakingEvent::MigratedDelegation {
+ agent: POOL1_BONDED,
+ delegator: 21,
+ amount: 10
+ },
+ DelegatedStakingEvent::Released { agent: POOL1_BONDED, delegator: 21, amount: 10 },
+ DelegatedStakingEvent::MigratedDelegation {
+ agent: POOL1_BONDED,
+ delegator: 22,
+ amount: 10
+ },
+ DelegatedStakingEvent::Released { agent: POOL1_BONDED, delegator: 22, amount: 5 }
+ ]
+ );
+ })
+}
diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs
new file mode 100644
index 00000000000..1c0a0166fd9
--- /dev/null
+++ b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs
@@ -0,0 +1,406 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
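+
+//! Mock runtime for the delegate-stake nomination-pools tests. The `MockAdapter`
+//! defined below forwards every `StakeStrategy` call to either `TransferStake` or
+//! `DelegateStake`, switching on the `LegacyAdapter` flag, so tests can start a pool
+//! on the legacy strategy and exercise the migration to delegated staking.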
+ +use frame_election_provider_support::VoteWeight; +use frame_support::{ + assert_ok, derive_impl, + pallet_prelude::*, + parameter_types, + traits::{ConstU64, ConstU8}, + PalletId, +}; +use frame_system::EnsureRoot; +use pallet_nomination_pools::{adapter::StakeStrategyType, BondType}; +use sp_runtime::{ + traits::{Convert, IdentityLookup}, + BuildStorage, FixedU128, Perbill, +}; + +type AccountId = u128; +type Nonce = u32; +type BlockNumber = u64; +type Balance = u128; + +pub(crate) type T = Runtime; + +pub(crate) const POOL1_BONDED: AccountId = 20318131474730217858575332831085u128; +pub(crate) const POOL1_REWARD: AccountId = 20397359637244482196168876781421u128; + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type Nonce = Nonce; + type RuntimeCall = RuntimeCall; + type Hash = sp_core::H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = ConstU64<5>; + type WeightInfo = (); +} + +parameter_types! { + pub static ExistentialDeposit: Balance = 5; +} + +impl pallet_balances::Config for Runtime { + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); + type FreezeIdentifier = RuntimeFreezeReason; + type MaxFreezes = ConstU32<1>; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = (); +} + +pallet_staking_reward_curve::build! { + const I_NPOS: sp_runtime::curve::PiecewiseLinear<'static> = curve!( + min_inflation: 0_025_000, + max_inflation: 0_100_000, + ideal_stake: 0_500_000, + falloff: 0_050_000, + max_piece_count: 40, + test_precision: 0_005_000, + ); +} + +parameter_types! 
{ + pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; + pub static BondingDuration: u32 = 3; +} + +impl pallet_staking::Config for Runtime { + type Currency = Balances; + type CurrencyBalance = Balance; + type UnixTime = pallet_timestamp::Pallet; + type CurrencyToVote = (); + type RewardRemainder = (); + type RuntimeEvent = RuntimeEvent; + type Slash = (); + type Reward = (); + type SessionsPerEra = (); + type SlashDeferDuration = (); + type AdminOrigin = frame_system::EnsureRoot; + type BondingDuration = BondingDuration; + type SessionInterface = (); + type EraPayout = pallet_staking::ConvertCurve; + type NextNewSession = (); + type MaxExposurePageSize = ConstU32<64>; + type ElectionProvider = + frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; + type GenesisElectionProvider = Self::ElectionProvider; + type VoterList = VoterList; + type TargetList = pallet_staking::UseValidatorsMap; + type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; + type MaxUnlockingChunks = ConstU32<32>; + type MaxControllersInDeprecationBatch = ConstU32<100>; + type HistoryDepth = ConstU32<84>; + type EventListeners = (Pools, DelegatedStaking); + type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; + type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; +} + +parameter_types! { + pub static BagThresholds: &'static [VoteWeight] = &[10, 20, 30, 40, 50, 60, 1_000, 2_000, 10_000]; +} + +type VoterBagsListInstance = pallet_bags_list::Instance1; +impl pallet_bags_list::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + type BagThresholds = BagThresholds; + type ScoreProvider = Staking; + type Score = VoteWeight; +} + +pub struct BalanceToU256; +impl Convert for BalanceToU256 { + fn convert(n: Balance) -> sp_core::U256 { + n.into() + } +} + +pub struct U256ToBalance; +impl Convert for U256ToBalance { + fn convert(n: sp_core::U256) -> Balance { + n.try_into().unwrap() + } +} + +parameter_types! 
{ + pub const PostUnbondingPoolsWindow: u32 = 10; + pub const PoolsPalletId: PalletId = PalletId(*b"py/nopls"); + pub static LegacyAdapter: bool = false; +} + +pub struct MockAdapter; +type DelegateStake = + pallet_nomination_pools::adapter::DelegateStake; +type TransferStake = pallet_nomination_pools::adapter::TransferStake; +impl pallet_nomination_pools::adapter::StakeStrategy for MockAdapter { + type Balance = Balance; + type AccountId = AccountId; + type CoreStaking = Staking; + + fn strategy_type() -> StakeStrategyType { + if LegacyAdapter::get() { + return TransferStake::strategy_type() + } + DelegateStake::strategy_type() + } + fn transferable_balance(pool_account: &Self::AccountId) -> Self::Balance { + if LegacyAdapter::get() { + return TransferStake::transferable_balance(pool_account) + } + DelegateStake::transferable_balance(pool_account) + } + + fn total_balance(pool_account: &Self::AccountId) -> Self::Balance { + if LegacyAdapter::get() { + return TransferStake::total_balance(pool_account) + } + DelegateStake::total_balance(pool_account) + } + + fn member_delegation_balance(member_account: &Self::AccountId) -> Self::Balance { + if LegacyAdapter::get() { + return TransferStake::member_delegation_balance(member_account) + } + DelegateStake::member_delegation_balance(member_account) + } + + fn pledge_bond( + who: &Self::AccountId, + pool_account: &Self::AccountId, + reward_account: &Self::AccountId, + amount: Self::Balance, + bond_type: BondType, + ) -> DispatchResult { + if LegacyAdapter::get() { + return TransferStake::pledge_bond(who, pool_account, reward_account, amount, bond_type) + } + DelegateStake::pledge_bond(who, pool_account, reward_account, amount, bond_type) + } + + fn member_withdraw( + who: &Self::AccountId, + pool_account: &Self::AccountId, + amount: Self::Balance, + num_slashing_spans: u32, + ) -> DispatchResult { + if LegacyAdapter::get() { + return TransferStake::member_withdraw(who, pool_account, amount, num_slashing_spans) + } + DelegateStake::member_withdraw(who, pool_account, amount, num_slashing_spans) + } + + fn has_pending_slash(pool_account: &Self::AccountId) -> bool { + if LegacyAdapter::get() { + return TransferStake::has_pending_slash(pool_account) + } + DelegateStake::has_pending_slash(pool_account) + } + + fn member_slash( + who: &Self::AccountId, + pool_account: &Self::AccountId, + amount: Self::Balance, + maybe_reporter: Option, + ) -> DispatchResult { + if LegacyAdapter::get() { + return TransferStake::member_slash(who, pool_account, amount, maybe_reporter) + } + DelegateStake::member_slash(who, pool_account, amount, maybe_reporter) + } + + fn migrate_nominator_to_agent( + agent: &Self::AccountId, + reward_account: &Self::AccountId, + ) -> DispatchResult { + if LegacyAdapter::get() { + return TransferStake::migrate_nominator_to_agent(agent, reward_account) + } + DelegateStake::migrate_nominator_to_agent(agent, reward_account) + } + + fn migrate_delegation( + agent: &Self::AccountId, + delegator: &Self::AccountId, + value: Self::Balance, + ) -> DispatchResult { + if LegacyAdapter::get() { + return TransferStake::migrate_delegation(agent, delegator, value) + } + DelegateStake::migrate_delegation(agent, delegator, value) + } +} +impl pallet_nomination_pools::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + type Currency = Balances; + type RuntimeFreezeReason = RuntimeFreezeReason; + type RewardCounter = FixedU128; + type BalanceToU256 = BalanceToU256; + type U256ToBalance = U256ToBalance; + type StakeAdapter = 
MockAdapter; + type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow; + type MaxMetadataLen = ConstU32<256>; + type MaxUnbonding = ConstU32<8>; + type MaxPointsToBalance = ConstU8<10>; + type PalletId = PoolsPalletId; + type AdminOrigin = EnsureRoot; +} + +parameter_types! { + pub const DelegatedStakingPalletId: PalletId = PalletId(*b"py/dlstk"); + pub const SlashRewardFraction: Perbill = Perbill::from_percent(1); +} +impl pallet_delegated_staking::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type PalletId = DelegatedStakingPalletId; + type Currency = Balances; + type OnSlash = (); + type SlashRewardFraction = SlashRewardFraction; + type RuntimeHoldReason = RuntimeHoldReason; + type CoreStaking = Staking; +} +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Runtime { + System: frame_system, + Timestamp: pallet_timestamp, + Balances: pallet_balances, + Staking: pallet_staking, + VoterList: pallet_bags_list::, + Pools: pallet_nomination_pools, + DelegatedStaking: pallet_delegated_staking, + } +); + +pub fn new_test_ext() -> sp_io::TestExternalities { + sp_tracing::try_init_simple(); + let mut storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let _ = pallet_nomination_pools::GenesisConfig:: { + min_join_bond: 2, + min_create_bond: 2, + max_pools: Some(3), + max_members_per_pool: Some(5), + max_members: Some(3 * 5), + global_max_commission: Some(Perbill::from_percent(90)), + } + .assimilate_storage(&mut storage) + .unwrap(); + + let _ = pallet_balances::GenesisConfig:: { + balances: vec![(10, 100), (20, 100), (21, 100), (22, 100)], + } + .assimilate_storage(&mut storage) + .unwrap(); + + let mut ext = sp_io::TestExternalities::from(storage); + + ext.execute_with(|| { + // for events to be deposited. + frame_system::Pallet::::set_block_number(1); + + // set some limit for nominations. + assert_ok!(Staking::set_staking_configs( + RuntimeOrigin::root(), + pallet_staking::ConfigOp::Set(10), // minimum nominator bond + pallet_staking::ConfigOp::Noop, + pallet_staking::ConfigOp::Noop, + pallet_staking::ConfigOp::Noop, + pallet_staking::ConfigOp::Noop, + pallet_staking::ConfigOp::Noop, + pallet_staking::ConfigOp::Noop, + )); + }); + + ext +} + +parameter_types! 
{ + static ObservedEventsPools: usize = 0; + static ObservedEventsStaking: usize = 0; + static ObservedEventsBalances: usize = 0; + static ObservedEventsDelegatedStaking: usize = 0; +} + +pub(crate) fn pool_events_since_last_call() -> Vec> { + let events = System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| if let RuntimeEvent::Pools(inner) = e { Some(inner) } else { None }) + .collect::>(); + let already_seen = ObservedEventsPools::get(); + ObservedEventsPools::set(events.len()); + events.into_iter().skip(already_seen).collect() +} + +pub(crate) fn staking_events_since_last_call() -> Vec> { + let events = System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| if let RuntimeEvent::Staking(inner) = e { Some(inner) } else { None }) + .collect::>(); + let already_seen = ObservedEventsStaking::get(); + ObservedEventsStaking::set(events.len()); + events.into_iter().skip(already_seen).collect() +} + +pub(crate) fn delegated_staking_events_since_last_call( +) -> Vec> { + let events = System::events() + .into_iter() + .map(|r| r.event) + .filter_map( + |e| if let RuntimeEvent::DelegatedStaking(inner) = e { Some(inner) } else { None }, + ) + .collect::>(); + let already_seen = ObservedEventsDelegatedStaking::get(); + ObservedEventsDelegatedStaking::set(events.len()); + events.into_iter().skip(already_seen).collect() +} diff --git a/substrate/frame/nomination-pools/test-staking/Cargo.toml b/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml similarity index 96% rename from substrate/frame/nomination-pools/test-staking/Cargo.toml rename to substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml index ada52db6de5..5f9bc9af3a2 100644 --- a/substrate/frame/nomination-pools/test-staking/Cargo.toml +++ b/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "pallet-nomination-pools-test-staking" +name = "pallet-nomination-pools-test-transfer-stake" version = "1.0.0" authors.workspace = true edition.workspace = true diff --git a/substrate/frame/nomination-pools/test-staking/src/lib.rs b/substrate/frame/nomination-pools/test-transfer-stake/src/lib.rs similarity index 100% rename from substrate/frame/nomination-pools/test-staking/src/lib.rs rename to substrate/frame/nomination-pools/test-transfer-stake/src/lib.rs diff --git a/substrate/frame/nomination-pools/test-staking/src/mock.rs b/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs similarity index 98% rename from substrate/frame/nomination-pools/test-staking/src/mock.rs rename to substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs index 93a05ddfae9..0970570453b 100644 --- a/substrate/frame/nomination-pools/test-staking/src/mock.rs +++ b/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs @@ -180,7 +180,7 @@ impl pallet_nomination_pools::Config for Runtime { type RewardCounter = FixedU128; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; - type Staking = Staking; + type StakeAdapter = pallet_nomination_pools::adapter::TransferStake; type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow; type MaxMetadataLen = ConstU32<256>; type MaxUnbonding = ConstU32<8>; diff --git a/substrate/frame/staking/src/ledger.rs b/substrate/frame/staking/src/ledger.rs index 67a86b86226..294918376d8 100644 --- a/substrate/frame/staking/src/ledger.rs +++ b/substrate/frame/staking/src/ledger.rs @@ -35,7 +35,7 @@ use frame_support::{ defensive, ensure, traits::{Defensive, LockableCurrency}, }; -use 
sp_staking::StakingAccount;
+use sp_staking::{StakingAccount, StakingInterface};
 use sp_std::prelude::*;

 use crate::{
diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs
index 4f91fd6dff2..053ecdef2b0 100644
--- a/substrate/frame/staking/src/lib.rs
+++ b/substrate/frame/staking/src/lib.rs
@@ -361,7 +361,7 @@ pub type BalanceOf = ::CurrencyBalance;
 type PositiveImbalanceOf = <::Currency as Currency<
 ::AccountId,
 >>::PositiveImbalance;
-type NegativeImbalanceOf = <::Currency as Currency<
+pub type NegativeImbalanceOf = <::Currency as Currency<
 ::AccountId,
 >>::NegativeImbalance;
diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs
index 52361c6ccdc..90374451a3a 100644
--- a/substrate/frame/staking/src/pallet/impls.rs
+++ b/substrate/frame/staking/src/pallet/impls.rs
@@ -1161,11 +1161,6 @@ impl Pallet {
 ) -> Exposure> {
 EraInfo::::get_full_exposure(era, account)
 }
-
- /// Whether `who` is a virtual staker whose funds are managed by another pallet.
- pub(crate) fn is_virtual_staker(who: &T::AccountId) -> bool {
- VirtualStakers::::contains_key(who)
- }
 }

 impl Pallet {
@@ -1885,6 +1880,11 @@ impl StakingInterface for Pallet {
 }
 }

+ /// Whether `who` is a virtual staker whose funds are managed by another pallet.
+ fn is_virtual_staker(who: &T::AccountId) -> bool {
+ VirtualStakers::::contains_key(who)
+ }
+
 fn slash_reward_fraction() -> Perbill {
 SlashRewardFraction::::get()
 }
diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs
index f82266c0390..284a801a0f0 100644
--- a/substrate/frame/staking/src/pallet/mod.rs
+++ b/substrate/frame/staking/src/pallet/mod.rs
@@ -39,6 +39,7 @@ use sp_runtime::{
 use sp_staking::{
 EraIndex, Page, SessionIndex,
 StakingAccount::{self, Controller, Stash},
+ StakingInterface,
 };
 use sp_std::prelude::*;
diff --git a/substrate/frame/staking/src/slashing.rs b/substrate/frame/staking/src/slashing.rs
index f831f625957..1fe608cd335 100644
--- a/substrate/frame/staking/src/slashing.rs
+++ b/substrate/frame/staking/src/slashing.rs
@@ -64,7 +64,7 @@ use sp_runtime::{
 traits::{Saturating, Zero},
 DispatchResult, RuntimeDebug,
 };
-use sp_staking::EraIndex;
+use sp_staking::{EraIndex, StakingInterface};
 use sp_std::vec::Vec;

 /// The proportion of the slashing reward to be paid out on the first slashing detection.
diff --git a/substrate/primitives/staking/src/lib.rs b/substrate/primitives/staking/src/lib.rs
index c7045508cea..28a61cd4331 100644
--- a/substrate/primitives/staking/src/lib.rs
+++ b/substrate/primitives/staking/src/lib.rs
@@ -285,6 +285,13 @@ pub trait StakingInterface {
 Self::status(who).map(|s| matches!(s, StakerStatus::Validator)).unwrap_or(false)
 }

+ /// Checks whether the staker is a virtual account.
+ ///
+ /// A virtual staker is an account whose locks are not managed by the [`StakingInterface`]
+ /// implementation but by an external pallet. See [`StakingUnchecked::virtual_bond`] for more
+ /// details.
+ fn is_virtual_staker(who: &Self::AccountId) -> bool;
+
 /// Get the nominations of a stash, if they are a nominator, `None` otherwise.
 fn nominations(who: &Self::AccountId) -> Option> {
 match Self::status(who) {
@@ -573,6 +580,12 @@ pub trait DelegationMigrator {
 delegator: &Self::AccountId,
 value: Self::Balance,
 ) -> DispatchResult;
+
+ /// Drop the `Agent` account and its associated delegators.
+ ///
+ /// The agent is also removed from [`StakingUnchecked`] as a virtual staker. Useful for testing.
+ #[cfg(feature = "runtime-benchmarks")]
+ fn drop_agent(agent: &Self::AccountId);
 }

 sp_core::generate_feature_enabled_macro!(runtime_benchmarks_enabled, feature = "runtime-benchmarks", $);
--
GitLab

From fd161917108a14e791c1444ea1c767e9f6134bdf Mon Sep 17 00:00:00 2001
From: Kian Paimani <5588131+kianenigma@users.noreply.github.com>
Date: Thu, 23 May 2024 09:03:14 +0100
Subject: [PATCH 052/106] Fix README.md Logo URL (#4546)

This URL also works, and it is easier.
---
 README.md | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index 0b1d3b6084a..e139dc0ee07 100644
--- a/README.md
+++ b/README.md
@@ -1,13 +1,10 @@

-# Polkadot SDK - - - - - +![SDK Logo](./docs/images/Polkadot_Logo_Horizontal_Pink_White.png#gh-dark-mode-only) +![SDK Logo](./docs/images/Polkadot_Logo_Horizontal_Pink_Black.png#gh-light-mode-only) +# Polkadot SDK ![GitHub stars](https://img.shields.io/github/stars/paritytech/polkadot-sdk)  ![GitHub forks](https://img.shields.io/github/forks/paritytech/polkadot-sdk) -- GitLab From a823d18f057cdf1996d73d0c964122967df4a2e7 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Thu, 23 May 2024 12:11:20 +0200 Subject: [PATCH 053/106] Fix bridges grandpa benchmarks (#2577) (#4548) Cherry-picked fix from upcoming https://github.com/paritytech/polkadot-sdk/pull/4494 --------- Co-authored-by: Svyatoslav Nikolsky Co-authored-by: command-bot <> --- bridges/modules/grandpa/src/benchmarking.rs | 3 ++- .../src/weights/pallet_bridge_grandpa.rs | 20 ++++++++-------- .../src/weights/pallet_bridge_grandpa.rs | 24 +++++++++---------- 3 files changed, 24 insertions(+), 23 deletions(-) diff --git a/bridges/modules/grandpa/src/benchmarking.rs b/bridges/modules/grandpa/src/benchmarking.rs index a458abf524d..fb7354e05c0 100644 --- a/bridges/modules/grandpa/src/benchmarking.rs +++ b/bridges/modules/grandpa/src/benchmarking.rs @@ -70,11 +70,12 @@ const MAX_VOTE_ANCESTRIES_RANGE_END: u32 = // the same with validators - if there are too much validators, let's run benchmarks on subrange fn precommits_range_end, I: 'static>() -> u32 { let max_bridged_authorities = T::BridgedChain::MAX_AUTHORITIES_COUNT; - if max_bridged_authorities > 128 { + let max_bridged_authorities = if max_bridged_authorities > 128 { sp_std::cmp::max(128, max_bridged_authorities / 5) } else { max_bridged_authorities }; + required_justification_precommits(max_bridged_authorities) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs index 257e2dcac2f..11e1439a1f6 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_bridge_grandpa` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-05-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-05-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-vicqj8em-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -62,17 +62,17 @@ impl pallet_bridge_grandpa::WeightInfo for WeightInfo Weight { // Proof Size summary in bytes: - // Measured: `440 + p * (60 ยฑ0)` + // Measured: `438 + p * (60 ยฑ0)` // Estimated: `51735` - // Minimum execution time: 306_046_000 picoseconds. - Weight::from_parts(384_361_000, 0) + // Minimum execution time: 300_829_000 picoseconds. 
+ Weight::from_parts(321_573_000, 0)
 .saturating_add(Weight::from_parts(0, 51735))
- // Standard Error: 14_298
- .saturating_add(Weight::from_parts(49_045_748, 0).saturating_mul(p.into()))
+ // Standard Error: 25_917
+ .saturating_add(Weight::from_parts(48_613_160, 0).saturating_mul(p.into()))
 .saturating_add(T::DbWeight::get().reads(6))
 .saturating_add(T::DbWeight::get().writes(5))
 }
@@ -90,8 +90,8 @@ impl pallet_bridge_grandpa::WeightInfo for WeightInfo pallet_bridge_grandpa::WeightInfo for WeightInfo Weight {
 // Proof Size summary in bytes:
- // Measured: `270 + p * (60 ±0)`
+ // Measured: `268 + p * (60 ±0)`
 // Estimated: `51735`
- // Minimum execution time: 294_098_000 picoseconds.
- Weight::from_parts(31_208_540, 0)
+ // Minimum execution time: 291_721_000 picoseconds.
+ Weight::from_parts(37_495_589, 0)
 .saturating_add(Weight::from_parts(0, 51735))
- // Standard Error: 8_832
- .saturating_add(Weight::from_parts(40_930_987, 0).saturating_mul(p.into()))
- // Standard Error: 147_319
- .saturating_add(Weight::from_parts(2_663_839, 0).saturating_mul(v.into()))
+ // Standard Error: 22_170
+ .saturating_add(Weight::from_parts(45_403_072, 0).saturating_mul(p.into()))
+ // Standard Error: 73_977
+ .saturating_add(Weight::from_parts(2_130_216, 0).saturating_mul(v.into()))
 .saturating_add(T::DbWeight::get().reads(6))
 .saturating_add(T::DbWeight::get().writes(5))
 }
@@ -92,8 +92,8 @@ impl pallet_bridge_grandpa::WeightInfo for WeightInfo Date: Thu, 23 May 2024 13:17:09 +0200
Subject: [PATCH 054/106] Contracts: Rework host fn benchmarks (#4233)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fix https://github.com/paritytech/polkadot-sdk/issues/4163.

This PR does the following:

Update to pallet-contracts-proc-macro:
- Parse #[cfg] so we can add a dummy noop host function for benchmarks.
- Generate BenchEnv:: so we can call host functions directly in the benchmark.
- Add the weight of the noop host function before calling the host function itself.

Update benchmarks:
- Update all host function benchmarks; a host function benchmark now simply calls the host
function, instead of invoking the function n times from within a contract.
- Refactor RuntimeCosts & Schedule; for most host functions, we can now use the generated
weight function directly instead of computing the diff with the `cost!` macro:
```rust
// Before
#[benchmark(pov_mode = Measured)]
fn seal_input(r: Linear<0, API_BENCHMARK_RUNS>) {
    let code = WasmModule::<T>::from(ModuleDefinition {
        memory: Some(ImportedMemory::max::<T>()),
        imported_functions: vec![ImportedFunction {
            module: "seal0",
            name: "seal_input",
            params: vec![ValueType::I32, ValueType::I32],
            return_type: None,
        }],
        data_segments: vec![DataSegment { offset: 0, value: 0u32.to_le_bytes().to_vec() }],
        call_body: Some(body::repeated(
            r,
            &[
                Instruction::I32Const(4), // ptr where to store output
                Instruction::I32Const(0), // ptr to length
                Instruction::Call(0),
            ],
        )),
        ..Default::default()
    });
    call_builder!(func, code);

    let res;
    #[block]
    {
        res = func.call();
    }
    assert_eq!(res.did_revert(), false);
}
```

```rust
// After
fn seal_input(n: Linear<0, { code::max_pages::<T>() * 64 * 1024 - 4 }>) {
    let mut setup = CallSetup::<T>::default();
    let (mut ext, _) = setup.ext();
    let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![42u8; n as usize]);
    let mut memory = memory!(n.to_le_bytes(), vec![0u8; n as usize],);
    let result;
    #[block]
    {
        result = BenchEnv::seal0_input(&mut runtime, &mut memory, 4, 0)
    }
    assert_ok!(result);
    assert_eq!(&memory[4..], &vec![42u8; n as usize]);
}
```

[Weights compare](https://weights.tasty.limo/compare?unit=weight&ignore_errors=true&threshold=10&method=asymptotic&repo=polkadot-sdk&old=master&new=pg%2Frework-host-benchs&path_pattern=substrate%2Fframe%2Fcontracts%2Fsrc%2Fweights.rs%2Cpolkadot%2Fruntime%2F*%2Fsrc%2Fweights%2F**%2F*.rs%2Cpolkadot%2Fbridges%2Fmodules%2F*%2Fsrc%2Fweights.rs%2Ccumulus%2F**%2Fweights%2F*.rs%2Ccumulus%2F**%2Fweights%2Fxcm%2F*.rs%2Ccumulus%2F**%2Fsrc%2Fweights.rs)

---------

Co-authored-by: command-bot <>
Co-authored-by: Alexander Theißen
---
 prdoc/pr_4233.prdoc | 14 +
 substrate/frame/contracts/README.md | 13 -
 .../frame/contracts/proc-macro/src/lib.rs | 178 +-
 .../src/benchmarking/call_builder.rs | 56 +-
 .../frame/contracts/src/benchmarking/code.rs | 75 +-
 .../frame/contracts/src/benchmarking/mod.rs | 2488 ++++-------------
 substrate/frame/contracts/src/exec.rs | 13 +-
 substrate/frame/contracts/src/lib.rs | 2 +-
 substrate/frame/contracts/src/schedule.rs | 319 +--
 substrate/frame/contracts/src/tests.rs | 3 +-
 substrate/frame/contracts/src/wasm/mod.rs | 6 +
 substrate/frame/contracts/src/wasm/runtime.rs | 180 +-
 substrate/frame/contracts/src/weights.rs | 2246 +++++----------
 13 files changed, 1613 insertions(+), 3980 deletions(-)
 create mode 100644 prdoc/pr_4233.prdoc

diff --git a/prdoc/pr_4233.prdoc b/prdoc/pr_4233.prdoc
new file mode 100644
index 00000000000..c593fec68a6
--- /dev/null
+++ b/prdoc/pr_4233.prdoc
@@ -0,0 +1,14 @@
+title: "[pallet_contracts] Update Host fn benchmarks"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Update how the host functions are benchmarked.
+      Instead of benchmarking a contract that calls the host functions, we now benchmark the host functions directly.
+
+crates:
+  - name: pallet-contracts
+    bump: minor
+  - name: pallet-contracts-proc-macro
+    bump: minor
+
diff --git a/substrate/frame/contracts/README.md b/substrate/frame/contracts/README.md
index 09dc770300c..2e70b5c5008 100644
--- a/substrate/frame/contracts/README.md
+++ b/substrate/frame/contracts/README.md
@@ -34,19 +34,6 @@ calls are reverted. Assuming correct error handling by contract A, A's other cal
 One `ref_time` `Weight` is defined as one picosecond of execution time on the runtime's reference machine.

-#### Schedule
-
-The `Schedule` is where, among other things, the cost of every action a contract can do is defined.
These costs are derived -from the benchmarks of this pallet. Instead of looking at the raw benchmark results it is advised to look at the `Schedule` -if one wants to manually inspect the performance characteristics. The `Schedule` can be printed like this: - -```sh -RUST_LOG=runtime::contracts=info cargo run --features runtime-benchmarks --bin substrate-node -- benchmark pallet --extra -p pallet_contracts -e print_schedule -``` - -Please note that the `Schedule` will be printed multiple times. This is because we are (ab)using a benchmark to print -the struct. - ### Revert Behaviour Contract call failures are not cascading. When failures occur in a sub-call, they do not "bubble up", and the call will diff --git a/substrate/frame/contracts/proc-macro/src/lib.rs b/substrate/frame/contracts/proc-macro/src/lib.rs index 1794d09d5ad..356b42268da 100644 --- a/substrate/frame/contracts/proc-macro/src/lib.rs +++ b/substrate/frame/contracts/proc-macro/src/lib.rs @@ -132,6 +132,7 @@ struct HostFn { alias_to: Option, /// Formulating the predicate inverted makes the expression using it simpler. not_deprecated: bool, + cfg: Option, } enum HostFnReturn { @@ -163,13 +164,13 @@ impl ToTokens for HostFn { impl HostFn { pub fn try_from(mut item: syn::ItemFn) -> syn::Result { let err = |span, msg| { - let msg = format!("Invalid host function definition. {}", msg); + let msg = format!("Invalid host function definition.\n{}", msg); syn::Error::new(span, msg) }; // process attributes let msg = - "only #[version()], #[unstable], #[prefixed_alias] and #[deprecated] attributes are allowed."; + "Only #[version()], #[unstable], #[prefixed_alias], #[cfg] and #[deprecated] attributes are allowed."; let span = item.span(); let mut attrs = item.attrs.clone(); attrs.retain(|a| !a.path().is_ident("doc")); @@ -177,6 +178,7 @@ impl HostFn { let mut is_stable = true; let mut alias_to = None; let mut not_deprecated = true; + let mut cfg = None; while let Some(attr) = attrs.pop() { let ident = attr.path().get_ident().ok_or(err(span, msg))?.to_string(); match ident.as_str() { @@ -206,7 +208,13 @@ impl HostFn { } not_deprecated = false; }, - _ => return Err(err(span, msg)), + "cfg" => { + if cfg.is_some() { + return Err(err(span, "#[cfg] can only be specified once")) + } + cfg = Some(attr); + }, + id => return Err(err(span, &format!("Unsupported attribute \"{id}\". {msg}"))), } } let name = item.sig.ident.to_string(); @@ -311,6 +319,7 @@ impl HostFn { is_stable, alias_to, not_deprecated, + cfg, }) }, _ => Err(err(span, &msg)), @@ -528,8 +537,9 @@ fn expand_env(def: &EnvDef, docs: bool) -> TokenStream2 { /// - real implementation, to register it in the contract execution environment; /// - dummy implementation, to be used as mocks for contract validation step. fn expand_impls(def: &EnvDef) -> TokenStream2 { - let impls = expand_functions(def, true, quote! { crate::wasm::Runtime }); - let dummy_impls = expand_functions(def, false, quote! { () }); + let impls = expand_functions(def, ExpandMode::Impl); + let dummy_impls = expand_functions(def, ExpandMode::MockImpl); + let bench_impls = expand_functions(def, ExpandMode::BenchImpl); quote! 
{ impl<'a, E: Ext> crate::wasm::Environment> for Env @@ -545,6 +555,14 @@ fn expand_impls(def: &EnvDef) -> TokenStream2 { } } + #[cfg(feature = "runtime-benchmarks")] + pub struct BenchEnv(::core::marker::PhantomData); + + #[cfg(feature = "runtime-benchmarks")] + impl BenchEnv { + #bench_impls + } + impl crate::wasm::Environment<()> for Env { fn define( @@ -560,18 +578,38 @@ fn expand_impls(def: &EnvDef) -> TokenStream2 { } } -fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) -> TokenStream2 { +enum ExpandMode { + Impl, + BenchImpl, + MockImpl, +} + +impl ExpandMode { + fn expand_blocks(&self) -> bool { + match *self { + ExpandMode::Impl | ExpandMode::BenchImpl => true, + ExpandMode::MockImpl => false, + } + } + + fn host_state(&self) -> TokenStream2 { + match *self { + ExpandMode::Impl | ExpandMode::BenchImpl => quote! { crate::wasm::runtime::Runtime }, + ExpandMode::MockImpl => quote! { () }, + } + } +} + +fn expand_functions(def: &EnvDef, expand_mode: ExpandMode) -> TokenStream2 { let impls = def.host_funcs.iter().map(|f| { // skip the context and memory argument let params = f.item.sig.inputs.iter().skip(2); - - let (module, name, body, wasm_output, output) = ( - f.module(), - &f.name, - &f.item.block, - f.returns.to_wasm_sig(), - &f.item.sig.output - ); + let module = f.module(); + let cfg = &f.cfg; + let name = &f.name; + let body = &f.item.block; + let wasm_output = f.returns.to_wasm_sig(); + let output = &f.item.sig.output; let is_stable = f.is_stable; let not_deprecated = f.not_deprecated; @@ -608,23 +646,34 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) // - We replace any code by unreachable! // - Allow unused variables as the code that uses is not expanded // - We don't need to map the error as we simply panic if they code would ever be executed - let inner = if expand_blocks { - quote! { || #output { - let (memory, ctx) = __caller__ - .data() - .memory() - .expect("Memory must be set when setting up host data; qed") - .data_and_store_mut(&mut __caller__); - #wrapped_body_with_trace - } } - } else { - quote! { || -> #wasm_output { - // This is part of the implementation for `Environment<()>` which is not - // meant to be actually executed. It is only for validation which will - // never call host functions. - ::core::unreachable!() - } } + let expand_blocks = expand_mode.expand_blocks(); + let inner = match expand_mode { + ExpandMode::Impl => { + quote! { || #output { + let (memory, ctx) = __caller__ + .data() + .memory() + .expect("Memory must be set when setting up host data; qed") + .data_and_store_mut(&mut __caller__); + #wrapped_body_with_trace + } } + }, + ExpandMode::BenchImpl => { + let body = &body.stmts; + quote!{ + #(#body)* + } + }, + ExpandMode::MockImpl => { + quote! { || -> #wasm_output { + // This is part of the implementation for `Environment<()>` which is not + // meant to be actually executed. It is only for validation which will + // never call host functions. + ::core::unreachable!() + } } + }, }; + let into_host = if expand_blocks { quote! { |reason| { @@ -655,6 +704,11 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) .map_err(TrapReason::from) .map_err(#into_host)? }; + + // Charge gas for host function execution. + __caller__.data_mut().charge_gas(crate::wasm::RuntimeCosts::HostFn) + .map_err(TrapReason::from) + .map_err(#into_host)?; } } else { quote! 
{ } @@ -676,29 +730,51 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) quote! { } }; - quote! { - // We need to allow all interfaces when runtime benchmarks are performed because - // we generate the weights even when those interfaces are not enabled. This - // is necessary as the decision whether we allow unstable or deprecated functions - // is a decision made at runtime. Generation of the weights happens statically. - if ::core::cfg!(feature = "runtime-benchmarks") || - ((#is_stable || __allow_unstable__) && (#not_deprecated || __allow_deprecated__)) - { - #allow_unused - linker.define(#module, #name, ::wasmi::Func::wrap(&mut*store, |mut __caller__: ::wasmi::Caller<#host_state>, #( #params, )*| -> #wasm_output { - #sync_gas_before - let mut func = #inner; - let result = func().map_err(#into_host).map(::core::convert::Into::into); - #sync_gas_after - result - }))?; - } + match expand_mode { + ExpandMode::BenchImpl => { + let name = Ident::new(&format!("{module}_{name}"), Span::call_site()); + quote! { + pub fn #name(ctx: &mut crate::wasm::Runtime, memory: &mut [u8], #(#params),*) #output { + #inner + } + } + }, + _ => { + let host_state = expand_mode.host_state(); + quote! { + // We need to allow all interfaces when runtime benchmarks are performed because + // we generate the weights even when those interfaces are not enabled. This + // is necessary as the decision whether we allow unstable or deprecated functions + // is a decision made at runtime. Generation of the weights happens statically. + #cfg + if ::core::cfg!(feature = "runtime-benchmarks") || + ((#is_stable || __allow_unstable__) && (#not_deprecated || __allow_deprecated__)) + { + #allow_unused + linker.define(#module, #name, ::wasmi::Func::wrap(&mut*store, |mut __caller__: ::wasmi::Caller<#host_state>, #( #params, )*| -> #wasm_output { + #sync_gas_before + let mut func = #inner; + let result = func().map_err(#into_host).map(::core::convert::Into::into); + #sync_gas_after + result + }))?; + } + } + }, } }); - quote! { - let __allow_unstable__ = matches!(allow_unstable, AllowUnstableInterface::Yes); - let __allow_deprecated__ = matches!(allow_deprecated, AllowDeprecatedInterface::Yes); - #( #impls )* + + match expand_mode { + ExpandMode::BenchImpl => { + quote! { + #( #impls )* + } + }, + _ => quote! 
{ + let __allow_unstable__ = matches!(allow_unstable, AllowUnstableInterface::Yes); + let __allow_deprecated__ = matches!(allow_deprecated, AllowDeprecatedInterface::Yes); + #( #impls )* + }, } } diff --git a/substrate/frame/contracts/src/benchmarking/call_builder.rs b/substrate/frame/contracts/src/benchmarking/call_builder.rs index 285fe0052b4..5d73d825fca 100644 --- a/substrate/frame/contracts/src/benchmarking/call_builder.rs +++ b/substrate/frame/contracts/src/benchmarking/call_builder.rs @@ -25,6 +25,7 @@ use crate::{ }; use codec::{Encode, HasCompact}; use core::fmt::Debug; +use frame_benchmarking::benchmarking; use sp_core::Get; use sp_std::prelude::*; @@ -57,6 +58,16 @@ pub struct CallSetup { data: Vec, } +impl Default for CallSetup +where + T: Config + pallet_balances::Config, + as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, +{ + fn default() -> Self { + Self::new(WasmModule::dummy()) + } +} + impl CallSetup where T: Config + pallet_balances::Config, @@ -70,6 +81,17 @@ where let storage_meter = Meter::new(&origin, None, 0u32.into()).unwrap(); + // Whitelist contract account, as it is already accounted for in the call benchmark + benchmarking::add_to_whitelist( + frame_system::Account::::hashed_key_for(&contract.account_id).into(), + ); + + // Whitelist the contract's contractInfo as it is already accounted for in the call + // benchmark + benchmarking::add_to_whitelist( + crate::ContractInfoOf::::hashed_key_for(&contract.account_id).into(), + ); + Self { contract, dest, @@ -150,21 +172,29 @@ where } #[macro_export] -macro_rules! call_builder( - ($func: ident, $module:expr) => { - $crate::call_builder!($func, _contract, $module); +macro_rules! memory( + ($($bytes:expr,)*) => { + vec![] + .into_iter() + $(.chain($bytes))* + .collect::>() }; - ($func: ident, $contract: ident, $module:expr) => { - let mut setup = CallSetup::::new($module); - $crate::call_builder!($func, $contract, setup: setup); +); + +#[macro_export] +macro_rules! build_runtime( + ($runtime:ident, $memory:ident: [$($segment:expr,)*]) => { + $crate::build_runtime!($runtime, _contract, $memory: [$($segment,)*]); }; - ($func:ident, setup: $setup: ident) => { - $crate::call_builder!($func, _contract, setup: $setup); + ($runtime:ident, $contract:ident, $memory:ident: [$($bytes:expr,)*]) => { + $crate::build_runtime!($runtime, $contract); + let mut $memory = $crate::memory!($($bytes,)*); }; - ($func:ident, $contract: ident, setup: $setup: ident) => { - let data = $setup.data(); - let $contract = $setup.contract(); - let (mut ext, module) = $setup.ext(); - let $func = CallSetup::::prepare_call(&mut ext, module, data); + ($runtime:ident, $contract:ident) => { + let mut setup = CallSetup::::default(); + let $contract = setup.contract(); + let input = setup.data(); + let (mut ext, _) = setup.ext(); + let mut $runtime = crate::wasm::Runtime::new(&mut ext, input); }; ); diff --git a/substrate/frame/contracts/src/benchmarking/code.rs b/substrate/frame/contracts/src/benchmarking/code.rs index b97cf168e26..65bcf30683c 100644 --- a/substrate/frame/contracts/src/benchmarking/code.rs +++ b/substrate/frame/contracts/src/benchmarking/code.rs @@ -288,17 +288,15 @@ impl WasmModule { module.into() } - /// Creates a wasm module that calls the imported function named `getter_name` `repeat` - /// times. The imported function is expected to have the "getter signature" of - /// (out_ptr: u32, len_ptr: u32) -> (). 
- pub fn getter(module_name: &'static str, getter_name: &'static str, repeat: u32) -> Self { + /// Creates a wasm module that calls the imported function `noop` `repeat` times. + pub fn noop(repeat: u32) -> Self { let pages = max_pages::(); ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: module_name, - name: getter_name, - params: vec![ValueType::I32, ValueType::I32], + module: "seal0", + name: "noop", + params: vec![], return_type: None, }], // Write the output buffer size. The output size will be overwritten by the @@ -312,35 +310,7 @@ impl WasmModule { call_body: Some(body::repeated( repeat, &[ - Instruction::I32Const(4), // ptr where to store output - Instruction::I32Const(0), // ptr to length - Instruction::Call(0), // call the imported function - ], - )), - ..Default::default() - } - .into() - } - - /// Creates a wasm module that calls the imported hash function named `name` `repeat` times - /// with an input of size `data_size`. Hash functions have the signature - /// (input_ptr: u32, input_len: u32, output_ptr: u32) -> () - pub fn hasher(name: &'static str, repeat: u32, data_size: u32) -> Self { - ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name, - params: vec![ValueType::I32, ValueType::I32, ValueType::I32], - return_type: None, - }], - call_body: Some(body::repeated( - repeat, - &[ - Instruction::I32Const(0), // input_ptr - Instruction::I32Const(data_size as i32), // input_len - Instruction::I32Const(0), // output_ptr - Instruction::Call(0), + Instruction::Call(0), // call the imported function ], )), ..Default::default() @@ -353,21 +323,6 @@ impl WasmModule { pub mod body { use super::*; - /// When generating contract code by repeating a Wasm sequence, it's sometimes necessary - /// to change those instructions on each repetition. The variants of this enum describe - /// various ways in which this can happen. - pub enum DynInstr { - /// Insert the associated instruction. - Regular(Instruction), - /// Insert a I32Const with incrementing value for each insertion. - /// (start_at, increment_by) - Counter(u32, u32), - } - - pub fn plain(instructions: Vec) -> FuncBody { - FuncBody::new(Vec::new(), Instructions::new(instructions)) - } - pub fn repeated(repetitions: u32, instructions: &[Instruction]) -> FuncBody { repeated_with_locals(&[], repetitions, instructions) } @@ -401,24 +356,6 @@ pub mod body { instructions.push(Instruction::End); FuncBody::new(locals.to_vec(), Instructions::new(instructions)) } - - pub fn repeated_dyn(repetitions: u32, mut instructions: Vec) -> FuncBody { - // We need to iterate over indices because we cannot cycle over mutable references - let body = (0..instructions.len()) - .cycle() - .take(instructions.len() * usize::try_from(repetitions).unwrap()) - .flat_map(|idx| match &mut instructions[idx] { - DynInstr::Regular(instruction) => vec![instruction.clone()], - DynInstr::Counter(offset, increment_by) => { - let current = *offset; - *offset += *increment_by; - vec![Instruction::I32Const(current as i32)] - }, - }) - .chain(sp_std::iter::once(Instruction::End)) - .collect(); - FuncBody::new(Vec::new(), Instructions::new(body)) - } } /// The maximum amount of pages any contract is allowed to have according to the current `Schedule`. 
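
To see how the helpers above fit together, here is a rough hand-expansion of the `build_runtime!` and `memory!` macros followed by a direct host-function call, mirroring the `seal_balance` benchmark further down. This is an illustrative sketch rather than part of the patch: it assumes the surrounding benchmarking module of `pallet-contracts` (so `CallSetup`, `BenchEnv`, `BalanceOf`, and `assert_ok!` are in scope) and a runtime with `T: Config + pallet_balances::Config`.

```rust
// Rough hand-expansion of:
//   build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _],]);
// based on the macro definitions in call_builder.rs (illustrative only).

// `build_runtime!` wires up a dummy contract and an executing `Runtime` around it:
let mut setup = CallSetup::<T>::default(); // dummy contract with default call parameters
let input = setup.data(); // call input captured by the setup
let (mut ext, _) = setup.ext(); // externalities for the call stack frame
let mut runtime = crate::wasm::Runtime::new(&mut ext, input);

// `memory!` concatenates its byte chunks into one flat buffer that stands in for the
// contract's linear memory:
let len = <BalanceOf<T> as codec::MaxEncodedLen>::max_encoded_len() as u32;
let mut memory: Vec<u8> = vec![]
    .into_iter()
    .chain(len.to_le_bytes()) // bytes 0..4: length of the output buffer
    .chain(vec![0u8; len as usize]) // bytes 4..: zeroed output buffer
    .collect();

// The benchmark then calls the generated host-function wrapper directly, here asking
// seal_balance to write the encoded balance at offset 4:
let result = BenchEnv::seal0_seal_balance(&mut runtime, &mut memory, 4, 0);
assert_ok!(result);
```

With this shape, the measured `#[block]` contains only the host-function body itself; the fixed cost of entering a host function is isolated by the `noop_host_fn` benchmark and charged as `RuntimeCosts::HostFn` in the generated wrappers.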
diff --git a/substrate/frame/contracts/src/benchmarking/mod.rs b/substrate/frame/contracts/src/benchmarking/mod.rs index 952ef180be2..7c993bc9a77 100644 --- a/substrate/frame/contracts/src/benchmarking/mod.rs +++ b/substrate/frame/contracts/src/benchmarking/mod.rs @@ -23,33 +23,31 @@ mod code; mod sandbox; use self::{ call_builder::CallSetup, - code::{ - body::{self, DynInstr::*}, - DataSegment, ImportedFunction, ImportedMemory, Location, ModuleDefinition, WasmModule, - }, + code::{body, ImportedMemory, Location, ModuleDefinition, WasmModule}, sandbox::Sandbox, }; use crate::{ - exec::Key, + exec::{Key, SeedOf}, migration::{ codegen::LATEST_MIGRATION_VERSION, v09, v10, v11, v12, v13, v14, v15, v16, MigrationStep, }, + wasm::BenchEnv, Pallet as Contracts, *, }; use codec::{Encode, MaxEncodedLen}; use frame_benchmarking::v2::*; use frame_support::{ - self, + self, assert_ok, pallet_prelude::StorageVersion, traits::{fungible::InspectHold, Currency}, weights::{Weight, WeightMeter}, }; use frame_system::RawOrigin; use pallet_balances; -use pallet_contracts_uapi::CallFlags; +use pallet_contracts_uapi::{CallFlags, ReturnErrorCode}; use sp_runtime::traits::{Bounded, Hash}; use sp_std::prelude::*; -use wasm_instrument::parity_wasm::elements::{BlockType, Instruction, Local, ValueType}; +use wasm_instrument::parity_wasm::elements::{Instruction, Local, ValueType}; /// How many runs we do per API benchmark. /// @@ -442,13 +440,6 @@ mod benchmarks { Ok(()) } - // This constructs a contract that is maximal expensive to instrument. - // It creates a maximum number of metering blocks per byte. - // The size of the salt influences the runtime because is is hashed in order to - // determine the contract address. All code is generated to the `call` function so that - // we don't benchmark the actual execution of this code but merely what it takes to load - // a code of that size into the sandbox. - // // `c`: Size of the code in bytes. // `i`: Size of the input in bytes. // `s`: Size of the salt in bytes. @@ -482,7 +473,6 @@ mod benchmarks { assert_eq!(T::Currency::balance(&addr), value + Pallet::::min_balance()); } - // Instantiate uses a dummy contract constructor to measure the overhead of the instantiate. // `i`: Size of the input in bytes. // `s`: Size of the salt in bytes. 
#[benchmark(pov_mode = Measured)] @@ -621,507 +611,306 @@ mod benchmarks { } #[benchmark(pov_mode = Measured)] - fn seal_caller(r: Linear<0, API_BENCHMARK_RUNS>) { - call_builder!(func, WasmModule::getter("seal0", "seal_caller", r)); - - let res; + fn noop_host_fn(r: Linear<0, API_BENCHMARK_RUNS>) { + let mut setup = CallSetup::::new(WasmModule::noop(r)); + let (mut ext, module) = setup.ext(); + let func = CallSetup::::prepare_call(&mut ext, module, vec![]); #[block] { - res = func.call(); + func.call(); } - assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] - fn seal_is_contract(r: Linear<0, API_BENCHMARK_RUNS>) { - let accounts = (0..r).map(|n| account::("account", n, 0)).collect::>(); - let account_len = accounts.get(0).map(|i| i.encode().len()).unwrap_or(0); - let accounts_bytes = accounts.iter().flat_map(|a| a.encode()).collect::>(); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_is_contract", - params: vec![ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![DataSegment { offset: 0, value: accounts_bytes }], - call_body: Some(body::repeated_dyn( - r, - vec![ - Counter(0, account_len as u32), // address_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ], - )), - ..Default::default() - }); - call_builder!(func, instance, code); - let info = instance.info().unwrap(); - // every account would be a contract (worst case) - for acc in accounts.iter() { - >::insert(acc, info.clone()); - } + fn seal_caller() { + let len = ::max_encoded_len() as u32; + build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_caller(&mut runtime, &mut memory, 4, 0); } - assert_eq!(res.did_revert(), false); + + assert_ok!(result); + assert_eq!( + &::decode(&mut &memory[4..]).unwrap(), + runtime.ext().caller().account_id().unwrap() + ); } #[benchmark(pov_mode = Measured)] - fn seal_code_hash(r: Linear<0, API_BENCHMARK_RUNS>) { - let accounts = (0..r).map(|n| account::("account", n, 0)).collect::>(); - let account_len = accounts.get(0).map(|i| i.encode().len()).unwrap_or(0); - let accounts_bytes = accounts.iter().flat_map(|a| a.encode()).collect::>(); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_code_hash", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![ - DataSegment { - offset: 0, - value: 32u32.to_le_bytes().to_vec(), // output length - }, - DataSegment { offset: 36, value: accounts_bytes }, - ], - call_body: Some(body::repeated_dyn( - r, - vec![ - Counter(36, account_len as u32), // address_ptr - Regular(Instruction::I32Const(4)), // ptr to output data - Regular(Instruction::I32Const(0)), // ptr to output length - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ], - )), - ..Default::default() - }); - call_builder!(func, instance, code); - let info = instance.info().unwrap(); - // every account would be a contract (worst case) - for acc in accounts.iter() { - >::insert(acc, info.clone()); - } + fn seal_is_contract() { + let Contract { account_id, .. 
} = + Contract::::with_index(1, WasmModule::dummy(), vec![]).unwrap(); - let res; + build_runtime!(runtime, memory: [account_id.encode(), ]); + + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_is_contract(&mut runtime, &mut memory, 0); } - assert_eq!(res.did_revert(), false); + + assert_eq!(result.unwrap(), 1); } #[benchmark(pov_mode = Measured)] - fn seal_own_code_hash(r: Linear<0, API_BENCHMARK_RUNS>) { - call_builder!(func, WasmModule::getter("seal0", "seal_own_code_hash", r)); + fn seal_code_hash() { + let contract = Contract::::with_index(1, WasmModule::dummy(), vec![]).unwrap(); + let len = as MaxEncodedLen>::max_encoded_len() as u32; + build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], contract.account_id.encode(), ]); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_code_hash(&mut runtime, &mut memory, 4 + len, 4, 0); } - assert_eq!(res.did_revert(), false); + + assert_ok!(result); + assert_eq!( + as Decode>::decode(&mut &memory[4..]).unwrap(), + contract.info().unwrap().code_hash + ); } #[benchmark(pov_mode = Measured)] - fn seal_caller_is_origin(r: Linear<0, API_BENCHMARK_RUNS>) { - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_caller_is_origin", - params: vec![], - return_type: Some(ValueType::I32), - }], - call_body: Some(body::repeated(r, &[Instruction::Call(0), Instruction::Drop])), - ..Default::default() - }); - call_builder!(func, code); - - let res; + fn seal_own_code_hash() { + let len = as MaxEncodedLen>::max_encoded_len() as u32; + build_runtime!(runtime, contract, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_own_code_hash(&mut runtime, &mut memory, 4, 0); } - assert_eq!(res.did_revert(), false); + + assert_ok!(result); + assert_eq!( + as Decode>::decode(&mut &memory[4..]).unwrap(), + contract.info().unwrap().code_hash + ); } #[benchmark(pov_mode = Measured)] - fn seal_caller_is_root(r: Linear<0, API_BENCHMARK_RUNS>) { - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "caller_is_root", - params: vec![], - return_type: Some(ValueType::I32), - }], - call_body: Some(body::repeated(r, &[Instruction::Call(0), Instruction::Drop])), - ..Default::default() - }); - let mut setup = CallSetup::::new(code); - setup.set_origin(Origin::Root); - call_builder!(func, setup: setup); + fn seal_caller_is_origin() { + build_runtime!(runtime, memory: []); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_caller_is_origin(&mut runtime, &mut memory); } - assert_eq!(res.did_revert(), false); + assert_eq!(result.unwrap(), 1u32); } #[benchmark(pov_mode = Measured)] - fn seal_address(r: Linear<0, API_BENCHMARK_RUNS>) { - call_builder!(func, WasmModule::getter("seal0", "seal_address", r)); + fn seal_caller_is_root() { + let mut setup = CallSetup::::default(); + setup.set_origin(Origin::Root); + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![]); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_caller_is_root(&mut runtime, &mut [0u8; 0]); } - assert_eq!(res.did_revert(), false); + assert_eq!(result.unwrap(), 1u32); } #[benchmark(pov_mode = Measured)] - fn seal_gas_left(r: Linear<0, 
API_BENCHMARK_RUNS>) { - call_builder!(func, WasmModule::getter("seal1", "gas_left", r)); + fn seal_address() { + let len = as MaxEncodedLen>::max_encoded_len() as u32; + build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_address(&mut runtime, &mut memory, 4, 0); } - assert_eq!(res.did_revert(), false); + assert_ok!(result); + assert_eq!( + &::decode(&mut &memory[4..]).unwrap(), + runtime.ext().address() + ); } #[benchmark(pov_mode = Measured)] - fn seal_balance(r: Linear<0, API_BENCHMARK_RUNS>) { - call_builder!(func, WasmModule::getter("seal0", "seal_balance", r)); + fn seal_gas_left() { + // use correct max_encoded_len when new version of parity-scale-codec is released + let len = 18u32; + assert!(::max_encoded_len() as u32 != len); + build_runtime!(runtime, memory: [32u32.to_le_bytes(), vec![0u8; len as _], ]); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal1_gas_left(&mut runtime, &mut memory, 4, 0); } - assert_eq!(res.did_revert(), false); + assert_ok!(result); + assert_eq!( + ::decode(&mut &memory[4..]).unwrap(), + runtime.ext().gas_meter().gas_left() + ); } #[benchmark(pov_mode = Measured)] - fn seal_value_transferred(r: Linear<0, API_BENCHMARK_RUNS>) { - call_builder!(func, WasmModule::getter("seal0", "seal_value_transferred", r)); - - let res; + fn seal_balance() { + let len = ::max_encoded_len() as u32; + build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_seal_balance(&mut runtime, &mut memory, 4, 0); } - assert_eq!(res.did_revert(), false); + assert_ok!(result); + assert_eq!( + ::decode(&mut &memory[4..]).unwrap(), + runtime.ext().balance().into() + ); } #[benchmark(pov_mode = Measured)] - fn seal_minimum_balance(r: Linear<0, API_BENCHMARK_RUNS>) { - call_builder!(func, WasmModule::getter("seal0", "seal_minimum_balance", r)); - - let res; + fn seal_value_transferred() { + let len = ::max_encoded_len() as u32; + build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_value_transferred(&mut runtime, &mut memory, 4, 0); } - assert_eq!(res.did_revert(), false); + assert_ok!(result); + assert_eq!( + ::decode(&mut &memory[4..]).unwrap(), + runtime.ext().value_transferred().into() + ); } #[benchmark(pov_mode = Measured)] - fn seal_block_number(r: Linear<0, API_BENCHMARK_RUNS>) { - call_builder!(func, WasmModule::getter("seal0", "seal_block_number", r)); - - let res; + fn seal_minimum_balance() { + let len = ::max_encoded_len() as u32; + build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_minimum_balance(&mut runtime, &mut memory, 4, 0); } - assert_eq!(res.did_revert(), false); + assert_ok!(result); + assert_eq!( + ::decode(&mut &memory[4..]).unwrap(), + runtime.ext().minimum_balance().into() + ); } #[benchmark(pov_mode = Measured)] - fn seal_now(r: Linear<0, API_BENCHMARK_RUNS>) { - call_builder!(func, WasmModule::getter("seal0", "seal_now", r)); - - let res; + fn seal_block_number() { + let len = as MaxEncodedLen>::max_encoded_len() as u32; + build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_seal_block_number(&mut runtime, &mut memory, 4, 0); } - 
assert_eq!(res.did_revert(), false); + assert_ok!(result); + assert_eq!( + >::decode(&mut &memory[4..]).unwrap(), + runtime.ext().block_number() + ); } #[benchmark(pov_mode = Measured)] - fn seal_weight_to_fee(r: Linear<0, API_BENCHMARK_RUNS>) { - let pages = code::max_pages::(); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal1", - name: "weight_to_fee", - params: vec![ValueType::I64, ValueType::I64, ValueType::I32, ValueType::I32], - return_type: None, - }], - data_segments: vec![DataSegment { - offset: 0, - value: (pages * 64 * 1024 - 4).to_le_bytes().to_vec(), - }], - call_body: Some(body::repeated( - r, - &[ - Instruction::I64Const(500_000), - Instruction::I64Const(300_000), - Instruction::I32Const(4), - Instruction::I32Const(0), - Instruction::Call(0), - ], - )), - ..Default::default() - }); - call_builder!(func, code); - - let res; + fn seal_now() { + let len = as MaxEncodedLen>::max_encoded_len() as u32; + build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_seal_now(&mut runtime, &mut memory, 4, 0); } - assert_eq!(res.did_revert(), false); + assert_ok!(result); + assert_eq!(>::decode(&mut &memory[4..]).unwrap(), *runtime.ext().now()); } #[benchmark(pov_mode = Measured)] - fn seal_input(r: Linear<0, API_BENCHMARK_RUNS>) { - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_input", - params: vec![ValueType::I32, ValueType::I32], - return_type: None, - }], - data_segments: vec![DataSegment { offset: 0, value: 0u32.to_le_bytes().to_vec() }], - call_body: Some(body::repeated( - r, - &[ - Instruction::I32Const(4), // ptr where to store output - Instruction::I32Const(0), // ptr to length - Instruction::Call(0), - ], - )), - ..Default::default() - }); - - call_builder!(func, code); - - let res; + fn seal_weight_to_fee() { + let len = ::max_encoded_len() as u32; + build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + let weight = Weight::from_parts(500_000, 300_000); + let result; #[block] { - res = func.call(); + result = BenchEnv::seal1_weight_to_fee( + &mut runtime, + &mut memory, + weight.ref_time(), + weight.proof_size(), + 4, + 0, + ); } - assert_eq!(res.did_revert(), false); - } - - #[benchmark(pov_mode = Measured)] - fn seal_input_per_byte( - n: Linear<0, { code::max_pages::() * 64 * 1024 }>, - ) -> Result<(), BenchmarkError> { - let buffer_size = code::max_pages::() * 64 * 1024 - 4; - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_input", - params: vec![ValueType::I32, ValueType::I32], - return_type: None, - }], - data_segments: vec![DataSegment { - offset: 0, - value: buffer_size.to_le_bytes().to_vec(), - }], - call_body: Some(body::plain(vec![ - Instruction::I32Const(4), // ptr where to store output - Instruction::I32Const(0), // ptr to length - Instruction::Call(0), - Instruction::End, - ])), - ..Default::default() - }); - let instance = Contract::::new(code, vec![])?; - let data = vec![42u8; n.min(buffer_size) as usize]; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, data); - Ok(()) + assert_ok!(result); + assert_eq!( + 
>::decode(&mut &memory[4..]).unwrap(), + runtime.ext().get_weight_price(weight) + ); } - // We cannot call `seal_return` multiple times. Therefore our weight determination is not - // as precise as with other APIs. Because this function can only be called once per - // contract it cannot be used as an attack vector. #[benchmark(pov_mode = Measured)] - fn seal_return(r: Linear<0, 1>) { - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_return", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32], - return_type: None, - }], - call_body: Some(body::repeated( - r, - &[ - Instruction::I32Const(0), // flags - Instruction::I32Const(0), // data_ptr - Instruction::I32Const(0), // data_len - Instruction::Call(0), - ], - )), - ..Default::default() - }); - call_builder!(func, code); - - let res; + fn seal_input(n: Linear<0, { code::max_pages::() * 64 * 1024 - 4 }>) { + let mut setup = CallSetup::::default(); + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![42u8; n as usize]); + let mut memory = memory!(n.to_le_bytes(), vec![0u8; n as usize],); + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_input(&mut runtime, &mut memory, 4, 0); } - assert_eq!(res.did_revert(), false); + assert_ok!(result); + assert_eq!(&memory[4..], &vec![42u8; n as usize]); } #[benchmark(pov_mode = Measured)] - fn seal_return_per_byte(n: Linear<0, { code::max_pages::() * 64 * 1024 }>) { - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_return", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32], - return_type: None, - }], - call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // flags - Instruction::I32Const(0), // data_ptr - Instruction::I32Const(n as i32), // data_len - Instruction::Call(0), - Instruction::End, - ])), - ..Default::default() - }); - call_builder!(func, code); + fn seal_return(n: Linear<0, { code::max_pages::() * 64 * 1024 - 4 }>) { + build_runtime!(runtime, memory: [n.to_le_bytes(), vec![42u8; n as usize], ]); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_seal_return(&mut runtime, &mut memory, 0, 0, n); } - assert_eq!(res.did_revert(), false); + + assert!(matches!( + result, + Err(crate::wasm::TrapReason::Return(crate::wasm::ReturnData { .. })) + )); } - // The same argument as for `seal_return` is true here. #[benchmark(pov_mode = Measured)] - fn seal_terminate(r: Linear<0, 1>) -> Result<(), BenchmarkError> { + fn seal_terminate( + n: Linear<0, { T::MaxDelegateDependencies::get() }>, + ) -> Result<(), BenchmarkError> { let beneficiary = account::("beneficiary", 0, 0); - let beneficiary_bytes = beneficiary.encode(); - let beneficiary_len = beneficiary_bytes.len(); let caller = whitelisted_caller(); + build_runtime!(runtime, memory: [beneficiary.encode(),]); + T::Currency::set_balance(&caller, caller_funding::()); - // Maximize the delegate_dependencies to account for the worst-case scenario. 
- let code_hashes = (0..T::MaxDelegateDependencies::get()) - .map(|i| { - let new_code = WasmModule::::dummy_with_bytes(65 + i); - Contracts::::store_code_raw(new_code.code, caller.clone())?; - Ok(new_code.hash) - }) - .collect::, &'static str>>()?; - let code_hash_len = code_hashes.get(0).map(|x| x.encode().len()).unwrap_or(0); - let code_hashes_bytes = code_hashes.iter().flat_map(|x| x.encode()).collect::>(); - - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ - ImportedFunction { - module: "seal0", - name: "seal_terminate", - params: vec![ValueType::I32, ValueType::I32], - return_type: None, - }, - ImportedFunction { - module: "seal0", - name: "lock_delegate_dependency", - params: vec![ValueType::I32], - return_type: None, - }, - ], - data_segments: vec![ - DataSegment { offset: 0, value: beneficiary_bytes }, - DataSegment { offset: beneficiary_len as u32, value: code_hashes_bytes }, - ], - deploy_body: Some(body::repeated_dyn( - T::MaxDelegateDependencies::get(), - vec![ - Counter(beneficiary_len as u32, code_hash_len as u32), // code_hash_ptr - Regular(Instruction::Call(1)), - ], - )), - call_body: Some(body::repeated( - r, - &[ - Instruction::I32Const(0), // beneficiary_ptr - Instruction::I32Const(beneficiary_len as i32), // beneficiary_len - Instruction::Call(0), - ], - )), - ..Default::default() + (0..n).for_each(|i| { + let new_code = WasmModule::::dummy_with_bytes(65 + i); + Contracts::::store_code_raw(new_code.code, caller.clone()).unwrap(); + runtime.ext().lock_delegate_dependency(new_code.hash).unwrap(); }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - assert_eq!(T::Currency::total_balance(&beneficiary), 0u32.into()); - assert_eq!( - T::Currency::balance(&instance.account_id), - Pallet::::min_balance() * 2u32.into() - ); - assert_ne!( - T::Currency::balance_on_hold( - &HoldReason::StorageDepositReserve.into(), - &instance.account_id - ), - 0u32.into() - ); - assert_eq!( - ContractInfoOf::::get(&instance.account_id) - .unwrap() - .delegate_dependencies_count() as u32, - T::MaxDelegateDependencies::get() - ); - #[extrinsic_call] - call(origin, instance.addr.clone(), 0u32.into(), Weight::MAX, None, vec![]); - - if r > 0 { - assert_eq!(T::Currency::total_balance(&instance.account_id), 0u32.into()); - assert_eq!( - T::Currency::balance_on_hold( - &HoldReason::StorageDepositReserve.into(), - &instance.account_id - ), - 0u32.into() - ); - assert_eq!( - T::Currency::total_balance(&beneficiary), - Pallet::::min_balance() * 2u32.into() - ); + + let result; + #[block] + { + result = BenchEnv::seal1_terminate(&mut runtime, &mut memory, 0); } + + assert!(matches!(result, Err(crate::wasm::TrapReason::Termination))); + Ok(()) } @@ -1129,161 +918,77 @@ mod benchmarks { // number (< 1 KB). Therefore we are not overcharging too much in case a smaller subject is // used. 
	#[benchmark(pov_mode = Measured)]
-	fn seal_random(r: Linear<0, API_BENCHMARK_RUNS>) {
-		let pages = code::max_pages::<T>();
+	fn seal_random() {
		let subject_len = T::Schedule::get().limits.subject_len;
		assert!(subject_len < 1024);
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "seal_random",
-				params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32],
-				return_type: None,
-			}],
-			data_segments: vec![DataSegment {
-				offset: 0,
-				value: (pages * 64 * 1024 - subject_len - 4).to_le_bytes().to_vec(),
-			}],
-			call_body: Some(body::repeated(
-				r,
-				&[
-					Instruction::I32Const(4), // subject_ptr
-					Instruction::I32Const(subject_len as i32), // subject_len
-					Instruction::I32Const((subject_len + 4) as i32), // out_ptr
-					Instruction::I32Const(0), // out_len_ptr
-					Instruction::Call(0),
-				],
-			)),
-			..Default::default()
-		});
-		call_builder!(func, code);
+		let output_len =
+			<(SeedOf<T>, BlockNumberFor<T>) as MaxEncodedLen>::max_encoded_len() as u32;

-		let res;
-		#[block]
-		{
-			res = func.call();
-		}
-		assert_eq!(res.did_revert(), false);
-	}
+		build_runtime!(runtime, memory: [
+			output_len.to_le_bytes(),
+			vec![42u8; subject_len as _],
+			vec![0u8; output_len as _],
+		]);

-	// Overhead of calling the function without any topic.
-	// We benchmark for the worst case (largest event).
-	#[benchmark(pov_mode = Measured)]
-	fn seal_deposit_event(r: Linear<0, API_BENCHMARK_RUNS>) {
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "seal_deposit_event",
-				params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32],
-				return_type: None,
-			}],
-			call_body: Some(body::repeated(
-				r,
-				&[
-					Instruction::I32Const(0), // topics_ptr
-					Instruction::I32Const(0), // topics_len
-					Instruction::I32Const(0), // data_ptr
-					Instruction::I32Const(0), // data_len
-					Instruction::Call(0),
-				],
-			)),
-			..Default::default()
-		});
-
-		call_builder!(func, code);
-
-		let res;
+		let result;
		#[block]
		{
-			res = func.call();
+			result = BenchEnv::seal0_random(
+				&mut runtime,
+				&mut memory,
+				4,               // subject_ptr
+				subject_len,     // subject_len
+				subject_len + 4, // output_ptr
+				0,               // output_len_ptr
+			);
		}
-		assert_eq!(res.did_revert(), false);
+
+		assert_ok!(result);
+		assert_ok!(<(SeedOf<T>, BlockNumberFor<T>)>::decode(&mut &memory[subject_len as _..]));
	}

	// Benchmark the overhead that topics generate.
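The closing assertion of the new `seal_random` decodes the output region as a SCALE-encoded `(seed, block_number)` tuple, which is also where `output_len` comes from. A self-contained round trip with `parity-scale-codec`, using concrete stand-ins for `SeedOf<T>` and `BlockNumberFor<T>`:

```rust
use parity_scale_codec::{Decode, Encode};

fn main() {
	// Stand-ins for SeedOf<T> ([u8; 32]) and BlockNumberFor<T> (u32 here).
	let seed = [7u8; 32];
	let block: u32 = 42;

	// Tuples encode as the concatenation of their fields, so the maximum
	// encoded length is 32 + 4 bytes, which is what the benchmark reserves.
	let out = (seed, block).encode();
	assert_eq!(out.len(), 36);

	let decoded = <([u8; 32], u32)>::decode(&mut &out[..]).unwrap();
	assert_eq!(decoded, (seed, block));
}
```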
	// `t`: Number of topics
	// `n`: Size of event payload in bytes
	#[benchmark(pov_mode = Measured)]
-	fn seal_deposit_event_per_topic_and_byte(
+	fn seal_deposit_event(
		t: Linear<0, { T::Schedule::get().limits.event_topics }>,
		n: Linear<0, { T::Schedule::get().limits.payload_len }>,
	) {
		let topics = (0..t).map(|i| T::Hashing::hash_of(&i)).collect::<Vec<_>>().encode();
-		let topics_len = topics.len();
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "seal_deposit_event",
-				params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32],
-				return_type: None,
-			}],
-			data_segments: vec![DataSegment { offset: 0, value: topics }],
-			call_body: Some(body::plain(vec![
-				Instruction::I32Const(0), // topics_ptr
-				Instruction::I32Const(topics_len as i32), // topics_len
-				Instruction::I32Const(0), // data_ptr
-				Instruction::I32Const(n as i32), // data_len
-				Instruction::Call(0),
-				Instruction::End,
-			])),
-			..Default::default()
-		});
+		let topics_len = topics.len() as u32;

-		call_builder!(func, code);
+		build_runtime!(runtime, memory: [
+			n.to_le_bytes(),
+			topics,
+			vec![0u8; n as _],
+		]);

-		let res;
+		let result;
		#[block]
		{
-			res = func.call();
+			result = BenchEnv::seal0_deposit_event(
+				&mut runtime,
+				&mut memory,
+				4,              // topics_ptr
+				topics_len,     // topics_len
+				4 + topics_len, // data_ptr
+				n,              // data_len
+			);
		}
-		assert_eq!(res.did_revert(), false);
+
+		assert_ok!(result);
	}

-	// Benchmark debug_message call with zero input data.
-	// Whereas this function is used in RPC mode only, it still should be secured
-	// against an excessive use.
+	// Benchmark the debug_message call.
+	// While this function is used in RPC mode only, it should still be secured
+	// against excessive use.
-	#[benchmark(pov_mode = Measured)]
-	fn seal_debug_message(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> {
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory { min_pages: 1, max_pages: 1 }),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "seal_debug_message",
-				params: vec![ValueType::I32, ValueType::I32],
-				return_type: Some(ValueType::I32),
-			}],
-			call_body: Some(body::repeated(
-				r,
-				&[
-					Instruction::I32Const(0), // value_ptr
-					Instruction::I32Const(0), // value_len
-					Instruction::Call(0),
-					Instruction::Drop,
-				],
-			)),
-			..Default::default()
-		});
-		let mut setup = CallSetup::<T>::new(code);
-		setup.enable_debug_message();
-		call_builder!(func, setup: setup);
-
-		let res;
-		#[block]
-		{
-			res = func.call();
-		}
-		assert_eq!(res.did_revert(), false);
-		Ok(())
-	}
-
-	// Vary size of input in bytes up to maximum allowed contract memory
-	// or maximum allowed debug buffer size, whichever is less.
+	//
+	// i: size of input in bytes up to maximum allowed contract memory or maximum allowed debug
+	// buffer size, whichever is less.
	#[benchmark]
-	fn seal_debug_message_per_byte(
+	fn seal_debug_message(
		i: Linear<
			0,
			{
@@ -1291,1619 +996,586 @@ mod benchmarks {
				.min(T::MaxDebugBufferLen::get())
			},
		>,
-	) -> Result<(), BenchmarkError> {
-		// We benchmark versus messages containing printable ASCII codes.
-		// About 1Kb goes to the contract code instructions,
-		// whereas all the space left we use for the initialization of the debug messages data.
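Note how `topics_len` in the merged `seal_deposit_event` above is taken from the SCALE-encoded `Vec` of topic hashes rather than computed as `t * 32`: the encoding carries a compact length prefix. A small standalone check, with `sp_core::H256` standing in for the output of `T::Hashing`:

```rust
use parity_scale_codec::Encode;
use sp_core::H256;

fn main() {
	let topics: Vec<H256> = vec![H256::repeat_byte(1), H256::repeat_byte(2)];
	let encoded = topics.encode();
	// One compact-length byte for len = 2, then the 32-byte topics themselves.
	assert_eq!(encoded.len(), 1 + 2 * 32);
}
```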
- let message = (0..T::MaxCodeLen::get() - 1024) - .zip((32..127).cycle()) - .map(|i| i.1) - .collect::>(); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory { - min_pages: T::Schedule::get().limits.memory_pages, - max_pages: T::Schedule::get().limits.memory_pages, - }), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_debug_message", - params: vec![ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![DataSegment { offset: 0, value: message }], - call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // value_ptr - Instruction::I32Const(i as i32), // value_len - Instruction::Call(0), - Instruction::Drop, - Instruction::End, - ])), - ..Default::default() - }); - let mut setup = CallSetup::::new(code); + ) { + let mut setup = CallSetup::::default(); setup.enable_debug_message(); - call_builder!(func, setup: setup); + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![]); + // Fill memory with printable ASCII bytes. + let mut memory = (0..i).zip((32..127).cycle()).map(|i| i.1).collect::>(); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_debug_message(&mut runtime, &mut memory, 0, i); } - assert_eq!(res.did_revert(), false); + assert_ok!(result); assert_eq!(setup.debug_message().unwrap().len() as u32, i); - Ok(()) } - // Only the overhead of calling the function itself with minimal arguments. - // The contract is a bit more complex because it needs to use different keys in order - // to generate unique storage accesses. However, it is still dominated by the storage - // accesses. We store something at all the keys that we are about to write to - // because re-writing at an existing key is always more expensive than writing - // to an key with no data behind it. - // - // # Note - // - // We need to use a smaller `r` because the keys are big and writing them all into the wasm - // might exceed the code size. 
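The printable-ASCII payload of the old per-byte benchmark survives in the new `seal_debug_message`, just built directly into the memory buffer. The fill expression as a standalone snippet:

```rust
fn main() {
	let i = 16u32;
	// Cycle through the printable ASCII range (32..127) until the buffer
	// holds `i` bytes; this is the same iterator chain as in the benchmark.
	let memory: Vec<u8> = (0..i).zip((32u8..127).cycle()).map(|pair| pair.1).collect();
	assert_eq!(memory.len(), i as usize);
	assert!(memory.iter().all(|b| b.is_ascii_graphic() || *b == b' '));
}
```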
+ // n: new byte size + // o: old byte size #[benchmark(skip_meta, pov_mode = Measured)] - fn seal_set_storage(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { - let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0..r) - .map(|n| { - let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); - h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); - h - }) - .collect::>(); - let keys_bytes = keys.iter().flatten().cloned().collect::>(); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal2", - name: "set_storage", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![DataSegment { offset: 0, value: keys_bytes }], - call_body: Some(body::repeated_dyn( - r, - vec![ - Counter(0, max_key_len as u32), // key_ptr - Regular(Instruction::I32Const(max_key_len as i32)), // key_len - Regular(Instruction::I32Const(0)), // value_ptr - Regular(Instruction::I32Const(0)), // value_len - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ], - )), - ..Default::default() - }); - - call_builder!(func, instance, code); - let info = instance.info()?; - for key in keys { - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![]), - None, - false, - ) - .map_err(|_| "Failed to write to storage during setup.")?; - } - - let res; - #[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); - Ok(()) - } - - #[benchmark(skip_meta, pov_mode = Measured)] - fn seal_set_storage_per_new_byte( + fn seal_set_storage( n: Linear<0, { T::Schedule::get().limits.payload_len }>, + o: Linear<0, { T::Schedule::get().limits.payload_len }>, ) -> Result<(), BenchmarkError> { let max_key_len = T::MaxStorageKeyLen::get(); - let key = vec![0u8; max_key_len as usize]; - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal2", - name: "set_storage", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![DataSegment { offset: 0, value: key.clone() }], - call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // key_ptr - Instruction::I32Const(max_key_len as i32), // key_len - Instruction::I32Const(0), // value_ptr - Instruction::I32Const(n as i32), // value_len - Instruction::Call(0), - Instruction::Drop, - Instruction::End, - ])), - ..Default::default() - }); - call_builder!(func, instance, code); - let info = instance.info()?; - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![]), - None, - false, - ) - .map_err(|_| "Failed to write to storage during setup.")?; - - let res; - #[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); - Ok(()) - } + let key = Key::::try_from_var(vec![0u8; max_key_len as usize]) + .map_err(|_| "Key has wrong length")?; + let value = vec![1u8; n as usize]; - #[benchmark(skip_meta, pov_mode = Measured)] - fn seal_set_storage_per_old_byte( - n: Linear<0, { T::Schedule::get().limits.payload_len }>, - ) -> Result<(), BenchmarkError> { - let max_key_len = T::MaxStorageKeyLen::get(); - let key = vec![0u8; max_key_len as usize]; - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: 
"seal2", - name: "set_storage", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![DataSegment { offset: 0, value: key.clone() }], - call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // key_ptr - Instruction::I32Const(max_key_len as i32), // key_len - Instruction::I32Const(0), // value_ptr - Instruction::I32Const(0), /* value_len is 0 as testing vs - * pre-existing value len */ - Instruction::Call(0), - Instruction::Drop, - Instruction::End, - ])), - ..Default::default() - }); - call_builder!(func, instance, code); + build_runtime!(runtime, instance, memory: [ key.to_vec(), value.clone(), ]); let info = instance.info()?; - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![42u8; n as usize]), - None, - false, - ) - .map_err(|_| "Failed to write to storage during setup.")?; - let res; - #[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); - Ok(()) - } - - // Similar to seal_set_storage. We store all the keys that we are about to - // delete beforehand in order to prevent any optimizations that could occur when - // deleting a non existing key. We generate keys of a maximum length, and have to - // the amount of runs in order to make resulting contract code size less than MaxCodeLen. - #[benchmark(skip_meta, pov_mode = Measured)] - fn seal_clear_storage(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { - let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0..r) - .map(|n| { - let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); - h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); - h - }) - .collect::>(); - let key_bytes = keys.iter().flatten().cloned().collect::>(); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal1", - name: "clear_storage", - params: vec![ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![DataSegment { offset: 0, value: key_bytes }], - call_body: Some(body::repeated_dyn( - r, - vec![ - Counter(0, max_key_len as u32), // key_ptr - Regular(Instruction::I32Const(max_key_len as i32)), // key_len - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ], - )), - ..Default::default() - }); - call_builder!(func, instance, code); - let info = instance.info()?; - for key in keys { - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![]), - None, - false, - ) + info.write(&key, Some(vec![42u8; o as usize]), None, false) .map_err(|_| "Failed to write to storage during setup.")?; - } - >::insert(&instance.account_id, info); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal2_set_storage( + &mut runtime, + &mut memory, + 0, // key_ptr + max_key_len, // key_len + max_key_len, // value_ptr + n, // value_len + ); } - assert_eq!(res.did_revert(), false); + + assert_ok!(result); + assert_eq!(info.read(&key).unwrap(), value); Ok(()) } #[benchmark(skip_meta, pov_mode = Measured)] - fn seal_clear_storage_per_byte( + fn seal_clear_storage( n: Linear<0, { T::Schedule::get().limits.payload_len }>, ) -> Result<(), BenchmarkError> { let max_key_len = T::MaxStorageKeyLen::get(); - let key = vec![0u8; max_key_len as usize]; - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - 
module: "seal1", - name: "clear_storage", - params: vec![ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![DataSegment { offset: 0, value: key.clone() }], - call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // key_ptr - Instruction::I32Const(max_key_len as i32), // key_len - Instruction::Call(0), - Instruction::Drop, - Instruction::End, - ])), - ..Default::default() - }); - call_builder!(func, instance, code); + let key = Key::::try_from_var(vec![0u8; max_key_len as usize]) + .map_err(|_| "Key has wrong length")?; + build_runtime!(runtime, instance, memory: [ key.to_vec(), ]); let info = instance.info()?; - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![42u8; n as usize]), - None, - false, - ) - .map_err(|_| "Failed to write to storage during setup.")?; - - let res; - #[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); - Ok(()) - } - // We make sure that all storage accesses are to unique keys. - #[benchmark(skip_meta, pov_mode = Measured)] - fn seal_get_storage(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { - let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0..r) - .map(|n| { - let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); - h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); - h - }) - .collect::>(); - let key_bytes = keys.iter().flatten().cloned().collect::>(); - let key_bytes_len = key_bytes.len(); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal1", - name: "get_storage", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![ - DataSegment { offset: 0, value: key_bytes }, - DataSegment { - offset: key_bytes_len as u32, - value: T::Schedule::get().limits.payload_len.to_le_bytes().into(), - }, - ], - call_body: Some(body::repeated_dyn( - r, - vec![ - Counter(0, max_key_len), // key_ptr - Regular(Instruction::I32Const(max_key_len as i32)), // key_len - Regular(Instruction::I32Const((key_bytes_len + 4) as i32)), // out_ptr - Regular(Instruction::I32Const(key_bytes_len as i32)), // out_len_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ], - )), - ..Default::default() - }); - call_builder!(func, instance, code); - let info = instance.info()?; - for key in keys { - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![]), - None, - false, - ) + info.write(&key, Some(vec![42u8; n as usize]), None, false) .map_err(|_| "Failed to write to storage during setup.")?; - } - >::insert(&instance.account_id, info); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal1_clear_storage(&mut runtime, &mut memory, 0, max_key_len); } - assert_eq!(res.did_revert(), false); + + assert_ok!(result); + assert!(info.read(&key).is_none()); Ok(()) } #[benchmark(skip_meta, pov_mode = Measured)] - fn seal_get_storage_per_byte( + fn seal_get_storage( n: Linear<0, { T::Schedule::get().limits.payload_len }>, ) -> Result<(), BenchmarkError> { let max_key_len = T::MaxStorageKeyLen::get(); - let key = vec![0u8; max_key_len as usize]; - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal1", - name: "get_storage", - params: vec![ValueType::I32, ValueType::I32, 
ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![ - DataSegment { offset: 0, value: key.clone() }, - DataSegment { - offset: max_key_len, - value: T::Schedule::get().limits.payload_len.to_le_bytes().into(), - }, - ], - call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // key_ptr - Instruction::I32Const(max_key_len as i32), // key_len - Instruction::I32Const((max_key_len + 4) as i32), // out_ptr - Instruction::I32Const(max_key_len as i32), // out_len_ptr - Instruction::Call(0), - Instruction::Drop, - Instruction::End, - ])), - ..Default::default() - }); - call_builder!(func, instance, code); + let key = Key::::try_from_var(vec![0u8; max_key_len as usize]) + .map_err(|_| "Key has wrong length")?; + build_runtime!(runtime, instance, memory: [ key.to_vec(), n.to_le_bytes(), vec![0u8; n as _], ]); let info = instance.info()?; - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![42u8; n as usize]), - None, - false, - ) - .map_err(|_| "Failed to write to storage during setup.")?; - >::insert(&instance.account_id, info); - let res; + info.write(&key, Some(vec![42u8; n as usize]), None, false) + .map_err(|_| "Failed to write to storage during setup.")?; + + let out_ptr = max_key_len + 4; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal1_get_storage( + &mut runtime, + &mut memory, + 0, // key_ptr + max_key_len, // key_len + out_ptr, // out_ptr + max_key_len, // out_len_ptr + ); } - assert_eq!(res.did_revert(), false); + assert_ok!(result); + assert_eq!(&info.read(&key).unwrap(), &memory[out_ptr as usize..]); Ok(()) } - // We make sure that all storage accesses are to unique keys. #[benchmark(skip_meta, pov_mode = Measured)] fn seal_contains_storage( - r: Linear<0, { API_BENCHMARK_RUNS / 2 }>, + n: Linear<0, { T::Schedule::get().limits.payload_len }>, ) -> Result<(), BenchmarkError> { let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0..r) - .map(|n| { - let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); - h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); - h - }) - .collect::>(); - let key_bytes = keys.iter().flatten().cloned().collect::>(); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal1", - name: "contains_storage", - params: vec![ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![DataSegment { offset: 0, value: key_bytes }], - call_body: Some(body::repeated_dyn( - r, - vec![ - Counter(0, max_key_len as u32), // key_ptr - Regular(Instruction::I32Const(max_key_len as i32)), // key_len - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ], - )), - ..Default::default() - }); - call_builder!(func, instance, code); + let key = Key::::try_from_var(vec![0u8; max_key_len as usize]) + .map_err(|_| "Key has wrong length")?; + build_runtime!(runtime, instance, memory: [ key.to_vec(), ]); let info = instance.info()?; - for key in keys { - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![]), - None, - false, - ) + + info.write(&key, Some(vec![42u8; n as usize]), None, false) .map_err(|_| "Failed to write to storage during setup.")?; - } - >::insert(&instance.account_id, info); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal1_contains_storage(&mut runtime, &mut memory, 0, max_key_len); } - assert_eq!(res.did_revert(), false); 
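A remark on the storage family above: `seal_set_storage` is now measured over two axes, the new value length `n` and the pre-existing value length `o`, since an overwrite also has to handle the value it replaces. A toy model of that dependency (a plain `HashMap` standing in for the contract's child-trie storage):

```rust
use std::collections::HashMap;

/// Toy set_storage: returns the length of the value it replaced, mirroring
/// how the real host function's cost depends on both old and new value sizes.
fn set_storage(store: &mut HashMap<Vec<u8>, Vec<u8>>, key: &[u8], value: Vec<u8>) -> Option<usize> {
	store.insert(key.to_vec(), value).map(|old| old.len())
}

fn main() {
	let mut store = HashMap::new();
	assert_eq!(set_storage(&mut store, b"k", vec![42; 7]), None); // fresh write: o = 0
	assert_eq!(set_storage(&mut store, b"k", vec![1; 3]), Some(7)); // overwrite: o = 7, n = 3
}
```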
+ + assert_eq!(result.unwrap(), n); Ok(()) } #[benchmark(skip_meta, pov_mode = Measured)] - fn seal_contains_storage_per_byte( + fn seal_take_storage( n: Linear<0, { T::Schedule::get().limits.payload_len }>, ) -> Result<(), BenchmarkError> { let max_key_len = T::MaxStorageKeyLen::get(); - let key = vec![0u8; max_key_len as usize]; - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal1", - name: "contains_storage", - params: vec![ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![DataSegment { offset: 0, value: key.clone() }], - call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // key_ptr - Instruction::I32Const(max_key_len as i32), // key_len - Instruction::Call(0), - Instruction::Drop, - Instruction::End, - ])), - ..Default::default() - }); - call_builder!(func, instance, code); + let key = Key::::try_from_var(vec![0u8; max_key_len as usize]) + .map_err(|_| "Key has wrong length")?; + build_runtime!(runtime, instance, memory: [ key.to_vec(), n.to_le_bytes(), vec![0u8; n as _], ]); let info = instance.info()?; - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![42u8; n as usize]), - None, - false, - ) - .map_err(|_| "Failed to write to storage during setup.")?; - >::insert(&instance.account_id, info); - let res; - #[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); - Ok(()) - } - - #[benchmark(skip_meta, pov_mode = Measured)] - fn seal_take_storage(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { - let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0..r) - .map(|n| { - let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); - h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); - h - }) - .collect::>(); - let key_bytes = keys.iter().flatten().cloned().collect::>(); - let key_bytes_len = key_bytes.len(); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "take_storage", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![ - DataSegment { offset: 0, value: key_bytes }, - DataSegment { - offset: key_bytes_len as u32, - value: T::Schedule::get().limits.payload_len.to_le_bytes().into(), - }, - ], - call_body: Some(body::repeated_dyn( - r, - vec![ - Counter(0, max_key_len as u32), // key_ptr - Regular(Instruction::I32Const(max_key_len as i32)), // key_len - Regular(Instruction::I32Const((key_bytes_len + 4) as i32)), // out_ptr - Regular(Instruction::I32Const(key_bytes_len as i32)), // out_len_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ], - )), - ..Default::default() - }); - call_builder!(func, instance, code); - let info = instance.info()?; - for key in keys { - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![]), - None, - false, - ) + let value = vec![42u8; n as usize]; + info.write(&key, Some(value.clone()), None, false) .map_err(|_| "Failed to write to storage during setup.")?; - } - >::insert(&instance.account_id, info); - let res; + let out_ptr = max_key_len + 4; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_take_storage( + &mut runtime, + &mut memory, + 0, // key_ptr + max_key_len, // key_len + out_ptr, // out_ptr + max_key_len, // 
out_len_ptr + ); } - assert_eq!(res.did_revert(), false); - Ok(()) - } - #[benchmark(skip_meta, pov_mode = Measured)] - fn seal_take_storage_per_byte( - n: Linear<0, { T::Schedule::get().limits.payload_len }>, - ) -> Result<(), BenchmarkError> { - let max_key_len = T::MaxStorageKeyLen::get(); - let key = vec![0u8; max_key_len as usize]; - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "take_storage", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![ - DataSegment { offset: 0, value: key.clone() }, - DataSegment { - offset: max_key_len, - value: T::Schedule::get().limits.payload_len.to_le_bytes().into(), - }, - ], - call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // key_ptr - Instruction::I32Const(max_key_len as i32), // key_len - Instruction::I32Const((max_key_len + 4) as i32), // out_ptr - Instruction::I32Const(max_key_len as i32), // out_len_ptr - Instruction::Call(0), - Instruction::Drop, - Instruction::End, - ])), - ..Default::default() - }); - call_builder!(func, instance, code); - let info = instance.info()?; - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![42u8; n as usize]), - None, - false, - ) - .map_err(|_| "Failed to write to storage during setup.")?; - >::insert(&instance.account_id, info); - - let res; - #[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); + assert_ok!(result); + assert!(&info.read(&key).is_none()); + assert_eq!(&value, &memory[out_ptr as usize..]); Ok(()) } // We transfer to unique accounts. #[benchmark(pov_mode = Measured)] - fn seal_transfer(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { - let accounts = - (0..r).map(|i| account::("receiver", i, 0)).collect::>(); - let account_len = accounts.get(0).map(|i| i.encode().len()).unwrap_or(0); - let account_bytes = accounts.iter().flat_map(|x| x.encode()).collect(); + fn seal_transfer() { + let account = account::("receiver", 0, 0); let value = Pallet::::min_balance(); assert!(value > 0u32.into()); - let value_bytes = value.encode(); - let value_len = value_bytes.len(); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_transfer", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![ - DataSegment { offset: 0, value: value_bytes }, - DataSegment { offset: value_len as u32, value: account_bytes }, - ], - call_body: Some(body::repeated_dyn( - r, - vec![ - Counter(value_len as u32, account_len as u32), // account_ptr - Regular(Instruction::I32Const(account_len as i32)), // account_len - Regular(Instruction::I32Const(0)), // value_ptr - Regular(Instruction::I32Const(value_len as i32)), // value_len - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ], - )), - ..Default::default() - }); - let mut setup = CallSetup::::new(code); - setup.set_balance(value * (r + 1).into()); - call_builder!(func, setup: setup); - for account in &accounts { - assert_eq!(T::Currency::total_balance(account), 0u32.into()); - } + let mut setup = CallSetup::::default(); + setup.set_balance(value); + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![]); - let res; - 
	#[block]
-		{
-			res = func.call();
-		}
-		assert_eq!(res.did_revert(), false);
-
-		for account in &accounts {
-			assert_eq!(T::Currency::total_balance(account), value);
-		}
-		Ok(())
-	}
-
-	// We call unique accounts.
-	//
-	// This is a slow call: We reduce the number of runs.
-	#[benchmark(pov_mode = Measured)]
-	fn seal_call(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> {
-		let dummy_code = WasmModule::<T>::dummy_with_bytes(0);
-		let callees = (0..r)
-			.map(|i| Contract::with_index(i + 1, dummy_code.clone(), vec![]))
-			.collect::<Result<Vec<_>, _>>()?;
-		let callee_len = callees.get(0).map(|i| i.account_id.encode().len()).unwrap_or(0);
-		let callee_bytes = callees.iter().flat_map(|x| x.account_id.encode()).collect();
-		let value: BalanceOf<T> = 0u32.into();
+		let account_bytes = account.encode();
+		let account_len = account_bytes.len() as u32;
		let value_bytes = value.encode();
-		let value_len = BalanceOf::<T>::max_encoded_len() as u32;
-		// Set an own limit every 2nd call
-		let own_limit = (u32::MAX - 100).into();
-		let deposits = (0..r)
-			.map(|i| if i % 2 == 0 { 0u32.into() } else { own_limit })
-			.collect::<Vec<BalanceOf<T>>>();
-		let deposits_bytes: Vec<u8> = deposits.iter().flat_map(|i| i.encode()).collect();
-		let deposits_len = deposits_bytes.len() as u32;
-		let deposit_len = value_len;
-		let callee_offset = value_len + deposits_len;
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal2",
-				name: "call",
-				params: vec![
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I64,
-					ValueType::I64,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-				],
-				return_type: Some(ValueType::I32),
-			}],
-			data_segments: vec![
-				DataSegment { offset: 0, value: value_bytes },
-				DataSegment { offset: value_len, value: deposits_bytes },
-				DataSegment { offset: callee_offset, value: callee_bytes },
-			],
-			call_body: Some(body::repeated_dyn(
-				r,
-				vec![
-					Regular(Instruction::I32Const(0)), // flags
-					Counter(callee_offset, callee_len as u32), // callee_ptr
-					Regular(Instruction::I64Const(0)), // ref_time weight
-					Regular(Instruction::I64Const(0)), // proof_size weight
-					Counter(value_len, deposit_len as u32), // deposit_limit_ptr
-					Regular(Instruction::I32Const(0)), // value_ptr
-					Regular(Instruction::I32Const(0)), // input_data_ptr
-					Regular(Instruction::I32Const(0)), // input_data_len
-					Regular(Instruction::I32Const(SENTINEL as i32)), // output_ptr
-					Regular(Instruction::I32Const(0)), // output_len_ptr
-					Regular(Instruction::Call(0)),
-					Regular(Instruction::Drop),
-				],
-			)),
-			..Default::default()
-		});
-		let mut setup = CallSetup::<T>::new(code);
-		setup.set_storage_deposit_limit(BalanceOf::<T>::from(u32::MAX.into()));
-		call_builder!(func, setup: setup);
+		let value_len = value_bytes.len() as u32;
+		let mut memory = memory!(account_bytes, value_bytes,);

-		let res;
+		let result;
		#[block]
		{
-			res = func.call();
+			result = BenchEnv::seal0_transfer(
+				&mut runtime,
+				&mut memory,
+				0,           // account_ptr
+				account_len, // account_len
+				account_len, // value_ptr
+				value_len,   // value_len
+			);
		}
-		assert_eq!(res.did_revert(), false);
-		Ok(())
-	}
-	// This is a slow call: We reduce the number of runs.
- #[benchmark(pov_mode = Measured)] - fn seal_delegate_call(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { - let hashes = (0..r) - .map(|i| { - let code = WasmModule::::dummy_with_bytes(i); - let caller = whitelisted_caller(); - T::Currency::set_balance(&caller, caller_funding::()); - Contracts::::store_code_raw(code.code, caller)?; - Ok(code.hash) - }) - .collect::, &'static str>>()?; - let hash_len = hashes.get(0).map(|x| x.encode().len()).unwrap_or(0); - let hashes_bytes = hashes.iter().flat_map(|x| x.encode()).collect::>(); - let hashes_offset = 0; - - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_delegate_call", - params: vec![ - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ], - return_type: Some(ValueType::I32), - }], - data_segments: vec![DataSegment { offset: hashes_offset as u32, value: hashes_bytes }], - call_body: Some(body::repeated_dyn( - r, - vec![ - Regular(Instruction::I32Const(0)), // flags - Counter(hashes_offset as u32, hash_len as u32), // code_hash_ptr - Regular(Instruction::I32Const(0)), // input_data_ptr - Regular(Instruction::I32Const(0)), // input_data_len - Regular(Instruction::I32Const(u32::max_value() as i32)), // output_ptr - Regular(Instruction::I32Const(0)), // output_len_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ], - )), - ..Default::default() - }); - call_builder!(func, code); - - let res; - #[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); - Ok(()) + assert_ok!(result); } + // t: with or without some value to transfer + // i: size of the input data #[benchmark(pov_mode = Measured)] - fn seal_call_per_transfer_clone_byte( - t: Linear<0, { 1 }>, - c: Linear<0, { code::max_pages::() * 64 * 1024 }>, - ) -> Result<(), BenchmarkError> { - let callee = Contract::with_index(5, >::dummy(), vec![])?; + fn seal_call(t: Linear<0, 1>, i: Linear<0, { code::max_pages::() * 64 * 1024 }>) { + let Contract { account_id: callee, .. 
} = + Contract::::with_index(1, WasmModule::dummy(), vec![]).unwrap(); + let callee_bytes = callee.encode(); + let callee_len = callee_bytes.len() as u32; + let value: BalanceOf = t.into(); let value_bytes = value.encode(); - let value_len = value_bytes.len(); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal1", - name: "seal_call", - params: vec![ - ValueType::I32, - ValueType::I32, - ValueType::I64, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ], - return_type: Some(ValueType::I32), - }], - data_segments: vec![ - DataSegment { offset: 0, value: value_bytes }, - DataSegment { offset: value_len as u32, value: callee.account_id.encode() }, - ], - call_body: Some(body::plain(vec![ - Instruction::I32Const(CallFlags::CLONE_INPUT.bits() as i32), // flags - Instruction::I32Const(value_len as i32), // callee_ptr - Instruction::I64Const(0), // gas - Instruction::I32Const(0), // value_ptr - Instruction::I32Const(0), // input_data_ptr - Instruction::I32Const(0), // input_data_len - Instruction::I32Const(SENTINEL as i32), // output_ptr - Instruction::I32Const(0), // output_len_ptr - Instruction::Call(0), - Instruction::Drop, - Instruction::End, - ])), - ..Default::default() - }); - let mut setup = CallSetup::::new(code); - setup.set_data(vec![42; c as usize]); - call_builder!(func, setup: setup); - let res; - #[block] - { - res = func.call(); + let deposit: BalanceOf = (u32::MAX - 100).into(); + let deposit_bytes = deposit.encode(); + let deposit_len = deposit_bytes.len() as u32; + + let mut setup = CallSetup::::default(); + setup.set_storage_deposit_limit(deposit); + setup.set_data(vec![42; i as usize]); + setup.set_origin(Origin::from_account_id(setup.contract().account_id.clone())); + + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![]); + let mut memory = memory!(callee_bytes, deposit_bytes, value_bytes,); + + let result; + #[block] + { + result = BenchEnv::seal2_call( + &mut runtime, + &mut memory, + CallFlags::CLONE_INPUT.bits(), // flags + 0, // callee_ptr + 0, // ref_time_limit + 0, // proof_size_limit + callee_len, // deposit_ptr + callee_len + deposit_len, // value_ptr + 0, // input_data_ptr + 0, // input_data_len + SENTINEL, // output_ptr + 0, // output_len_ptr + ); } - assert_eq!(res.did_revert(), false); - Ok(()) + + assert_ok!(result); } - // We assume that every instantiate sends at least the minimum balance. - // This is a slow call: we reduce the number of runs. #[benchmark(pov_mode = Measured)] - fn seal_instantiate(r: Linear<1, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { - let hashes = (0..r) - .map(|i| { - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - call_body: Some(body::plain(vec![ - // We need to add this in order to make contracts unique, - // so that they can be deployed from the same sender. 
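The rewritten `seal_call` above passes `CallFlags::CLONE_INPUT.bits()` so the `i`-byte input is copied to the callee on every run. Across the host boundary the flags are just a `u32` bitmask; a sketch with the `bitflags` crate, using the flag values the pallet documents (an assumption worth double-checking against the pallet's `wasm` module):

```rust
use bitflags::bitflags;

bitflags! {
	/// Illustrative reconstruction of the call flags, assuming the documented
	/// values: FORWARD_INPUT = 1, CLONE_INPUT = 2, TAIL_CALL = 4, ALLOW_REENTRY = 8.
	struct CallFlags: u32 {
		const FORWARD_INPUT = 0b0001;
		const CLONE_INPUT = 0b0010;
		const TAIL_CALL = 0b0100;
		const ALLOW_REENTRY = 0b1000;
	}
}

fn main() {
	// What actually crosses the host-function boundary is the raw bitmask.
	assert_eq!(CallFlags::CLONE_INPUT.bits(), 2);
}
```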
- Instruction::I32Const(i as i32), - Instruction::Drop, - Instruction::End, - ])), - ..Default::default() - }); - let caller = whitelisted_caller(); - T::Currency::set_balance(&caller, caller_funding::()); - Contracts::::store_code_raw(code.code, caller)?; - Ok(code.hash) - }) - .collect::, &'static str>>()?; - let hash_len = hashes.get(0).map(|x| x.encode().len()).unwrap_or(0); - let hashes_bytes = hashes.iter().flat_map(|x| x.encode()).collect::>(); - let hashes_len = &hashes_bytes.len(); - let value = Pallet::::min_balance(); - assert!(value > 0u32.into()); - let value_bytes = value.encode(); - let value_len = BalanceOf::::max_encoded_len(); - let addr_len = T::AccountId::max_encoded_len(); - // Offsets where to place static data in contract memory. - let hashes_offset = value_len; - let addr_len_offset = hashes_offset + hashes_len; - let addr_offset = addr_len_offset + addr_len; - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal2", - name: "instantiate", - params: vec![ - ValueType::I32, - ValueType::I64, - ValueType::I64, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ], - return_type: Some(ValueType::I32), - }], - data_segments: vec![ - DataSegment { offset: 0, value: value_bytes }, - DataSegment { offset: hashes_offset as u32, value: hashes_bytes }, - DataSegment { - offset: addr_len_offset as u32, - value: addr_len.to_le_bytes().into(), - }, - ], - call_body: Some(body::repeated_dyn( - r, - vec![ - Counter(hashes_offset as u32, hash_len as u32), // code_hash_ptr - Regular(Instruction::I64Const(0)), // ref_time weight - Regular(Instruction::I64Const(0)), // proof_size weight - Regular(Instruction::I32Const(SENTINEL as i32)), /* deposit limit ptr: use - * parent's limit */ - Regular(Instruction::I32Const(0)), // value_ptr - Regular(Instruction::I32Const(0)), // input_data_ptr - Regular(Instruction::I32Const(0)), // input_data_len - Regular(Instruction::I32Const(addr_offset as i32)), // address_ptr - Regular(Instruction::I32Const(addr_len_offset as i32)), // address_len_ptr - Regular(Instruction::I32Const(SENTINEL as i32)), // output_ptr - Regular(Instruction::I32Const(0)), // output_len_ptr - Regular(Instruction::I32Const(0)), // salt_ptr - Regular(Instruction::I32Const(0)), // salt_len_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ], - )), - ..Default::default() - }); - let mut setup = CallSetup::::new(code); - setup.set_balance((value + Pallet::::min_balance()) * (r + 1).into()); - call_builder!(func, instance, setup: setup); - let addresses = hashes - .iter() - .map(|hash| Contracts::::contract_address(&instance.account_id, hash, &[], &[])) - .collect::>(); - - for addr in &addresses { - if ContractInfoOf::::get(&addr).is_some() { - return Err("Expected that contract does not exist at this point.".into()); - } - } + fn seal_delegate_call() -> Result<(), BenchmarkError> { + let hash = Contract::::with_index(1, WasmModule::dummy(), vec![])?.info()?.code_hash; + + let mut setup = CallSetup::::default(); + setup.set_origin(Origin::from_account_id(setup.contract().account_id.clone())); + + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![]); + let mut memory = memory!(hash.encode(),); - let res; + let result; #[block] { - res = func.call(); - } - assert_eq!(res.did_revert(), false); - 
for addr in &addresses { - ContractInfoOf::::get(&addr).ok_or("Contract should have been instantiated")?; + result = BenchEnv::seal0_delegate_call( + &mut runtime, + &mut memory, + 0, // flags + 0, // code_hash_ptr + 0, // input_data_ptr + 0, // input_data_len + SENTINEL, // output_ptr + 0, + ); } + + assert_ok!(result); Ok(()) } + // t: value to transfer + // i: size of input in bytes + // s: size of salt in bytes #[benchmark(pov_mode = Measured)] - fn seal_instantiate_per_transfer_input_salt_byte( + fn seal_instantiate( t: Linear<0, 1>, i: Linear<0, { (code::max_pages::() - 1) * 64 * 1024 }>, s: Linear<0, { (code::max_pages::() - 1) * 64 * 1024 }>, ) -> Result<(), BenchmarkError> { - let callee_code = WasmModule::::dummy(); - let hash_bytes = callee_code.hash.encode(); - let hash_len = hash_bytes.len(); - let caller = whitelisted_caller(); - T::Currency::set_balance(&caller, caller_funding::()); - Contracts::::store_code_raw(callee_code.code, caller)?; + let hash = Contract::::with_index(1, WasmModule::dummy(), vec![])?.info()?.code_hash; + let hash_bytes = hash.encode(); + let hash_len = hash_bytes.len() as u32; + let value: BalanceOf = t.into(); let value_bytes = value.encode(); + let value_len = value_bytes.len() as u32; - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal1", - name: "seal_instantiate", - params: vec![ - ValueType::I32, - ValueType::I64, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ], - return_type: Some(ValueType::I32), - }], - data_segments: vec![ - DataSegment { offset: 0, value: hash_bytes }, - DataSegment { offset: hash_len as u32, value: value_bytes }, - ], - call_body: Some(body::plain(vec![ - Instruction::I32Const(0 as i32), // code_hash_ptr - Instruction::I64Const(0), // gas - Instruction::I32Const(hash_len as i32), // value_ptr - Instruction::I32Const(0 as i32), // input_data_ptr - Instruction::I32Const(i as i32), // input_data_len - Instruction::I32Const(SENTINEL as i32), // address_ptr - Instruction::I32Const(0), // address_len_ptr - Instruction::I32Const(SENTINEL as i32), // output_ptr - Instruction::I32Const(0), // output_len_ptr - Instruction::I32Const(0 as i32), // salt_ptr - Instruction::I32Const(s as i32), // salt_len - Instruction::Call(0), - Instruction::I32Eqz, - Instruction::If(BlockType::NoResult), - Instruction::Nop, - Instruction::Else, - Instruction::Unreachable, - Instruction::End, - Instruction::End, - ])), - ..Default::default() - }); - let mut setup = CallSetup::::new(code); + let deposit: BalanceOf = 0u32.into(); + let deposit_bytes = deposit.encode(); + let deposit_len = deposit_bytes.len() as u32; + + let mut setup = CallSetup::::default(); + setup.set_origin(Origin::from_account_id(setup.contract().account_id.clone())); setup.set_balance(value + (Pallet::::min_balance() * 2u32.into())); - call_builder!(func, setup: setup); - let res; - #[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); - Ok(()) - } + let account_id = &setup.contract().account_id.clone(); + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![]); - // Only the overhead of calling the function itself with minimal arguments. 
- #[benchmark(pov_mode = Measured)] - fn seal_hash_sha2_256(r: Linear<0, API_BENCHMARK_RUNS>) { - call_builder!(func, WasmModule::hasher("seal_hash_sha2_256", r, 0)); + let input = vec![42u8; i as _]; + let salt = vec![42u8; s as _]; + let addr = Contracts::::contract_address(&account_id, &hash, &input, &salt); + let mut memory = memory!(hash_bytes, deposit_bytes, value_bytes, input, salt,); - let res; - #[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); - } - - // `n`: Input to hash in bytes - #[benchmark(pov_mode = Measured)] - fn seal_hash_sha2_256_per_byte(n: Linear<0, { code::max_pages::() * 64 * 1024 }>) { - call_builder!(func, WasmModule::hasher("seal_hash_sha2_256", 1, n)); - - let res; - #[block] - { - res = func.call(); + let mut offset = { + let mut current = 0u32; + move |after: u32| { + current += after; + current + } + }; + + assert!(ContractInfoOf::::get(&addr).is_none()); + + let result; + #[block] + { + result = BenchEnv::seal2_instantiate( + &mut runtime, + &mut memory, + 0, // code_hash_ptr + 0, // ref_time_limit + 0, // proof_size_limit + offset(hash_len), // deposit_ptr + offset(deposit_len), // value_ptr + offset(value_len), // input_data_ptr + i, // input_data_len + SENTINEL, // address_ptr + 0, // address_len_ptr + SENTINEL, // output_ptr + 0, // output_len_ptr + offset(i), // salt_ptr + s, // salt_len + ); } - assert_eq!(res.did_revert(), false); - } - // Only the overhead of calling the function itself with minimal arguments. - #[benchmark(pov_mode = Measured)] - fn seal_hash_keccak_256(r: Linear<0, API_BENCHMARK_RUNS>) { - call_builder!(func, WasmModule::hasher("seal_hash_keccak_256", r, 0)); - - let res; - #[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); + assert_ok!(result); + assert!(ContractInfoOf::::get(&addr).is_some()); + Ok(()) } // `n`: Input to hash in bytes #[benchmark(pov_mode = Measured)] - fn seal_hash_keccak_256_per_byte(n: Linear<0, { code::max_pages::() * 64 * 1024 }>) { - call_builder!(func, WasmModule::hasher("seal_hash_keccak_256", 1, n)); - - let res; - #[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); - } - - // Only the overhead of calling the function itself with minimal arguments. 
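The `offset` closure in the new `seal_instantiate` turns the flat `memory!(hash, deposit, value, input, salt)` layout into pointer arguments by accumulating field lengths. The same snippet, standalone:

```rust
fn main() {
	// A stateful cursor: each call advances past `after` bytes and returns
	// the new position, i.e. the start of the next field in the buffer.
	let mut offset = {
		let mut current = 0u32;
		move |after: u32| {
			current += after;
			current
		}
	};
	assert_eq!(offset(32), 32); // e.g. skip a 32-byte hash -> deposit_ptr
	assert_eq!(offset(16), 48); // skip the encoded deposit -> value_ptr
}
```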
- #[benchmark(pov_mode = Measured)] - fn seal_hash_blake2_256(r: Linear<0, API_BENCHMARK_RUNS>) { - call_builder!(func, WasmModule::hasher("seal_hash_blake2_256", r, 0)); + fn seal_hash_sha2_256(n: Linear<0, { code::max_pages::() * 64 * 1024 }>) { + build_runtime!(runtime, memory: [[0u8; 32], vec![0u8; n as usize], ]); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_hash_sha2_256(&mut runtime, &mut memory, 32, n, 0); } - assert_eq!(res.did_revert(), false); + assert_eq!(sp_io::hashing::sha2_256(&memory[32..]), &memory[0..32]); + assert_ok!(result); } // `n`: Input to hash in bytes #[benchmark(pov_mode = Measured)] - fn seal_hash_blake2_256_per_byte(n: Linear<0, { code::max_pages::() * 64 * 1024 }>) { - call_builder!(func, WasmModule::hasher("seal_hash_blake2_256", 1, n)); + fn seal_hash_keccak_256(n: Linear<0, { code::max_pages::() * 64 * 1024 }>) { + build_runtime!(runtime, memory: [[0u8; 32], vec![0u8; n as usize], ]); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_hash_keccak_256(&mut runtime, &mut memory, 32, n, 0); } - assert_eq!(res.did_revert(), false); + assert_eq!(sp_io::hashing::keccak_256(&memory[32..]), &memory[0..32]); + assert_ok!(result); } - // Only the overhead of calling the function itself with minimal arguments. + // `n`: Input to hash in bytes #[benchmark(pov_mode = Measured)] - fn seal_hash_blake2_128(r: Linear<0, API_BENCHMARK_RUNS>) { - call_builder!(func, WasmModule::hasher("seal_hash_blake2_128", r, 0)); + fn seal_hash_blake2_256(n: Linear<0, { code::max_pages::() * 64 * 1024 }>) { + build_runtime!(runtime, memory: [[0u8; 32], vec![0u8; n as usize], ]); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_hash_blake2_256(&mut runtime, &mut memory, 32, n, 0); } - assert_eq!(res.did_revert(), false); + assert_eq!(sp_io::hashing::blake2_256(&memory[32..]), &memory[0..32]); + assert_ok!(result); } // `n`: Input to hash in bytes #[benchmark(pov_mode = Measured)] - fn seal_hash_blake2_128_per_byte(n: Linear<0, { code::max_pages::() * 64 * 1024 }>) { - call_builder!(func, WasmModule::hasher("seal_hash_blake2_128", 1, n)); + fn seal_hash_blake2_128(n: Linear<0, { code::max_pages::() * 64 * 1024 }>) { + build_runtime!(runtime, memory: [[0u8; 16], vec![0u8; n as usize], ]); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_hash_blake2_128(&mut runtime, &mut memory, 16, n, 0); } - assert_eq!(res.did_revert(), false); + assert_eq!(sp_io::hashing::blake2_128(&memory[16..]), &memory[0..16]); + assert_ok!(result); } // `n`: Message input length to verify in bytes. // need some buffer so the code size does not exceed the max code size. 
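The hashing benchmarks above reserve a 32-byte output region, except `seal_hash_blake2_128`, which reserves 16. That falls straight out of the digest widths of the `sp_io` host primitives:

```rust
fn main() {
	assert_eq!(sp_io::hashing::sha2_256(b"x").len(), 32);
	assert_eq!(sp_io::hashing::keccak_256(b"x").len(), 32);
	assert_eq!(sp_io::hashing::blake2_256(b"x").len(), 32);
	// blake2_128 is the outlier: a 16-byte digest, hence the smaller buffer.
	assert_eq!(sp_io::hashing::blake2_128(b"x").len(), 16);
}
```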
#[benchmark(pov_mode = Measured)] - fn seal_sr25519_verify_per_byte( - n: Linear<0, { T::MaxCodeLen::get() - 255 }>, - ) -> Result<(), BenchmarkError> { + fn seal_sr25519_verify(n: Linear<0, { T::MaxCodeLen::get() - 255 }>) { let message = (0..n).zip((32u8..127u8).cycle()).map(|(_, c)| c).collect::>(); - let message_len = message.len() as i32; + let message_len = message.len() as u32; let key_type = sp_core::crypto::KeyTypeId(*b"code"); let pub_key = sp_io::crypto::sr25519_generate(key_type, None); let sig = sp_io::crypto::sr25519_sign(key_type, &pub_key, &message).expect("Generates signature"); let sig = AsRef::<[u8; 64]>::as_ref(&sig).to_vec(); + let sig_len = sig.len() as u32; - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "sr25519_verify", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![ - DataSegment { offset: 0, value: sig }, - DataSegment { offset: 64, value: pub_key.to_vec() }, - DataSegment { offset: 96, value: message }, - ], - call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // signature_ptr - Instruction::I32Const(64), // pub_key_ptr - Instruction::I32Const(message_len), // message_len - Instruction::I32Const(96), // message_ptr - Instruction::Call(0), - Instruction::Drop, - Instruction::End, - ])), - ..Default::default() - }); - - call_builder!(func, code); + build_runtime!(runtime, memory: [sig, pub_key.to_vec(), message, ]); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_sr25519_verify( + &mut runtime, + &mut memory, + 0, // signature_ptr + sig_len, // pub_key_ptr + message_len, // message_len + sig_len + pub_key.len() as u32, // message_ptr + ); } - assert_eq!(res.did_revert(), false); - Ok(()) - } - // Only calling the function itself with valid arguments. - // It generates different private keys and signatures for the message "Hello world". - // This is a slow call: We reduce the number of runs. 
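For reference, the primitives behind the new `seal_sr25519_verify` can be round-tripped directly. This sketch assumes it runs inside test externalities with a keystore (as the benchmarks do); without one, key generation panics:

```rust
fn sr25519_round_trip() {
	let key_type = sp_core::crypto::KeyTypeId(*b"code");
	let pub_key = sp_io::crypto::sr25519_generate(key_type, None);
	let sig = sp_io::crypto::sr25519_sign(key_type, &pub_key, b"msg").expect("signs");
	// The host-side verification the seal function ultimately performs.
	assert!(sp_io::crypto::sr25519_verify(&sig, b"msg", &pub_key));
}
```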
- #[benchmark(pov_mode = Measured)] - fn seal_sr25519_verify( - r: Linear<0, { API_BENCHMARK_RUNS / 10 }>, - ) -> Result<(), BenchmarkError> { - let message = b"Hello world".to_vec(); - let message_len = message.len() as i32; - let key_type = sp_core::crypto::KeyTypeId(*b"code"); - let sig_params = (0..r) - .flat_map(|_| { - let pub_key = sp_io::crypto::sr25519_generate(key_type, None); - let sig = sp_io::crypto::sr25519_sign(key_type, &pub_key, &message) - .expect("Generates signature"); - let data: [u8; 96] = [AsRef::<[u8]>::as_ref(&sig), AsRef::<[u8]>::as_ref(&pub_key)] - .concat() - .try_into() - .unwrap(); - data - }) - .collect::>(); - let sig_params_len = sig_params.len() as i32; - - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "sr25519_verify", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![ - DataSegment { offset: 0, value: sig_params }, - DataSegment { offset: sig_params_len as u32, value: message }, - ], - call_body: Some(body::repeated_dyn( - r, - vec![ - Counter(0, 96), // signature_ptr - Counter(64, 96), // pub_key_ptr - Regular(Instruction::I32Const(message_len)), // message_len - Regular(Instruction::I32Const(sig_params_len)), // message_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ], - )), - ..Default::default() - }); - call_builder!(func, code); - - let res; - #[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); - Ok(()) + assert_eq!(result.unwrap(), ReturnErrorCode::Success); } - // Only calling the function itself with valid arguments. - // It generates different private keys and signatures for the message "Hello world". - // This is a slow call: We reduce the number of runs. 
	#[benchmark(pov_mode = Measured)]
-	fn seal_ecdsa_recover(r: Linear<0, { API_BENCHMARK_RUNS / 10 }>) -> Result<(), BenchmarkError> {
+	fn seal_ecdsa_recover() {
		let message_hash = sp_io::hashing::blake2_256("Hello world".as_bytes());
		let key_type = sp_core::crypto::KeyTypeId(*b"code");
-		let signatures = (0..r)
-			.map(|_| {
-				let pub_key = sp_io::crypto::ecdsa_generate(key_type, None);
-				let sig = sp_io::crypto::ecdsa_sign_prehashed(key_type, &pub_key, &message_hash)
-					.expect("Generates signature");
-				AsRef::<[u8; 65]>::as_ref(&sig).to_vec()
-			})
-			.collect::<Vec<_>>();
-		let signatures = signatures.iter().flatten().cloned().collect::<Vec<_>>();
-		let signatures_bytes_len = signatures.len() as i32;
-
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "seal_ecdsa_recover",
-				params: vec![ValueType::I32, ValueType::I32, ValueType::I32],
-				return_type: Some(ValueType::I32),
-			}],
-			data_segments: vec![
-				DataSegment { offset: 0, value: message_hash[..].to_vec() },
-				DataSegment { offset: 32, value: signatures },
-			],
-			call_body: Some(body::repeated_dyn(
-				r,
-				vec![
-					Counter(32, 65), // signature_ptr
-					Regular(Instruction::I32Const(0)), // message_hash_ptr
-					Regular(Instruction::I32Const(signatures_bytes_len + 32)), // output_len_ptr
-					Regular(Instruction::Call(0)),
-					Regular(Instruction::Drop),
-				],
-			)),
-			..Default::default()
-		});
-		call_builder!(func, code);
+		let signature = {
+			let pub_key = sp_io::crypto::ecdsa_generate(key_type, None);
+			let sig = sp_io::crypto::ecdsa_sign_prehashed(key_type, &pub_key, &message_hash)
+				.expect("Generates signature");
+			AsRef::<[u8; 65]>::as_ref(&sig).to_vec()
+		};

-		let res;
+		build_runtime!(runtime, memory: [signature, message_hash, [0u8; 33], ]);
+
+		let result;
		#[block]
		{
-			res = func.call();
+			result = BenchEnv::seal0_ecdsa_recover(
+				&mut runtime,
+				&mut memory,
+				0,       // signature_ptr
+				65,      // message_hash_ptr
+				65 + 32, // output_ptr
+			);
		}
-		assert_eq!(res.did_revert(), false);
-		Ok(())
+
+		assert_eq!(result.unwrap(), ReturnErrorCode::Success);
	}

-	// Only calling the function itself for the list of
-	// generated different ECDSA keys.
-	// This is a slow call: We reduce the number of runs.
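The recovery wrapped by `seal_ecdsa_recover` is likewise available as a host primitive; under the same keystore assumption as above, a standalone round trip returns the 33-byte compressed key that the benchmark's `[0u8; 33]` output region is sized for:

```rust
fn ecdsa_recover_round_trip() {
	let key_type = sp_core::crypto::KeyTypeId(*b"code");
	let pub_key = sp_io::crypto::ecdsa_generate(key_type, None);
	let hash = sp_io::hashing::blake2_256(b"Hello world");
	let sig = sp_io::crypto::ecdsa_sign_prehashed(key_type, &pub_key, &hash).expect("signs");
	// Recover the compressed public key from the 65-byte signature and the
	// 32-byte prehashed message, then check it matches the signer.
	let recovered =
		sp_io::crypto::secp256k1_ecdsa_recover_compressed(AsRef::<[u8; 65]>::as_ref(&sig), &hash)
			.unwrap();
	assert_eq!(recovered, pub_key.0);
}
```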
#[benchmark(pov_mode = Measured)]
-	fn seal_ecdsa_to_eth_address(
-		r: Linear<0, { API_BENCHMARK_RUNS / 10 }>,
-	) -> Result<(), BenchmarkError> {
+	fn seal_ecdsa_to_eth_address() {
 		let key_type = sp_core::crypto::KeyTypeId(*b"code");
-		let pub_keys_bytes = (0..r)
-			.flat_map(|_| sp_io::crypto::ecdsa_generate(key_type, None).0)
-			.collect::<Vec<_>>();
-		let pub_keys_bytes_len = pub_keys_bytes.len() as i32;
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "seal_ecdsa_to_eth_address",
-				params: vec![ValueType::I32, ValueType::I32],
-				return_type: Some(ValueType::I32),
-			}],
-			data_segments: vec![DataSegment { offset: 0, value: pub_keys_bytes }],
-			call_body: Some(body::repeated_dyn(
-				r,
-				vec![
-					Counter(0, 33),                                     // pub_key_ptr
-					Regular(Instruction::I32Const(pub_keys_bytes_len)), // out_ptr
-					Regular(Instruction::Call(0)),
-					Regular(Instruction::Drop),
-				],
-			)),
-			..Default::default()
-		});
-		call_builder!(func, code);
+		let pub_key_bytes = sp_io::crypto::ecdsa_generate(key_type, None).0;
+		build_runtime!(runtime, memory: [[0u8; 20], pub_key_bytes,]);
 
-		let res;
+		let result;
 		#[block]
 		{
-			res = func.call();
+			result = BenchEnv::seal0_ecdsa_to_eth_address(
+				&mut runtime,
+				&mut memory,
+				20, // key_ptr
+				0,  // output_ptr
+			);
 		}
-		assert_eq!(res.did_revert(), false);
-		Ok(())
+
+		assert_ok!(result);
+		assert_eq!(&memory[..20], runtime.ext().ecdsa_to_eth_address(&pub_key_bytes).unwrap());
 	}
 
 	#[benchmark(pov_mode = Measured)]
-	fn seal_set_code_hash(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> {
-		let code_hashes = (0..r)
-			.map(|i| {
-				let new_code = WasmModule::<T>::dummy_with_bytes(i);
-				let caller = whitelisted_caller();
-				T::Currency::set_balance(&caller, caller_funding::<T>());
-				Contracts::<T>::store_code_raw(new_code.code, caller)?;
-				Ok(new_code.hash)
-			})
-			.collect::<Result<Vec<_>, &'static str>>()?;
-		let code_hash_len = code_hashes.get(0).map(|x| x.encode().len()).unwrap_or(0);
-		let code_hashes_bytes = code_hashes.iter().flat_map(|x| x.encode()).collect::<Vec<_>>();
-
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "seal_set_code_hash",
-				params: vec![ValueType::I32],
-				return_type: Some(ValueType::I32),
-			}],
-			data_segments: vec![DataSegment { offset: 0, value: code_hashes_bytes }],
-			call_body: Some(body::repeated_dyn(
-				r,
-				vec![
-					Counter(0, code_hash_len as u32), // code_hash_ptr
-					Regular(Instruction::Call(0)),
-					Regular(Instruction::Drop),
-				],
-			)),
-			..Default::default()
-		});
-		call_builder!(func, code);
+	fn seal_set_code_hash() -> Result<(), BenchmarkError> {
+		let code_hash =
+			Contract::<T>::with_index(1, WasmModule::dummy(), vec![])?.info()?.code_hash;
+
+		build_runtime!(runtime, memory: [ code_hash.encode(),]);
 
-		let res;
+		let result;
 		#[block]
 		{
-			res = func.call();
+			result = BenchEnv::seal0_set_code_hash(&mut runtime, &mut memory, 0);
 		}
-		assert_eq!(res.did_revert(), false);
+
+		assert_ok!(result);
 		Ok(())
 	}
 
 	#[benchmark(pov_mode = Measured)]
-	fn lock_delegate_dependency(
-		r: Linear<0, { T::MaxDelegateDependencies::get() }>,
-	) -> Result<(), BenchmarkError> {
-		let code_hashes = (0..r)
-			.map(|i| {
-				let new_code = WasmModule::<T>::dummy_with_bytes(65 + i);
-				let caller = whitelisted_caller();
-				T::Currency::set_balance(&caller, caller_funding::<T>());
-				Contracts::<T>::store_code_raw(new_code.code, caller)?;
-				Ok(new_code.hash)
-			})
-			.collect::<Result<Vec<_>, &'static str>>()?;
-		let code_hash_len = code_hashes.get(0).map(|x| x.encode().len()).unwrap_or(0);
-		let code_hashes_bytes = code_hashes.iter().flat_map(|x| x.encode()).collect::<Vec<_>>();
-
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "lock_delegate_dependency",
-				params: vec![ValueType::I32],
-				return_type: None,
-			}],
-			data_segments: vec![DataSegment { offset: 0, value: code_hashes_bytes }],
-			call_body: Some(body::repeated_dyn(
-				r,
-				vec![
-					Counter(0, code_hash_len as u32), // code_hash_ptr
-					Regular(Instruction::Call(0)),
-				],
-			)),
-			..Default::default()
-		});
-		call_builder!(func, code);
+	fn lock_delegate_dependency() -> Result<(), BenchmarkError> {
+		let code_hash = Contract::<T>::with_index(1, WasmModule::dummy_with_bytes(1), vec![])?
+			.info()?
+			.code_hash;
 
-		let res;
+		build_runtime!(runtime, memory: [ code_hash.encode(),]);
+
+		let result;
 		#[block]
 		{
-			res = func.call();
+			result = BenchEnv::seal0_lock_delegate_dependency(&mut runtime, &mut memory, 0);
 		}
-		assert_eq!(res.did_revert(), false);
+
+		assert_ok!(result);
 		Ok(())
 	}
 
 	#[benchmark]
-	fn unlock_delegate_dependency(
-		r: Linear<0, { T::MaxDelegateDependencies::get() }>,
-	) -> Result<(), BenchmarkError> {
-		let code_hashes = (0..r)
-			.map(|i| {
-				let new_code = WasmModule::<T>::dummy_with_bytes(65 + i);
-				let caller = whitelisted_caller();
-				T::Currency::set_balance(&caller, caller_funding::<T>());
-				Contracts::<T>::store_code_raw(new_code.code, caller)?;
-				Ok(new_code.hash)
-			})
-			.collect::<Result<Vec<_>, &'static str>>()?;
+	fn unlock_delegate_dependency() -> Result<(), BenchmarkError> {
+		let code_hash = Contract::<T>::with_index(1, WasmModule::dummy_with_bytes(1), vec![])?
+			.info()?
+			.code_hash;
 
-		let code_hash_len = code_hashes.get(0).map(|x| x.encode().len()).unwrap_or(0);
-		let code_hashes_bytes = code_hashes.iter().flat_map(|x| x.encode()).collect::<Vec<_>>();
-
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![
-				ImportedFunction {
-					module: "seal0",
-					name: "unlock_delegate_dependency",
-					params: vec![ValueType::I32],
-					return_type: None,
-				},
-				ImportedFunction {
-					module: "seal0",
-					name: "lock_delegate_dependency",
-					params: vec![ValueType::I32],
-					return_type: None,
-				},
-			],
-			data_segments: vec![DataSegment { offset: 0, value: code_hashes_bytes }],
-			deploy_body: Some(body::repeated_dyn(
-				r,
-				vec![
-					Counter(0, code_hash_len as u32), // code_hash_ptr
-					Regular(Instruction::Call(1)),
-				],
-			)),
-			call_body: Some(body::repeated_dyn(
-				r,
-				vec![
-					Counter(0, code_hash_len as u32), // code_hash_ptr
-					Regular(Instruction::Call(0)),
-				],
-			)),
-			..Default::default()
-		});
-		call_builder!(func, code);
+		build_runtime!(runtime, memory: [ code_hash.encode(),]);
+		BenchEnv::seal0_lock_delegate_dependency(&mut runtime, &mut memory, 0).unwrap();
 
-		let res;
+		let result;
 		#[block]
 		{
-			res = func.call();
+			result = BenchEnv::seal0_unlock_delegate_dependency(&mut runtime, &mut memory, 0);
 		}
-		assert_eq!(res.did_revert(), false);
+
+		assert_ok!(result);
 		Ok(())
 	}
 
 	#[benchmark(pov_mode = Measured)]
-	fn seal_reentrance_count(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> {
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "reentrance_count",
-				params: vec![],
-				return_type: Some(ValueType::I32),
-			}],
-			call_body: Some(body::repeated(r, &[Instruction::Call(0), Instruction::Drop])),
-			..Default::default()
-		});
-		let instance = Contract::<T>::new(code, vec![])?;
-		let origin = RawOrigin::Signed(instance.caller.clone());
-		#[extrinsic_call]
-		call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]);
-		Ok(())
+	fn seal_reentrance_count() {
+		build_runtime!(runtime, memory: []);
+		let result;
+		#[block]
+		{
+			result = BenchEnv::seal0_reentrance_count(&mut runtime, &mut memory)
+		}
+
+		assert_eq!(result.unwrap(), 0);
 	}
 
 	#[benchmark(pov_mode = Measured)]
-	fn seal_account_reentrance_count(
-		r: Linear<0, API_BENCHMARK_RUNS>,
-	) -> Result<(), BenchmarkError> {
-		let dummy_code = WasmModule::<T>::dummy_with_bytes(0);
-		let accounts = (0..r)
-			.map(|i| Contract::with_index(i + 1, dummy_code.clone(), vec![]))
-			.collect::<Result<Vec<_>, _>>()?;
-		let account_id_len = accounts.get(0).map(|i| i.account_id.encode().len()).unwrap_or(0);
-		let account_id_bytes = accounts.iter().flat_map(|x| x.account_id.encode()).collect();
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "account_reentrance_count",
-				params: vec![ValueType::I32],
-				return_type: Some(ValueType::I32),
-			}],
-			data_segments: vec![DataSegment { offset: 0, value: account_id_bytes }],
-			call_body: Some(body::repeated_dyn(
-				r,
-				vec![
-					Counter(0, account_id_len as u32), // account_ptr
-					Regular(Instruction::Call(0)),
-					Regular(Instruction::Drop),
-				],
-			)),
-			..Default::default()
-		});
-		call_builder!(func, code);
+	fn seal_account_reentrance_count() {
+		let Contract { account_id, .. } =
+			Contract::<T>::with_index(1, WasmModule::dummy(), vec![]).unwrap();
+		build_runtime!(runtime, memory: [account_id.encode(),]);
 
-		let res;
+		let result;
 		#[block]
 		{
-			res = func.call();
+			result = BenchEnv::seal0_account_reentrance_count(&mut runtime, &mut memory, 0);
 		}
-		assert_eq!(res.did_revert(), false);
-		Ok(())
+
+		assert_eq!(result.unwrap(), 0);
 	}
 
 	#[benchmark(pov_mode = Measured)]
-	fn seal_instantiation_nonce(r: Linear<0, API_BENCHMARK_RUNS>) {
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "instantiation_nonce",
-				params: vec![],
-				return_type: Some(ValueType::I64),
-			}],
-			call_body: Some(body::repeated(r, &[Instruction::Call(0), Instruction::Drop])),
-			..Default::default()
-		});
-		call_builder!(func, code);
+	fn seal_instantiation_nonce() {
+		build_runtime!(runtime, memory: []);
 
-		let res;
+		let result;
 		#[block]
 		{
-			res = func.call();
+			result = BenchEnv::seal0_instantiation_nonce(&mut runtime, &mut memory);
 		}
-		assert_eq!(res.did_revert(), false);
+
+		assert_eq!(result.unwrap(), 1);
 	}
 
 	// We load `i64` values from random linear memory locations and store the loaded
diff --git a/substrate/frame/contracts/src/exec.rs b/substrate/frame/contracts/src/exec.rs
index 21ebb1e8c5f..992f7aaace3 100644
--- a/substrate/frame/contracts/src/exec.rs
+++ b/substrate/frame/contracts/src/exec.rs
@@ -303,7 +303,7 @@ pub trait Ext: sealing::Sealed {
 	fn ecdsa_to_eth_address(&self, pk: &[u8; 33]) -> Result<[u8; 20], ()>;
 
 	/// Tests sometimes need to modify and inspect the contract info directly.
-	#[cfg(test)]
+	#[cfg(any(test, feature = "runtime-benchmarks"))]
 	fn contract_info(&mut self) -> &mut ContractInfo<Self::T>;
 
 	/// Sets new code hash for existing contract.
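All of the converted benchmarks above follow the same shape: no Wasm module is assembled any more; instead a runtime plus guest memory is built and exactly one host function is invoked inside `#[block]`. A template sketch, assuming the `build_runtime!`/`BenchEnv` machinery used above (`seal0_example` is a hypothetical stand-in for any real host function, not something this patch adds):

```rust
#[benchmark(pov_mode = Measured)]
fn seal_example() {
    // Guest memory pre-filled with the host function's encoded input.
    build_runtime!(runtime, memory: [0u32.encode(),]);

    let result;
    #[block]
    {
        // Exactly one host call is measured; the loop over `r` repetitions
        // is gone, and the per-call base cost is captured by `noop_host_fn`.
        result = BenchEnv::seal0_example(&mut runtime, &mut memory, 0);
    }

    assert_ok!(result);
}
```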
@@ -365,6 +365,11 @@ pub trait Ext: sealing::Sealed {
 		&mut self,
 		code_hash: &CodeHash<Self::T>,
 	) -> Result<(), DispatchError>;
+
+	/// Returns the number of locked delegate dependencies.
+	///
+	/// Note: Requires &mut self to access the contract info.
+	fn locked_delegate_dependencies_count(&mut self) -> usize;
 }
 
 /// Describes the different functions that can be exported by an [`Executable`].
@@ -1497,7 +1502,7 @@ where
 		ECDSAPublic::from(*pk).to_eth_address()
 	}
 
-	#[cfg(test)]
+	#[cfg(any(test, feature = "runtime-benchmarks"))]
 	fn contract_info(&mut self) -> &mut ContractInfo<Self::T> {
 		self.top_frame_mut().contract_info()
 	}
@@ -1605,6 +1610,10 @@ where
 			.charge_deposit(frame.account_id.clone(), StorageDeposit::Refund(deposit));
 		Ok(())
 	}
+
+	fn locked_delegate_dependencies_count(&mut self) -> usize {
+		self.top_frame_mut().contract_info().delegate_dependencies_count()
+	}
 }
 
 mod sealing {
diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs
index d20f3c15fb5..6fab1a44ecb 100644
--- a/substrate/frame/contracts/src/lib.rs
+++ b/substrate/frame/contracts/src/lib.rs
@@ -146,7 +146,7 @@ pub use crate::{
 	exec::Frame,
 	migration::{MigrateSequence, Migration, NoopMigration},
 	pallet::*,
-	schedule::{HostFnWeights, InstructionWeights, Limits, Schedule},
+	schedule::{InstructionWeights, Limits, Schedule},
 	wasm::Determinism,
 };
 pub use weights::WeightInfo;
diff --git a/substrate/frame/contracts/src/schedule.rs b/substrate/frame/contracts/src/schedule.rs
index 06a7c2005aa..a1fbdea4228 100644
--- a/substrate/frame/contracts/src/schedule.rs
+++ b/substrate/frame/contracts/src/schedule.rs
@@ -22,7 +22,7 @@ use crate::{weights::WeightInfo, Config};
 use codec::{Decode, Encode};
 use core::marker::PhantomData;
-use frame_support::{weights::Weight, DefaultNoBound};
+use frame_support::DefaultNoBound;
 use scale_info::TypeInfo;
 #[cfg(feature = "std")]
 use serde::{Deserialize, Serialize};
@@ -60,9 +60,6 @@ pub struct Schedule<T: Config> {
 
 	/// The weights for individual wasm instructions.
 	pub instruction_weights: InstructionWeights<T>,
-
-	/// The weights for each imported function a contract is allowed to call.
-	pub host_fn_weights: HostFnWeights<T>,
 }
 
 /// Describes the upper limits on various metrics.
@@ -109,230 +106,6 @@ pub struct InstructionWeights<T: Config> {
 	pub _phantom: PhantomData<T>,
 }
 
-/// Describes the weight for each imported function that a contract is allowed to call.
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
-#[cfg_attr(feature = "runtime-benchmarks", derive(pallet_contracts_proc_macro::WeightDebug))]
-#[derive(Clone, Encode, Decode, PartialEq, Eq, TypeInfo)]
-#[scale_info(skip_type_params(T))]
-pub struct HostFnWeights<T: Config> {
-	/// Weight of calling `seal_caller`.
-	pub caller: Weight,
-
-	/// Weight of calling `seal_is_contract`.
-	pub is_contract: Weight,
-
-	/// Weight of calling `seal_code_hash`.
-	pub code_hash: Weight,
-
-	/// Weight of calling `seal_own_code_hash`.
-	pub own_code_hash: Weight,
-
-	/// Weight of calling `seal_caller_is_origin`.
-	pub caller_is_origin: Weight,
-
-	/// Weight of calling `seal_caller_is_root`.
-	pub caller_is_root: Weight,
-
-	/// Weight of calling `seal_address`.
-	pub address: Weight,
-
-	/// Weight of calling `seal_gas_left`.
-	pub gas_left: Weight,
-
-	/// Weight of calling `seal_balance`.
-	pub balance: Weight,
-
-	/// Weight of calling `seal_value_transferred`.
-	pub value_transferred: Weight,
-
-	/// Weight of calling `seal_minimum_balance`.
-	pub minimum_balance: Weight,
-
-	/// Weight of calling `seal_block_number`.
-	pub block_number: Weight,
-
-	/// Weight of calling `seal_now`.
-	pub now: Weight,
-
-	/// Weight of calling `seal_weight_to_fee`.
-	pub weight_to_fee: Weight,
-
-	/// Weight of calling `seal_input`.
-	pub input: Weight,
-
-	/// Weight per input byte copied to contract memory by `seal_input`.
-	pub input_per_byte: Weight,
-
-	/// Weight of calling `seal_return`.
-	pub r#return: Weight,
-
-	/// Weight per byte returned through `seal_return`.
-	pub return_per_byte: Weight,
-
-	/// Weight of calling `seal_terminate`.
-	pub terminate: Weight,
-
-	/// Weight of calling `seal_random`.
-	pub random: Weight,
-
-	/// Weight of calling `seal_deposit_event`.
-	pub deposit_event: Weight,
-
-	/// Weight per topic supplied to `seal_deposit_event`.
-	pub deposit_event_per_topic: Weight,
-
-	/// Weight per byte of an event deposited through `seal_deposit_event`.
-	pub deposit_event_per_byte: Weight,
-
-	/// Weight of calling `seal_debug_message`.
-	pub debug_message: Weight,
-
-	/// Weight of calling `seal_debug_message` per byte of the message.
-	pub debug_message_per_byte: Weight,
-
-	/// Weight of calling `seal_set_storage`.
-	pub set_storage: Weight,
-
-	/// Weight per written byte of an item stored with `seal_set_storage`.
-	pub set_storage_per_new_byte: Weight,
-
-	/// Weight per overwritten byte of an item stored with `seal_set_storage`.
-	pub set_storage_per_old_byte: Weight,
-
-	/// Weight of calling `seal_set_code_hash`.
-	pub set_code_hash: Weight,
-
-	/// Weight of calling `seal_clear_storage`.
-	pub clear_storage: Weight,
-
-	/// Weight of calling `seal_clear_storage` per byte of the stored item.
-	pub clear_storage_per_byte: Weight,
-
-	/// Weight of calling `seal_contains_storage`.
-	pub contains_storage: Weight,
-
-	/// Weight of calling `seal_contains_storage` per byte of the stored item.
-	pub contains_storage_per_byte: Weight,
-
-	/// Weight of calling `seal_get_storage`.
-	pub get_storage: Weight,
-
-	/// Weight per byte of an item received via `seal_get_storage`.
-	pub get_storage_per_byte: Weight,
-
-	/// Weight of calling `seal_take_storage`.
-	pub take_storage: Weight,
-
-	/// Weight per byte of an item received via `seal_take_storage`.
-	pub take_storage_per_byte: Weight,
-
-	/// Weight of calling `seal_transfer`.
-	pub transfer: Weight,
-
-	/// Weight of calling `seal_call`.
-	pub call: Weight,
-
-	/// Weight of calling `seal_delegate_call`.
-	pub delegate_call: Weight,
-
-	/// Weight surcharge that is claimed if `seal_call` does a balance transfer.
-	pub call_transfer_surcharge: Weight,
-
-	/// Weight per byte that is cloned by supplying the `CLONE_INPUT` flag.
-	pub call_per_cloned_byte: Weight,
-
-	/// Weight of calling `seal_instantiate`.
-	pub instantiate: Weight,
-
-	/// Weight surcharge that is claimed if `seal_instantiate` does a balance transfer.
-	pub instantiate_transfer_surcharge: Weight,
-
-	/// Weight per input byte supplied to `seal_instantiate`.
-	pub instantiate_per_input_byte: Weight,
-
-	/// Weight per salt byte supplied to `seal_instantiate`.
-	pub instantiate_per_salt_byte: Weight,
-
-	/// Weight of calling `seal_hash_sha_256`.
-	pub hash_sha2_256: Weight,
-
-	/// Weight per byte hashed by `seal_hash_sha_256`.
-	pub hash_sha2_256_per_byte: Weight,
-
-	/// Weight of calling `seal_hash_keccak_256`.
-	pub hash_keccak_256: Weight,
-
-	/// Weight per byte hashed by `seal_hash_keccak_256`.
-	pub hash_keccak_256_per_byte: Weight,
-
-	/// Weight of calling `seal_hash_blake2_256`.
-	pub hash_blake2_256: Weight,
-
-	/// Weight per byte hashed by `seal_hash_blake2_256`.
-	pub hash_blake2_256_per_byte: Weight,
-
-	/// Weight of calling `seal_hash_blake2_128`.
-	pub hash_blake2_128: Weight,
-
-	/// Weight per byte hashed by `seal_hash_blake2_128`.
-	pub hash_blake2_128_per_byte: Weight,
-
-	/// Weight of calling `seal_ecdsa_recover`.
-	pub ecdsa_recover: Weight,
-
-	/// Weight of calling `seal_ecdsa_to_eth_address`.
-	pub ecdsa_to_eth_address: Weight,
-
-	/// Weight of calling `sr25519_verify`.
-	pub sr25519_verify: Weight,
-
-	/// Weight per byte of calling `sr25519_verify`.
-	pub sr25519_verify_per_byte: Weight,
-
-	/// Weight of calling `reentrance_count`.
-	pub reentrance_count: Weight,
-
-	/// Weight of calling `account_reentrance_count`.
-	pub account_reentrance_count: Weight,
-
-	/// Weight of calling `instantiation_nonce`.
-	pub instantiation_nonce: Weight,
-
-	/// Weight of calling `lock_delegate_dependency`.
-	pub lock_delegate_dependency: Weight,
-
-	/// Weight of calling `unlock_delegate_dependency`.
-	pub unlock_delegate_dependency: Weight,
-
-	/// The type parameter is used in the default implementation.
-	#[codec(skip)]
-	pub _phantom: PhantomData<T>,
-}
-
-macro_rules! replace_token {
-	($_in:tt $replacement:tt) => {
-		$replacement
-	};
-}
-
-macro_rules! call_zero {
-	($name:ident, $( $arg:expr ),*) => {
-		T::WeightInfo::$name($( replace_token!($arg 0) ),*)
-	};
-}
-
-macro_rules! cost_args {
-	($name:ident, $( $arg: expr ),+) => {
-		(T::WeightInfo::$name($( $arg ),+).saturating_sub(call_zero!($name, $( $arg ),+)))
-	}
-}
-
-macro_rules! cost {
-	($name:ident) => {
-		cost_args!($name, 1)
-	};
-}
-
 impl Default for Limits {
 	fn default() -> Self {
 		Self {
@@ -350,94 +123,10 @@ impl<T: Config> Default for InstructionWeights<T> {
 	/// computed gas costs by 6 to have a rough estimate as to how expensive each
 	/// single executed instruction is going to be.
 	fn default() -> Self {
-		let instr_cost = cost!(instr_i64_load_store).ref_time() as u32;
+		let instr_cost = T::WeightInfo::instr_i64_load_store(1)
+			.saturating_sub(T::WeightInfo::instr_i64_load_store(0))
+			.ref_time() as u32;
 		let base = instr_cost / 6;
 		Self { base, _phantom: PhantomData }
 	}
 }
-
-impl<T: Config> Default for HostFnWeights<T> {
-	fn default() -> Self {
-		Self {
-			caller: cost!(seal_caller),
-			is_contract: cost!(seal_is_contract),
-			code_hash: cost!(seal_code_hash),
-			own_code_hash: cost!(seal_own_code_hash),
-			caller_is_origin: cost!(seal_caller_is_origin),
-			caller_is_root: cost!(seal_caller_is_root),
-			address: cost!(seal_address),
-			gas_left: cost!(seal_gas_left),
-			balance: cost!(seal_balance),
-			value_transferred: cost!(seal_value_transferred),
-			minimum_balance: cost!(seal_minimum_balance),
-			block_number: cost!(seal_block_number),
-			now: cost!(seal_now),
-			weight_to_fee: cost!(seal_weight_to_fee),
-			input: cost!(seal_input),
-			input_per_byte: cost!(seal_input_per_byte),
-			r#return: cost!(seal_return),
-			return_per_byte: cost!(seal_return_per_byte),
-			terminate: cost!(seal_terminate),
-			random: cost!(seal_random),
-			deposit_event: cost!(seal_deposit_event),
-			deposit_event_per_topic: cost_args!(seal_deposit_event_per_topic_and_byte, 1, 0),
-			deposit_event_per_byte: cost_args!(seal_deposit_event_per_topic_and_byte, 0, 1),
-			debug_message: cost!(seal_debug_message),
-			debug_message_per_byte: cost!(seal_debug_message_per_byte),
-			set_storage: cost!(seal_set_storage),
-			set_code_hash: cost!(seal_set_code_hash),
-			set_storage_per_new_byte: cost!(seal_set_storage_per_new_byte),
-			set_storage_per_old_byte: cost!(seal_set_storage_per_old_byte),
-			clear_storage: cost!(seal_clear_storage),
-			clear_storage_per_byte: cost!(seal_clear_storage_per_byte),
-			contains_storage: cost!(seal_contains_storage),
-			contains_storage_per_byte: cost!(seal_contains_storage_per_byte),
-			get_storage: cost!(seal_get_storage),
-			get_storage_per_byte: cost!(seal_get_storage_per_byte),
-			take_storage: cost!(seal_take_storage),
-			take_storage_per_byte: cost!(seal_take_storage_per_byte),
-			transfer: cost!(seal_transfer),
-			call: cost!(seal_call),
-			delegate_call: cost!(seal_delegate_call),
-			call_transfer_surcharge: cost_args!(seal_call_per_transfer_clone_byte, 1, 0),
-			call_per_cloned_byte: cost_args!(seal_call_per_transfer_clone_byte, 0, 1),
-			instantiate: cost!(seal_instantiate),
-			instantiate_transfer_surcharge: cost_args!(
-				seal_instantiate_per_transfer_input_salt_byte,
-				1,
-				0,
-				0
-			),
-			instantiate_per_input_byte: cost_args!(
-				seal_instantiate_per_transfer_input_salt_byte,
-				0,
-				1,
-				0
-			),
-			instantiate_per_salt_byte: cost_args!(
-				seal_instantiate_per_transfer_input_salt_byte,
-				0,
-				0,
-				1
-			),
-			hash_sha2_256: cost!(seal_hash_sha2_256),
-			hash_sha2_256_per_byte: cost!(seal_hash_sha2_256_per_byte),
-			hash_keccak_256: cost!(seal_hash_keccak_256),
-			hash_keccak_256_per_byte: cost!(seal_hash_keccak_256_per_byte),
-			hash_blake2_256: cost!(seal_hash_blake2_256),
-			hash_blake2_256_per_byte: cost!(seal_hash_blake2_256_per_byte),
-			hash_blake2_128: cost!(seal_hash_blake2_128),
-			hash_blake2_128_per_byte: cost!(seal_hash_blake2_128_per_byte),
-			ecdsa_recover: cost!(seal_ecdsa_recover),
-			sr25519_verify: cost!(seal_sr25519_verify),
-			sr25519_verify_per_byte: cost!(seal_sr25519_verify_per_byte),
-			ecdsa_to_eth_address: cost!(seal_ecdsa_to_eth_address),
-			reentrance_count: cost!(seal_reentrance_count),
-			account_reentrance_count: cost!(seal_account_reentrance_count),
-			instantiation_nonce: cost!(seal_instantiation_nonce),
-			lock_delegate_dependency: cost!(lock_delegate_dependency),
-			unlock_delegate_dependency: cost!(unlock_delegate_dependency),
-			_phantom: PhantomData,
-		}
-	}
-}
diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs
index 251c037d317..899b0144b07 100644
--- a/substrate/frame/contracts/src/tests.rs
+++ b/substrate/frame/contracts/src/tests.rs
@@ -871,8 +871,7 @@ fn gas_syncs_work() {
 		let result = builder::bare_call(addr.clone()).data(1u32.encode()).build();
 		assert_ok!(result.result);
 		let gas_consumed_once = result.gas_consumed.ref_time();
-		let host_consumed_once =
-			<Test as Config>::Schedule::get().host_fn_weights.caller_is_origin.ref_time();
+		let host_consumed_once = <Test as Config>::WeightInfo::seal_caller_is_origin().ref_time();
 		let engine_consumed_once = gas_consumed_once - host_consumed_once - engine_consumed_noop;
 
 		let result = builder::bare_call(addr).data(2u32.encode()).build();
diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs
index b40eb699db9..e5497b143b8 100644
--- a/substrate/frame/contracts/src/wasm/mod.rs
+++ b/substrate/frame/contracts/src/wasm/mod.rs
@@ -31,6 +31,9 @@ pub use {
 	tests::MockExt,
 };
 
+#[cfg(feature = "runtime-benchmarks")]
+pub use crate::wasm::runtime::{BenchEnv, ReturnData, TrapReason};
+
 pub use crate::wasm::{
 	prepare::{LoadedModule, LoadingMode},
 	runtime::{
@@ -802,6 +805,9 @@ mod tests {
 			self.delegate_dependencies.borrow_mut().remove(code);
 			Ok(())
 		}
+		fn locked_delegate_dependencies_count(&mut self) -> usize {
+			self.delegate_dependencies.borrow().len()
+		}
 	}
 
 	/// Execute the supplied code.
diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs
index 3212aff3126..39b15c867c6 100644
--- a/substrate/frame/contracts/src/wasm/runtime.rs
+++ b/substrate/frame/contracts/src/wasm/runtime.rs
@@ -21,6 +21,7 @@ use crate::{
 	exec::{ExecError, ExecResult, Ext, Key, TopicOf},
 	gas::{ChargedAmount, Token},
 	primitives::ExecReturnValue,
+	weights::WeightInfo,
 	BalanceOf, CodeHash, Config, DebugBufferVec, Error, SENTINEL,
 };
 use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen};
@@ -145,6 +146,8 @@ impl HostError for TrapReason {}
 #[cfg_attr(test, derive(Debug, PartialEq, Eq))]
 #[derive(Copy, Clone)]
 pub enum RuntimeCosts {
+	/// Base Weight of calling a host function.
+	HostFn,
 	/// Weight charged for copying data from the sandbox.
 	CopyFromContract(u32),
 	/// Weight charged for copying data to the sandbox.
@@ -177,12 +180,8 @@ pub enum RuntimeCosts {
 	Now,
 	/// Weight of calling `seal_weight_to_fee`.
 	WeightToFee,
-	/// Weight of calling `seal_input` without the weight of copying the input.
-	InputBase,
-	/// Weight of calling `seal_return` for the given output size.
-	Return(u32),
-	/// Weight of calling `seal_terminate`.
-	Terminate,
+	/// Weight of calling `seal_terminate`, passing the number of locked dependencies.
+	Terminate(u32),
 	/// Weight of calling `seal_random`. It includes the weight for copying the subject.
 	Random,
 	/// Weight of calling `seal_deposit_event` with the given number of topics and event size.
@@ -206,13 +205,13 @@ pub enum RuntimeCosts {
 	/// Weight of calling `seal_delegate_call` for the given input size.
 	DelegateCallBase,
 	/// Weight of the transfer performed during a call.
-	CallSurchargeTransfer,
+	CallTransferSurcharge,
 	/// Weight per byte that is cloned by supplying the `CLONE_INPUT` flag.
 	CallInputCloned(u32),
 	/// Weight of calling `seal_instantiate` for the given input length and salt.
 	InstantiateBase { input_data_len: u32, salt_len: u32 },
 	/// Weight of the transfer performed during an instantiate.
-	InstantiateSurchargeTransfer,
+	InstantiateTransferSurcharge,
 	/// Weight of calling `seal_hash_sha_256` for the given input size.
 	HashSha256(u32),
 	/// Weight of calling `seal_hash_keccak_256` for the given input size.
@@ -236,9 +235,9 @@ pub enum RuntimeCosts {
 	/// Weight of calling `ecdsa_to_eth_address`
 	EcdsaToEthAddress,
 	/// Weight of calling `reentrance_count`
-	ReentrantCount,
+	ReentranceCount,
 	/// Weight of calling `account_reentrance_count`
-	AccountEntranceCount,
+	AccountReentranceCount,
 	/// Weight of calling `instantiation_nonce`
 	InstantiationNonce,
 	/// Weight of calling `lock_delegate_dependency`
@@ -247,6 +246,19 @@ pub enum RuntimeCosts {
 	UnlockDelegateDependency,
 }
 
+macro_rules! cost_args {
+	// cost_args!(name, a, b, c) -> T::WeightInfo::name(a, b, c).saturating_sub(T::WeightInfo::name(0, 0, 0))
+	($name:ident, $( $arg: expr ),+) => {
+		(T::WeightInfo::$name($( $arg ),+).saturating_sub(cost_args!(@call_zero $name, $( $arg ),+)))
+	};
+	// Transform T::WeightInfo::name(a, b, c) into T::WeightInfo::name(0, 0, 0)
+	(@call_zero $name:ident, $( $arg:expr ),*) => {
+		T::WeightInfo::$name($( cost_args!(@replace_token $arg) ),*)
+	};
+	// Replace the token with 0.
+	(@replace_token $_in:tt) => { 0 };
+}
+
 impl<T: Config> Token<T> for RuntimeCosts {
 	fn influence_lowest_gas_limit(&self) -> bool {
 		match self {
@@ -256,85 +268,57 @@ impl<T: Config> Token<T> for RuntimeCosts {
 	}
 
 	fn weight(&self) -> Weight {
-		let s = T::Schedule::get().host_fn_weights;
 		use self::RuntimeCosts::*;
 		match *self {
-			CopyFromContract(len) => s.return_per_byte.saturating_mul(len.into()),
-			CopyToContract(len) => s.input_per_byte.saturating_mul(len.into()),
-			Caller => s.caller,
-			IsContract => s.is_contract,
-			CodeHash => s.code_hash,
-			OwnCodeHash => s.own_code_hash,
-			CallerIsOrigin => s.caller_is_origin,
-			CallerIsRoot => s.caller_is_root,
-			Address => s.address,
-			GasLeft => s.gas_left,
-			Balance => s.balance,
-			ValueTransferred => s.value_transferred,
-			MinimumBalance => s.minimum_balance,
-			BlockNumber => s.block_number,
-			Now => s.now,
-			WeightToFee => s.weight_to_fee,
-			InputBase => s.input,
-			Return(len) => s.r#return.saturating_add(s.return_per_byte.saturating_mul(len.into())),
-			Terminate => s.terminate,
-			Random => s.random,
-			DepositEvent { num_topic, len } => s
-				.deposit_event
-				.saturating_add(s.deposit_event_per_topic.saturating_mul(num_topic.into()))
-				.saturating_add(s.deposit_event_per_byte.saturating_mul(len.into())),
-			DebugMessage(len) => s
-				.debug_message
-				.saturating_add(s.deposit_event_per_byte.saturating_mul(len.into())),
-			SetStorage { new_bytes, old_bytes } => s
-				.set_storage
-				.saturating_add(s.set_storage_per_new_byte.saturating_mul(new_bytes.into()))
-				.saturating_add(s.set_storage_per_old_byte.saturating_mul(old_bytes.into())),
-			ClearStorage(len) => s
-				.clear_storage
-				.saturating_add(s.clear_storage_per_byte.saturating_mul(len.into())),
-			ContainsStorage(len) => s
-				.contains_storage
-				.saturating_add(s.contains_storage_per_byte.saturating_mul(len.into())),
-			GetStorage(len) =>
-				s.get_storage.saturating_add(s.get_storage_per_byte.saturating_mul(len.into())),
-			TakeStorage(len) => s
-				.take_storage
-				.saturating_add(s.take_storage_per_byte.saturating_mul(len.into())),
-			Transfer => s.transfer,
-			CallBase => s.call,
-			DelegateCallBase => s.delegate_call,
-			CallSurchargeTransfer => s.call_transfer_surcharge,
-			CallInputCloned(len) => s.call_per_cloned_byte.saturating_mul(len.into()),
-			InstantiateBase { input_data_len, salt_len } => s
-				.instantiate
-				.saturating_add(s.instantiate_per_input_byte.saturating_mul(input_data_len.into()))
-				.saturating_add(s.instantiate_per_salt_byte.saturating_mul(salt_len.into())),
-			InstantiateSurchargeTransfer => s.instantiate_transfer_surcharge,
-			HashSha256(len) => s
-				.hash_sha2_256
-				.saturating_add(s.hash_sha2_256_per_byte.saturating_mul(len.into())),
-			HashKeccak256(len) => s
-				.hash_keccak_256
-				.saturating_add(s.hash_keccak_256_per_byte.saturating_mul(len.into())),
-			HashBlake256(len) => s
-				.hash_blake2_256
-				.saturating_add(s.hash_blake2_256_per_byte.saturating_mul(len.into())),
-			HashBlake128(len) => s
-				.hash_blake2_128
-				.saturating_add(s.hash_blake2_128_per_byte.saturating_mul(len.into())),
-			EcdsaRecovery => s.ecdsa_recover,
-			Sr25519Verify(len) => s
-				.sr25519_verify
-				.saturating_add(s.sr25519_verify_per_byte.saturating_mul(len.into())),
+			HostFn => cost_args!(noop_host_fn, 1),
+			CopyToContract(len) => T::WeightInfo::seal_input(len),
+			CopyFromContract(len) => T::WeightInfo::seal_return(len),
+			Caller => T::WeightInfo::seal_caller(),
+			IsContract => T::WeightInfo::seal_is_contract(),
+			CodeHash => T::WeightInfo::seal_code_hash(),
+			OwnCodeHash => T::WeightInfo::seal_own_code_hash(),
+			CallerIsOrigin => T::WeightInfo::seal_caller_is_origin(),
+			CallerIsRoot => T::WeightInfo::seal_caller_is_root(),
+			Address => T::WeightInfo::seal_address(),
+			GasLeft => T::WeightInfo::seal_gas_left(),
+			Balance => T::WeightInfo::seal_balance(),
+			ValueTransferred => T::WeightInfo::seal_value_transferred(),
+			MinimumBalance => T::WeightInfo::seal_minimum_balance(),
+			BlockNumber => T::WeightInfo::seal_block_number(),
+			Now => T::WeightInfo::seal_now(),
+			WeightToFee => T::WeightInfo::seal_weight_to_fee(),
+			Terminate(locked_dependencies) => T::WeightInfo::seal_terminate(locked_dependencies),
+			Random => T::WeightInfo::seal_random(),
+			DepositEvent { num_topic, len } => T::WeightInfo::seal_deposit_event(num_topic, len),
+			DebugMessage(len) => T::WeightInfo::seal_debug_message(len),
+			SetStorage { new_bytes, old_bytes } =>
+				T::WeightInfo::seal_set_storage(new_bytes, old_bytes),
+			ClearStorage(len) => T::WeightInfo::seal_clear_storage(len),
+			ContainsStorage(len) => T::WeightInfo::seal_contains_storage(len),
+			GetStorage(len) => T::WeightInfo::seal_get_storage(len),
+			TakeStorage(len) => T::WeightInfo::seal_take_storage(len),
+			Transfer => T::WeightInfo::seal_transfer(),
+			CallBase => T::WeightInfo::seal_call(0, 0),
+			DelegateCallBase => T::WeightInfo::seal_delegate_call(),
+			CallTransferSurcharge => cost_args!(seal_call, 1, 0),
+			CallInputCloned(len) => cost_args!(seal_call, 0, len),
+			InstantiateBase { input_data_len, salt_len } =>
+				T::WeightInfo::seal_instantiate(0, input_data_len, salt_len),
+			InstantiateTransferSurcharge => cost_args!(seal_instantiate, 1, 0, 0),
+			HashSha256(len) => T::WeightInfo::seal_hash_sha2_256(len),
+			HashKeccak256(len) => T::WeightInfo::seal_hash_keccak_256(len),
+			HashBlake256(len) => T::WeightInfo::seal_hash_blake2_256(len),
+			HashBlake128(len) => T::WeightInfo::seal_hash_blake2_128(len),
+			EcdsaRecovery => T::WeightInfo::seal_ecdsa_recover(),
+			Sr25519Verify(len) => T::WeightInfo::seal_sr25519_verify(len),
 			ChainExtension(weight) | CallRuntime(weight) | CallXcmExecute(weight) => weight,
-			SetCodeHash => s.set_code_hash,
-			EcdsaToEthAddress => s.ecdsa_to_eth_address,
-			ReentrantCount => s.reentrance_count,
-			AccountEntranceCount => s.account_reentrance_count,
-			InstantiationNonce => s.instantiation_nonce,
-			LockDelegateDependency => s.lock_delegate_dependency,
-			UnlockDelegateDependency => s.unlock_delegate_dependency,
+			SetCodeHash => T::WeightInfo::seal_set_code_hash(),
+			EcdsaToEthAddress => T::WeightInfo::seal_ecdsa_to_eth_address(),
+			ReentranceCount => T::WeightInfo::seal_reentrance_count(),
+			AccountReentranceCount => T::WeightInfo::seal_account_reentrance_count(),
+			InstantiationNonce => T::WeightInfo::seal_instantiation_nonce(),
+			LockDelegateDependency => T::WeightInfo::lock_delegate_dependency(),
+			UnlockDelegateDependency => T::WeightInfo::unlock_delegate_dependency(),
 		}
 	}
 }
@@ -819,6 +803,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> {
 		output_len_ptr: u32,
 	) -> Result<ReturnErrorCode, TrapReason> {
 		self.charge_gas(call_type.cost())?;
+
 		let input_data = if flags.contains(CallFlags::CLONE_INPUT) {
 			let input = self.input_data.as_ref().ok_or(Error::<E::T>::InputForwarded)?;
 			charge_gas!(self, RuntimeCosts::CallInputCloned(input.len() as u32))?;
@@ -842,7 +827,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> {
 				let value: BalanceOf<<E as Ext>::T> =
 					self.read_sandbox_memory_as(memory, value_ptr)?;
 				if value > 0u32.into() {
-					self.charge_gas(RuntimeCosts::CallSurchargeTransfer)?;
+					self.charge_gas(RuntimeCosts::CallTransferSurcharge)?;
 				}
 				self.ext.call(
 					weight,
@@ -910,7 +895,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> {
 		};
 		let value: BalanceOf<<E as Ext>::T> = self.read_sandbox_memory_as(memory, value_ptr)?;
 		if value > 0u32.into() {
-			self.charge_gas(RuntimeCosts::InstantiateSurchargeTransfer)?;
+			self.charge_gas(RuntimeCosts::InstantiateTransferSurcharge)?;
 		}
 		let code_hash: CodeHash<<E as Ext>::T> =
 			self.read_sandbox_memory_as(memory, code_hash_ptr)?;
@@ -942,7 +927,9 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> {
 	}
 
 	fn terminate(&mut self, memory: &[u8], beneficiary_ptr: u32) -> Result<(), TrapReason> {
-		self.charge_gas(RuntimeCosts::Terminate)?;
+		let count = self.ext.locked_delegate_dependencies_count() as _;
+		self.charge_gas(RuntimeCosts::Terminate(count))?;
+
 		let beneficiary: <<E as Ext>::T as frame_system::Config>::AccountId =
 			self.read_sandbox_memory_as(memory, beneficiary_ptr)?;
 		self.ext.terminate(&beneficiary)?;
@@ -959,6 +946,14 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> {
 // for every function.
 #[define_env(doc)]
 pub mod env {
+
+	/// Noop function used to benchmark the time it takes to execute an empty function.
+	#[cfg(feature = "runtime-benchmarks")]
+	#[unstable]
+	fn noop(ctx: _, memory: _) -> Result<(), TrapReason> {
+		Ok(())
+	}
+
 	/// Set the value at the given key in the contract storage.
 	/// See [`pallet_contracts_uapi::HostFn::set_storage`]
 	#[prefixed_alias]
@@ -1387,7 +1382,6 @@ pub mod env {
 	/// See [`pallet_contracts_uapi::HostFn::input`].
 	#[prefixed_alias]
 	fn input(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> {
-		ctx.charge_gas(RuntimeCosts::InputBase)?;
 		if let Some(input) = ctx.input_data.take() {
 			ctx.write_sandbox_output(memory, out_ptr, out_len_ptr, &input, false, |len| {
 				Some(RuntimeCosts::CopyToContract(len))
@@ -1408,7 +1402,7 @@ pub mod env {
 		data_ptr: u32,
 		data_len: u32,
 	) -> Result<(), TrapReason> {
-		ctx.charge_gas(RuntimeCosts::Return(data_len))?;
+		ctx.charge_gas(RuntimeCosts::CopyFromContract(data_len))?;
 		Err(TrapReason::Return(ReturnData {
 			flags,
 			data: ctx.read_sandbox_memory(memory, data_ptr, data_len)?,
@@ -2249,7 +2243,7 @@ pub mod env {
 	/// See [`pallet_contracts_uapi::HostFn::reentrance_count`].
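Both the schedule macros deleted earlier and the `cost_args!` re-introduced in runtime.rs compute a marginal weight by subtracting the zero-argument measurement from the one-argument measurement. Worked expansions, for illustration only:

```rust
// Old schedule.rs (removed by this patch):
//   cost!(seal_caller)
//     == T::WeightInfo::seal_caller(1)
//            .saturating_sub(T::WeightInfo::seal_caller(0))
//
// New runtime.rs:
//   CallTransferSurcharge => cost_args!(seal_call, 1, 0)
//     == T::WeightInfo::seal_call(1, 0)
//            .saturating_sub(T::WeightInfo::seal_call(0, 0))
//
//   HostFn => cost_args!(noop_host_fn, 1)
//     == T::WeightInfo::noop_host_fn(1)
//            .saturating_sub(T::WeightInfo::noop_host_fn(0))
// i.e. the marginal cost of one empty `noop` host call, charged as the
// base weight of every host function.
```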
#[unstable] fn reentrance_count(ctx: _, memory: _) -> Result { - ctx.charge_gas(RuntimeCosts::ReentrantCount)?; + ctx.charge_gas(RuntimeCosts::ReentranceCount)?; Ok(ctx.ext.reentrance_count()) } @@ -2258,7 +2252,7 @@ pub mod env { /// See [`pallet_contracts_uapi::HostFn::account_reentrance_count`]. #[unstable] fn account_reentrance_count(ctx: _, memory: _, account_ptr: u32) -> Result { - ctx.charge_gas(RuntimeCosts::AccountEntranceCount)?; + ctx.charge_gas(RuntimeCosts::AccountReentranceCount)?; let account_id: <::T as frame_system::Config>::AccountId = ctx.read_sandbox_memory_as(memory, account_ptr)?; Ok(ctx.ext.account_reentrance_count(&account_id)) diff --git a/substrate/frame/contracts/src/weights.rs b/substrate/frame/contracts/src/weights.rs index 950476698cd..2e9c2cd15af 100644 --- a/substrate/frame/contracts/src/weights.rs +++ b/substrate/frame/contracts/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_contracts` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-05-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-05-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-vicqj8em-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -72,65 +72,49 @@ pub trait WeightInfo { fn upload_code_determinism_relaxed(c: u32, ) -> Weight; fn remove_code() -> Weight; fn set_code() -> Weight; - fn seal_caller(r: u32, ) -> Weight; - fn seal_is_contract(r: u32, ) -> Weight; - fn seal_code_hash(r: u32, ) -> Weight; - fn seal_own_code_hash(r: u32, ) -> Weight; - fn seal_caller_is_origin(r: u32, ) -> Weight; - fn seal_caller_is_root(r: u32, ) -> Weight; - fn seal_address(r: u32, ) -> Weight; - fn seal_gas_left(r: u32, ) -> Weight; - fn seal_balance(r: u32, ) -> Weight; - fn seal_value_transferred(r: u32, ) -> Weight; - fn seal_minimum_balance(r: u32, ) -> Weight; - fn seal_block_number(r: u32, ) -> Weight; - fn seal_now(r: u32, ) -> Weight; - fn seal_weight_to_fee(r: u32, ) -> Weight; - fn seal_input(r: u32, ) -> Weight; - fn seal_input_per_byte(n: u32, ) -> Weight; - fn seal_return(r: u32, ) -> Weight; - fn seal_return_per_byte(n: u32, ) -> Weight; - fn seal_terminate(r: u32, ) -> Weight; - fn seal_random(r: u32, ) -> Weight; - fn seal_deposit_event(r: u32, ) -> Weight; - fn seal_deposit_event_per_topic_and_byte(t: u32, n: u32, ) -> Weight; - fn seal_debug_message(r: u32, ) -> Weight; - fn seal_debug_message_per_byte(i: u32, ) -> Weight; - fn seal_set_storage(r: u32, ) -> Weight; - fn seal_set_storage_per_new_byte(n: u32, ) -> Weight; - fn seal_set_storage_per_old_byte(n: u32, ) -> Weight; - fn seal_clear_storage(r: u32, ) -> Weight; - fn seal_clear_storage_per_byte(n: u32, ) -> Weight; - fn seal_get_storage(r: u32, ) -> Weight; - fn seal_get_storage_per_byte(n: u32, ) -> Weight; - fn seal_contains_storage(r: u32, ) -> Weight; - fn seal_contains_storage_per_byte(n: u32, ) -> Weight; - fn seal_take_storage(r: u32, ) -> Weight; - fn seal_take_storage_per_byte(n: u32, ) -> Weight; - fn seal_transfer(r: u32, ) -> Weight; - fn seal_call(r: u32, ) -> Weight; - fn seal_delegate_call(r: u32, ) -> Weight; - fn seal_call_per_transfer_clone_byte(t: u32, c: u32, ) -> Weight; - fn seal_instantiate(r: u32, ) -> Weight; - fn 
seal_instantiate_per_transfer_input_salt_byte(t: u32, i: u32, s: u32, ) -> Weight; - fn seal_hash_sha2_256(r: u32, ) -> Weight; - fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight; - fn seal_hash_keccak_256(r: u32, ) -> Weight; - fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight; - fn seal_hash_blake2_256(r: u32, ) -> Weight; - fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight; - fn seal_hash_blake2_128(r: u32, ) -> Weight; - fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight; - fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight; - fn seal_sr25519_verify(r: u32, ) -> Weight; - fn seal_ecdsa_recover(r: u32, ) -> Weight; - fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight; - fn seal_set_code_hash(r: u32, ) -> Weight; - fn lock_delegate_dependency(r: u32, ) -> Weight; - fn unlock_delegate_dependency(r: u32, ) -> Weight; - fn seal_reentrance_count(r: u32, ) -> Weight; - fn seal_account_reentrance_count(r: u32, ) -> Weight; - fn seal_instantiation_nonce(r: u32, ) -> Weight; + fn noop_host_fn(r: u32, ) -> Weight; + fn seal_caller() -> Weight; + fn seal_is_contract() -> Weight; + fn seal_code_hash() -> Weight; + fn seal_own_code_hash() -> Weight; + fn seal_caller_is_origin() -> Weight; + fn seal_caller_is_root() -> Weight; + fn seal_address() -> Weight; + fn seal_gas_left() -> Weight; + fn seal_balance() -> Weight; + fn seal_value_transferred() -> Weight; + fn seal_minimum_balance() -> Weight; + fn seal_block_number() -> Weight; + fn seal_now() -> Weight; + fn seal_weight_to_fee() -> Weight; + fn seal_input(n: u32, ) -> Weight; + fn seal_return(n: u32, ) -> Weight; + fn seal_terminate(n: u32, ) -> Weight; + fn seal_random() -> Weight; + fn seal_deposit_event(t: u32, n: u32, ) -> Weight; + fn seal_debug_message(i: u32, ) -> Weight; + fn seal_set_storage(n: u32, o: u32, ) -> Weight; + fn seal_clear_storage(n: u32, ) -> Weight; + fn seal_get_storage(n: u32, ) -> Weight; + fn seal_contains_storage(n: u32, ) -> Weight; + fn seal_take_storage(n: u32, ) -> Weight; + fn seal_transfer() -> Weight; + fn seal_call(t: u32, i: u32, ) -> Weight; + fn seal_delegate_call() -> Weight; + fn seal_instantiate(t: u32, i: u32, s: u32, ) -> Weight; + fn seal_hash_sha2_256(n: u32, ) -> Weight; + fn seal_hash_keccak_256(n: u32, ) -> Weight; + fn seal_hash_blake2_256(n: u32, ) -> Weight; + fn seal_hash_blake2_128(n: u32, ) -> Weight; + fn seal_sr25519_verify(n: u32, ) -> Weight; + fn seal_ecdsa_recover() -> Weight; + fn seal_ecdsa_to_eth_address() -> Weight; + fn seal_set_code_hash() -> Weight; + fn lock_delegate_dependency() -> Weight; + fn unlock_delegate_dependency() -> Weight; + fn seal_reentrance_count() -> Weight; + fn seal_account_reentrance_count() -> Weight; + fn seal_instantiation_nonce() -> Weight; fn instr_i64_load_store(r: u32, ) -> Weight; } @@ -143,8 +127,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_002_000 picoseconds. - Weight::from_parts(2_193_000, 1627) + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(2_142_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -154,10 +138,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `452 + k * (69 ยฑ0)` // Estimated: `442 + k * (70 ยฑ0)` - // Minimum execution time: 12_339_000 picoseconds. 
- Weight::from_parts(12_682_000, 442) - // Standard Error: 1_302 - .saturating_add(Weight::from_parts(1_163_234, 0).saturating_mul(k.into())) + // Minimum execution time: 12_095_000 picoseconds. + Weight::from_parts(12_699_000, 442) + // Standard Error: 891 + .saturating_add(Weight::from_parts(1_114_063, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -171,10 +155,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211 + c * (1 ยฑ0)` // Estimated: `6149 + c * (1 ยฑ0)` - // Minimum execution time: 8_145_000 picoseconds. - Weight::from_parts(8_747_247, 6149) + // Minimum execution time: 8_433_000 picoseconds. + Weight::from_parts(8_992_328, 6149) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_154, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_207, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -187,8 +171,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 16_950_000 picoseconds. - Weight::from_parts(17_498_000, 6450) + // Minimum execution time: 16_415_000 picoseconds. + Weight::from_parts(17_348_000, 6450) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -201,10 +185,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `171 + k * (1 ยฑ0)` // Estimated: `3635 + k * (1 ยฑ0)` - // Minimum execution time: 3_431_000 picoseconds. - Weight::from_parts(2_161_027, 3635) - // Standard Error: 949 - .saturating_add(Weight::from_parts(1_219_406, 0).saturating_mul(k.into())) + // Minimum execution time: 3_433_000 picoseconds. + Weight::from_parts(3_490_000, 3635) + // Standard Error: 1_043 + .saturating_add(Weight::from_parts(1_225_953, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -223,10 +207,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `325 + c * (1 ยฑ0)` // Estimated: `6263 + c * (1 ยฑ0)` - // Minimum execution time: 16_384_000 picoseconds. - Weight::from_parts(16_741_331, 6263) - // Standard Error: 1 - .saturating_add(Weight::from_parts(375, 0).saturating_mul(c.into())) + // Minimum execution time: 16_421_000 picoseconds. + Weight::from_parts(16_822_963, 6263) + // Standard Error: 0 + .saturating_add(Weight::from_parts(456, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -237,8 +221,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 12_529_000 picoseconds. - Weight::from_parts(13_319_000, 6380) + // Minimum execution time: 12_569_000 picoseconds. 
+ Weight::from_parts(13_277_000, 6380) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -252,8 +236,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 47_462_000 picoseconds. - Weight::from_parts(48_784_000, 6292) + // Minimum execution time: 46_777_000 picoseconds. + Weight::from_parts(47_690_000, 6292) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -265,8 +249,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 55_712_000 picoseconds. - Weight::from_parts(58_629_000, 6534) + // Minimum execution time: 55_280_000 picoseconds. + Weight::from_parts(57_081_000, 6534) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -276,8 +260,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `409` // Estimated: `6349` - // Minimum execution time: 11_992_000 picoseconds. - Weight::from_parts(12_686_000, 6349) + // Minimum execution time: 12_077_000 picoseconds. + Weight::from_parts(12_647_000, 6349) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -287,8 +271,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_498_000 picoseconds. - Weight::from_parts(2_594_000, 1627) + // Minimum execution time: 2_559_000 picoseconds. + Weight::from_parts(2_711_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -300,8 +284,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 12_179_000 picoseconds. - Weight::from_parts(12_805_000, 3631) + // Minimum execution time: 12_238_000 picoseconds. + Weight::from_parts(12_627_000, 3631) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -311,8 +295,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_695_000 picoseconds. - Weight::from_parts(5_105_000, 3607) + // Minimum execution time: 4_836_000 picoseconds. + Weight::from_parts(5_086_000, 3607) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -323,8 +307,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 6_223_000 picoseconds. - Weight::from_parts(6_509_000, 3632) + // Minimum execution time: 6_147_000 picoseconds. + Weight::from_parts(6_380_000, 3632) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -335,8 +319,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 6_073_000 picoseconds. - Weight::from_parts(6_524_000, 3607) + // Minimum execution time: 6_140_000 picoseconds. 
+ Weight::from_parts(6_670_000, 3607) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -352,19 +336,17 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `801 + c * (1 ยฑ0)` - // Estimated: `6739 + c * (1 ยฑ0)` - // Minimum execution time: 289_627_000 picoseconds. - Weight::from_parts(281_167_857, 6739) - // Standard Error: 68 - .saturating_add(Weight::from_parts(33_442, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Estimated: `4264 + c * (1 ยฑ0)` + // Minimum execution time: 354_459_000 picoseconds. + Weight::from_parts(332_397_871, 4264) + // Standard Error: 70 + .saturating_add(Weight::from_parts(33_775, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) @@ -373,8 +355,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:2 w:2) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `System::EventTopics` (r:3 w:3) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) @@ -391,17 +371,17 @@ impl WeightInfo for SubstrateWeight { fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `323` - // Estimated: `8737` - // Minimum execution time: 3_829_638_000 picoseconds. - Weight::from_parts(744_994_885, 8737) - // Standard Error: 165 - .saturating_add(Weight::from_parts(68_083, 0).saturating_mul(c.into())) - // Standard Error: 19 - .saturating_add(Weight::from_parts(1_484, 0).saturating_mul(i.into())) - // Standard Error: 19 - .saturating_add(Weight::from_parts(1_581, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().writes(10_u64)) + // Estimated: `6262` + // Minimum execution time: 4_239_452_000 picoseconds. 
+ Weight::from_parts(800_849_282, 6262) + // Standard Error: 117 + .saturating_add(Weight::from_parts(68_435, 0).saturating_mul(c.into())) + // Standard Error: 14 + .saturating_add(Weight::from_parts(1_653, 0).saturating_mul(i.into())) + // Standard Error: 14 + .saturating_add(Weight::from_parts(1_668, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -417,8 +397,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `i` is `[0, 1048576]`. @@ -426,15 +404,15 @@ impl WeightInfo for SubstrateWeight { fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `560` - // Estimated: `6504` - // Minimum execution time: 1_960_218_000 picoseconds. - Weight::from_parts(1_976_273_000, 6504) - // Standard Error: 25 - .saturating_add(Weight::from_parts(866, 0).saturating_mul(i.into())) - // Standard Error: 25 - .saturating_add(Weight::from_parts(824, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(10_u64)) - .saturating_add(T::DbWeight::get().writes(7_u64)) + // Estimated: `4029` + // Minimum execution time: 2_085_570_000 picoseconds. + Weight::from_parts(2_112_501_000, 4029) + // Standard Error: 26 + .saturating_add(Weight::from_parts(888, 0).saturating_mul(i.into())) + // Standard Error: 26 + .saturating_add(Weight::from_parts(795, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -448,16 +426,14 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: // Measured: `826` - // Estimated: `6766` - // Minimum execution time: 200_542_000 picoseconds. - Weight::from_parts(209_713_000, 6766) - .saturating_add(T::DbWeight::get().reads(8_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Estimated: `4291` + // Minimum execution time: 201_900_000 picoseconds. 
+ Weight::from_parts(206_738_000, 4291) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -465,8 +441,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `System::EventTopics` (r:1 w:1) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. @@ -474,12 +448,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 258_375_000 picoseconds. - Weight::from_parts(271_214_455, 3607) - // Standard Error: 61 - .saturating_add(Weight::from_parts(32_587, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Minimum execution time: 330_704_000 picoseconds. + Weight::from_parts(345_129_342, 3607) + // Standard Error: 51 + .saturating_add(Weight::from_parts(33_126, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -487,8 +461,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `System::EventTopics` (r:1 w:1) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. @@ -496,12 +468,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 279_363_000 picoseconds. - Weight::from_parts(257_721_413, 3607) - // Standard Error: 81 - .saturating_add(Weight::from_parts(33_850, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Minimum execution time: 343_339_000 picoseconds. 
+ Weight::from_parts(356_479_729, 3607) + // Standard Error: 49 + .saturating_add(Weight::from_parts(33_404, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -509,18 +481,16 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `System::EventTopics` (r:1 w:1) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) fn remove_code() -> Weight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 45_096_000 picoseconds. - Weight::from_parts(46_661_000, 3780) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Minimum execution time: 42_241_000 picoseconds. + Weight::from_parts(43_365_000, 3780) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -528,391 +498,238 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:2) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `System::EventTopics` (r:3 w:3) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_code() -> Weight { // Proof Size summary in bytes: // Measured: `552` - // Estimated: `8967` - // Minimum execution time: 34_260_000 picoseconds. - Weight::from_parts(35_761_000, 8967) - .saturating_add(T::DbWeight::get().reads(7_u64)) - .saturating_add(T::DbWeight::get().writes(6_u64)) + // Estimated: `6492` + // Minimum execution time: 26_318_000 picoseconds. + Weight::from_parts(27_840_000, 6492) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// The range of component `r` is `[0, 1600]`. - fn seal_caller(r: u32, ) -> Weight { + fn noop_host_fn(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_397_000 picoseconds. + Weight::from_parts(9_318_986, 0) + // Standard Error: 72 + .saturating_add(Weight::from_parts(72_994, 0).saturating_mul(r.into())) + } + fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_265_000 picoseconds. - Weight::from_parts(10_174_088, 0) - // Standard Error: 275 - .saturating_add(Weight::from_parts(271_791, 0).saturating_mul(r.into())) + // Minimum execution time: 644_000 picoseconds. 
+ Weight::from_parts(687_000, 0)
 }
- /// Storage: `Contracts::ContractInfoOf` (r:1600 w:0)
+ /// Storage: `Contracts::ContractInfoOf` (r:1 w:0)
 /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// The range of component `r` is `[0, 1600]`.
- fn seal_is_contract(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `509 + r * (77 ±0)`
- // Estimated: `1467 + r * (2552 ±0)`
- // Minimum execution time: 10_498_000 picoseconds.
- Weight::from_parts(10_551_000, 1467)
- // Standard Error: 5_538
- .saturating_add(Weight::from_parts(3_269_462, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 2552).saturating_mul(r.into()))
- }
- /// Storage: `Contracts::ContractInfoOf` (r:1600 w:0)
+ fn seal_is_contract() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `354`
+ // Estimated: `3819`
+ // Minimum execution time: 6_465_000 picoseconds.
+ Weight::from_parts(6_850_000, 3819)
+ .saturating_add(T::DbWeight::get().reads(1_u64))
+ }
+ /// Storage: `Contracts::ContractInfoOf` (r:1 w:0)
 /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// The range of component `r` is `[0, 1600]`.
- fn seal_code_hash(r: u32, ) -> Weight {
+ fn seal_code_hash() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `517 + r * (170 ±0)`
- // Estimated: `1468 + r * (2645 ±0)`
- // Minimum execution time: 10_289_000 picoseconds.
- Weight::from_parts(10_469_000, 1468)
- // Standard Error: 5_674
- .saturating_add(Weight::from_parts(4_105_274, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 2645).saturating_mul(r.into()))
+ // Measured: `447`
+ // Estimated: `3912`
+ // Minimum execution time: 7_735_000 picoseconds.
+ Weight::from_parts(8_115_000, 3912)
+ .saturating_add(T::DbWeight::get().reads(1_u64))
 }
- /// The range of component `r` is `[0, 1600]`.
- fn seal_own_code_hash(r: u32, ) -> Weight {
+ fn seal_own_code_hash() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 10_769_000 picoseconds.
- Weight::from_parts(10_389_944, 0)
- // Standard Error: 240
- .saturating_add(Weight::from_parts(350_466, 0).saturating_mul(r.into()))
+ // Minimum execution time: 717_000 picoseconds.
+ Weight::from_parts(791_000, 0)
 }
- /// The range of component `r` is `[0, 1600]`.
- fn seal_caller_is_origin(r: u32, ) -> Weight {
+ fn seal_caller_is_origin() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 10_443_000 picoseconds.
- Weight::from_parts(11_651_820, 0)
- // Standard Error: 91
- .saturating_add(Weight::from_parts(100_579, 0).saturating_mul(r.into()))
+ // Minimum execution time: 365_000 picoseconds.
+ Weight::from_parts(427_000, 0)
 }
- /// The range of component `r` is `[0, 1600]`.
- fn seal_caller_is_root(r: u32, ) -> Weight {
+ fn seal_caller_is_root() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 10_474_000 picoseconds.
- Weight::from_parts(11_313_654, 0)
- // Standard Error: 103
- .saturating_add(Weight::from_parts(85_902, 0).saturating_mul(r.into()))
+ // Minimum execution time: 331_000 picoseconds.
+ Weight::from_parts(363_000, 0)
 }
- /// The range of component `r` is `[0, 1600]`.
- fn seal_address(r: u32, ) -> Weight { + fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_360_000 picoseconds. - Weight::from_parts(11_283_384, 0) - // Standard Error: 163 - .saturating_add(Weight::from_parts(253_111, 0).saturating_mul(r.into())) + // Minimum execution time: 586_000 picoseconds. + Weight::from_parts(625_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_gas_left(r: u32, ) -> Weight { + fn seal_gas_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_289_000 picoseconds. - Weight::from_parts(10_747_872, 0) - // Standard Error: 197 - .saturating_add(Weight::from_parts(299_097, 0).saturating_mul(r.into())) + // Minimum execution time: 680_000 picoseconds. + Weight::from_parts(734_000, 0) } - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_balance(r: u32, ) -> Weight { + fn seal_balance() -> Weight { // Proof Size summary in bytes: // Measured: `140` - // Estimated: `3599` - // Minimum execution time: 10_368_000 picoseconds. - Weight::from_parts(29_685_372, 3599) - // Standard Error: 1_202 - .saturating_add(Weight::from_parts(1_517_645, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Estimated: `0` + // Minimum execution time: 4_732_000 picoseconds. + Weight::from_parts(5_008_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_value_transferred(r: u32, ) -> Weight { + fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_528_000 picoseconds. - Weight::from_parts(11_653_603, 0) - // Standard Error: 203 - .saturating_add(Weight::from_parts(241_937, 0).saturating_mul(r.into())) + // Minimum execution time: 608_000 picoseconds. + Weight::from_parts(635_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_minimum_balance(r: u32, ) -> Weight { + fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_385_000 picoseconds. - Weight::from_parts(11_483_212, 0) - // Standard Error: 227 - .saturating_add(Weight::from_parts(248_076, 0).saturating_mul(r.into())) + // Minimum execution time: 571_000 picoseconds. + Weight::from_parts(606_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_block_number(r: u32, ) -> Weight { + fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_341_000 picoseconds. - Weight::from_parts(12_055_382, 0) - // Standard Error: 1_231 - .saturating_add(Weight::from_parts(249_662, 0).saturating_mul(r.into())) + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(584_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_now(r: u32, ) -> Weight { + fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_467_000 picoseconds. - Weight::from_parts(10_579_667, 0) - // Standard Error: 247 - .saturating_add(Weight::from_parts(246_711, 0).saturating_mul(r.into())) + // Minimum execution time: 552_000 picoseconds. 
+ Weight::from_parts(612_000, 0) } /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_weight_to_fee(r: u32, ) -> Weight { + fn seal_weight_to_fee() -> Weight { // Proof Size summary in bytes: // Measured: `67` // Estimated: `1552` - // Minimum execution time: 10_293_000 picoseconds. - Weight::from_parts(18_229_738, 1552) - // Standard Error: 452 - .saturating_add(Weight::from_parts(655_277, 0).saturating_mul(r.into())) + // Minimum execution time: 4_396_000 picoseconds. + Weight::from_parts(4_630_000, 1552) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// The range of component `r` is `[0, 1600]`. - fn seal_input(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 10_355_000 picoseconds. - Weight::from_parts(11_641_920, 0) - // Standard Error: 166 - .saturating_add(Weight::from_parts(168_271, 0).saturating_mul(r.into())) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 1048576]`. - fn seal_input_per_byte(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `869` - // Estimated: `6809` - // Minimum execution time: 268_424_000 picoseconds. - Weight::from_parts(136_261_773, 6809) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_373, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } - /// The range of component `r` is `[0, 1]`. - fn seal_return(r: u32, ) -> Weight { + /// The range of component `n` is `[0, 1048572]`. + fn seal_input(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_044_000 picoseconds. - Weight::from_parts(10_550_491, 0) - // Standard Error: 20_456 - .saturating_add(Weight::from_parts(925_808, 0).saturating_mul(r.into())) + // Minimum execution time: 494_000 picoseconds. + Weight::from_parts(510_000, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(303, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 1048576]`. - fn seal_return_per_byte(n: u32, ) -> Weight { + /// The range of component `n` is `[0, 1048572]`. 
+ fn seal_return(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 11_361_000 picoseconds.
- Weight::from_parts(11_935_556, 0)
- // Standard Error: 0
- .saturating_add(Weight::from_parts(315, 0).saturating_mul(n.into()))
+ // Minimum execution time: 311_000 picoseconds.
+ Weight::from_parts(346_000, 0)
+ // Standard Error: 9
+ .saturating_add(Weight::from_parts(480, 0).saturating_mul(n.into()))
 }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:3 w:3)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `Contracts::CodeInfoOf` (r:33 w:33)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
 /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:1)
 /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:4 w:4)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// Storage: `Balances::Holds` (r:1 w:1)
- /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`)
+ /// Storage: `Contracts::CodeInfoOf` (r:33 w:33)
+ /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
 /// Storage: `Contracts::DeletionQueue` (r:0 w:1)
 /// Proof: `Contracts::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`)
- /// The range of component `r` is `[0, 1]`.
- fn seal_terminate(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `4802 + r * (2121 ±0)`
- // Estimated: `10742 + r * (81321 ±0)`
- // Minimum execution time: 293_793_000 picoseconds.
- Weight::from_parts(314_285_185, 10742)
- // Standard Error: 808_383
- .saturating_add(Weight::from_parts(256_215_014, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads(8_u64))
- .saturating_add(T::DbWeight::get().reads((38_u64).saturating_mul(r.into())))
+ /// The range of component `n` is `[0, 32]`.
+ fn seal_terminate(n: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `319 + n * (78 ±0)`
+ // Estimated: `3784 + n * (2553 ±0)`
+ // Minimum execution time: 14_403_000 picoseconds.
+ Weight::from_parts(16_478_113, 3784)
+ // Standard Error: 6_667
+ .saturating_add(Weight::from_parts(3_641_603, 0).saturating_mul(n.into()))
+ .saturating_add(T::DbWeight::get().reads(2_u64))
+ .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into())))
 .saturating_add(T::DbWeight::get().writes(3_u64))
- .saturating_add(T::DbWeight::get().writes((41_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 81321).saturating_mul(r.into()))
+ .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into())))
+ .saturating_add(Weight::from_parts(0, 2553).saturating_mul(n.into()))
 }
 /// Storage: `RandomnessCollectiveFlip::RandomMaterial` (r:1 w:0)
 /// Proof: `RandomnessCollectiveFlip::RandomMaterial` (`max_values`: Some(1), `max_size`: Some(2594), added: 3089, mode: `Measured`)
- /// The range of component `r` is `[0, 1600]`.
- fn seal_random(r: u32, ) -> Weight {
+ fn seal_random() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `76`
 // Estimated: `1561`
- // Minimum execution time: 10_323_000 picoseconds.
- Weight::from_parts(10_996_645, 1561)
- // Standard Error: 566
- .saturating_add(Weight::from_parts(1_133_870, 0).saturating_mul(r.into()))
+ // Minimum execution time: 3_639_000 picoseconds.
+ Weight::from_parts(3_801_000, 1561)
 .saturating_add(T::DbWeight::get().reads(1_u64))
 }
- /// The range of component `r` is `[0, 1600]`.
- fn seal_deposit_event(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `0`
- // Estimated: `0`
- // Minimum execution time: 10_122_000 picoseconds.
- Weight::from_parts(17_368_451, 0)
- // Standard Error: 679
- .saturating_add(Weight::from_parts(1_660_129, 0).saturating_mul(r.into()))
- }
 /// Storage: `System::EventTopics` (r:4 w:4)
 /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `t` is `[0, 4]`.
 /// The range of component `n` is `[0, 16384]`.
- fn seal_deposit_event_per_topic_and_byte(t: u32, n: u32, ) -> Weight {
+ fn seal_deposit_event(t: u32, n: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `990 + t * (2475 ±0)`
- // Minimum execution time: 24_515_000 picoseconds.
- Weight::from_parts(16_807_493, 990)
- // Standard Error: 13_923
- .saturating_add(Weight::from_parts(2_315_122, 0).saturating_mul(t.into()))
- // Standard Error: 3
- .saturating_add(Weight::from_parts(573, 0).saturating_mul(n.into()))
+ // Minimum execution time: 4_102_000 picoseconds.
+ Weight::from_parts(4_256_984, 990)
+ // Standard Error: 6_777
+ .saturating_add(Weight::from_parts(2_331_893, 0).saturating_mul(t.into()))
+ // Standard Error: 1
+ .saturating_add(Weight::from_parts(31, 0).saturating_mul(n.into()))
 .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into())))
 .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into())))
 .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into()))
 }
- /// The range of component `r` is `[0, 1600]`.
- fn seal_debug_message(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `0`
- // Estimated: `0`
- // Minimum execution time: 9_596_000 picoseconds.
- Weight::from_parts(9_113_960, 0)
- // Standard Error: 139
- .saturating_add(Weight::from_parts(112_197, 0).saturating_mul(r.into()))
- }
 /// The range of component `i` is `[0, 1048576]`.
- fn seal_debug_message_per_byte(i: u32, ) -> Weight {
+ fn seal_debug_message(i: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 11_260_000 picoseconds.
- Weight::from_parts(11_341_000, 0)
- // Standard Error: 8
- .saturating_add(Weight::from_parts(984, 0).saturating_mul(i.into()))
- }
- /// Storage: `Skipped::Metadata` (r:0 w:0)
- /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `r` is `[0, 800]`.
- fn seal_set_storage(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `108 + r * (150 ±0)`
- // Estimated: `105 + r * (151 ±0)`
- // Minimum execution time: 10_660_000 picoseconds.
- Weight::from_parts(10_762_000, 105)
- // Standard Error: 7_920
- .saturating_add(Weight::from_parts(5_122_380, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into())))
- .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into()))
- }
- /// Storage: `Skipped::Metadata` (r:0 w:0)
- /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `n` is `[0, 16384]`.
- fn seal_set_storage_per_new_byte(n: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `245`
- // Estimated: `245`
- // Minimum execution time: 19_446_000 picoseconds.
- Weight::from_parts(20_166_940, 245)
- // Standard Error: 2
- .saturating_add(Weight::from_parts(287, 0).saturating_mul(n.into()))
- .saturating_add(T::DbWeight::get().reads(1_u64))
- .saturating_add(T::DbWeight::get().writes(1_u64))
+ // Minimum execution time: 385_000 picoseconds.
+ Weight::from_parts(427_000, 0)
+ // Standard Error: 10
+ .saturating_add(Weight::from_parts(1_272, 0).saturating_mul(i.into()))
 }
 /// Storage: `Skipped::Metadata` (r:0 w:0)
 /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `n` is `[0, 16384]`.
- fn seal_set_storage_per_old_byte(n: u32, ) -> Weight {
+ /// The range of component `o` is `[0, 16384]`.
+ fn seal_set_storage(n: u32, o: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `248 + n * (1 ±0)`
- // Estimated: `248 + n * (1 ±0)`
- // Minimum execution time: 19_249_000 picoseconds.
- Weight::from_parts(20_875_560, 248)
- // Standard Error: 2
- .saturating_add(Weight::from_parts(73, 0).saturating_mul(n.into()))
+ // Measured: `250 + o * (1 ±0)`
+ // Estimated: `249 + o * (1 ±0)`
+ // Minimum execution time: 10_128_000 picoseconds.
+ Weight::from_parts(9_963_519, 249)
+ // Standard Error: 1
+ .saturating_add(Weight::from_parts(327, 0).saturating_mul(n.into()))
+ // Standard Error: 1
+ .saturating_add(Weight::from_parts(58, 0).saturating_mul(o.into()))
 .saturating_add(T::DbWeight::get().reads(1_u64))
 .saturating_add(T::DbWeight::get().writes(1_u64))
- .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into()))
- }
- /// Storage: `Skipped::Metadata` (r:0 w:0)
- /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `r` is `[0, 800]`.
- fn seal_clear_storage(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `108 + r * (150 ±0)`
- // Estimated: `105 + r * (151 ±0)`
- // Minimum execution time: 10_477_000 picoseconds.
- Weight::from_parts(10_633_000, 105)
- // Standard Error: 8_552
- .saturating_add(Weight::from_parts(5_159_505, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into())))
- .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into()))
+ .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into()))
 }
 /// Storage: `Skipped::Metadata` (r:0 w:0)
 /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `n` is `[0, 16384]`.
- fn seal_clear_storage_per_byte(n: u32, ) -> Weight {
+ fn seal_clear_storage(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `248 + n * (1 ±0)`
 // Estimated: `248 + n * (1 ±0)`
- // Minimum execution time: 19_265_000 picoseconds.
- Weight::from_parts(20_699_861, 248)
+ // Minimum execution time: 7_921_000 picoseconds.
+ Weight::from_parts(9_290_526, 248)
 // Standard Error: 2
 .saturating_add(Weight::from_parts(77, 0).saturating_mul(n.into()))
 .saturating_add(T::DbWeight::get().reads(1_u64))
@@ -921,205 +738,91 @@ impl WeightInfo for SubstrateWeight<T> {
 }
 /// Storage: `Skipped::Metadata` (r:0 w:0)
 /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `r` is `[0, 800]`.
- fn seal_get_storage(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `108 + r * (150 ±0)`
- // Estimated: `105 + r * (151 ±0)`
- // Minimum execution time: 10_336_000 picoseconds.
- Weight::from_parts(10_466_000, 105)
- // Standard Error: 7_699
- .saturating_add(Weight::from_parts(4_542_224, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into()))
- }
- /// Storage: `Skipped::Metadata` (r:0 w:0)
- /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `n` is `[0, 16384]`.
- fn seal_get_storage_per_byte(n: u32, ) -> Weight {
+ fn seal_get_storage(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `248 + n * (1 ±0)`
 // Estimated: `248 + n * (1 ±0)`
- // Minimum execution time: 18_513_000 picoseconds.
- Weight::from_parts(20_357_236, 248)
+ // Minimum execution time: 7_403_000 picoseconds.
+ Weight::from_parts(8_815_037, 248)
 // Standard Error: 3
- .saturating_add(Weight::from_parts(588, 0).saturating_mul(n.into()))
+ .saturating_add(Weight::from_parts(701, 0).saturating_mul(n.into()))
 .saturating_add(T::DbWeight::get().reads(1_u64))
 .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into()))
 }
 /// Storage: `Skipped::Metadata` (r:0 w:0)
 /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `r` is `[0, 800]`.
- fn seal_contains_storage(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `108 + r * (150 ±0)`
- // Estimated: `105 + r * (151 ±0)`
- // Minimum execution time: 10_432_000 picoseconds.
- Weight::from_parts(10_658_000, 105)
- // Standard Error: 7_129
- .saturating_add(Weight::from_parts(4_423_298, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into()))
- }
- /// Storage: `Skipped::Metadata` (r:0 w:0)
- /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `n` is `[0, 16384]`.
- fn seal_contains_storage_per_byte(n: u32, ) -> Weight {
+ fn seal_contains_storage(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `248 + n * (1 ±0)`
 // Estimated: `248 + n * (1 ±0)`
- // Minimum execution time: 17_663_000 picoseconds.
- Weight::from_parts(19_107_828, 248)
+ // Minimum execution time: 6_590_000 picoseconds.
+ Weight::from_parts(7_949_861, 248)
 // Standard Error: 2
- .saturating_add(Weight::from_parts(86, 0).saturating_mul(n.into()))
+ .saturating_add(Weight::from_parts(76, 0).saturating_mul(n.into()))
 .saturating_add(T::DbWeight::get().reads(1_u64))
 .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into()))
 }
 /// Storage: `Skipped::Metadata` (r:0 w:0)
 /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `r` is `[0, 800]`.
- fn seal_take_storage(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `108 + r * (150 ±0)`
- // Estimated: `105 + r * (151 ±0)`
- // Minimum execution time: 10_254_000 picoseconds.
- Weight::from_parts(10_332_000, 105)
- // Standard Error: 9_485
- .saturating_add(Weight::from_parts(5_242_433, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into())))
- .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into()))
- }
- /// Storage: `Skipped::Metadata` (r:0 w:0)
- /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `n` is `[0, 16384]`.
- fn seal_take_storage_per_byte(n: u32, ) -> Weight {
+ fn seal_take_storage(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `248 + n * (1 ±0)`
 // Estimated: `248 + n * (1 ±0)`
- // Minimum execution time: 19_410_000 picoseconds.
- Weight::from_parts(21_347_311, 248)
+ // Minimum execution time: 7_900_000 picoseconds.
+ Weight::from_parts(9_988_151, 248)
 // Standard Error: 3
- .saturating_add(Weight::from_parts(607, 0).saturating_mul(n.into()))
+ .saturating_add(Weight::from_parts(703, 0).saturating_mul(n.into()))
 .saturating_add(T::DbWeight::get().reads(1_u64))
 .saturating_add(T::DbWeight::get().writes(1_u64))
 .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into()))
 }
- /// Storage: `System::Account` (r:1601 w:1601)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// The range of component `r` is `[0, 1600]`.
- fn seal_transfer(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `770`
- // Estimated: `4221 + r * (2475 ±0)`
- // Minimum execution time: 10_365_000 picoseconds.
- Weight::from_parts(10_514_000, 4221)
- // Standard Error: 18_360
- .saturating_add(Weight::from_parts(33_433_850, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads(1_u64))
- .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into())))
- .saturating_add(T::DbWeight::get().writes(1_u64))
- .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 2475).saturating_mul(r.into()))
+ fn seal_transfer() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `140`
+ // Estimated: `0`
+ // Minimum execution time: 9_023_000 picoseconds.
+ Weight::from_parts(9_375_000, 0)
 }
- /// Storage: `Contracts::ContractInfoOf` (r:800 w:801)
+ /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
 /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
 /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
 /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
 /// Storage: `Contracts::PristineCode` (r:1 w:0)
 /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:801 w:801)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `r` is `[0, 800]`.
- fn seal_call(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `517 + r * (170 ±0)`
- // Estimated: `3985 + r * (2646 ±0)`
- // Minimum execution time: 10_332_000 picoseconds.
- Weight::from_parts(10_424_000, 3985)
- // Standard Error: 117_754
- .saturating_add(Weight::from_parts(242_191_645, 0).saturating_mul(r.into()))
+ /// Storage: `System::Account` (r:1 w:1)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
+ /// The range of component `t` is `[0, 1]`.
+ /// The range of component `i` is `[0, 1048576]`.
+ fn seal_call(t: u32, i: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `620 + t * (280 ±0)`
+ // Estimated: `4085 + t * (2182 ±0)`
+ // Minimum execution time: 157_109_000 picoseconds.
+ Weight::from_parts(159_458_069, 4085)
+ // Standard Error: 339_702
+ .saturating_add(Weight::from_parts(44_066_869, 0).saturating_mul(t.into()))
+ // Standard Error: 0
+ .saturating_add(Weight::from_parts(6, 0).saturating_mul(i.into()))
 .saturating_add(T::DbWeight::get().reads(3_u64))
- .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(r.into())))
- .saturating_add(T::DbWeight::get().writes(2_u64))
- .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 2646).saturating_mul(r.into()))
+ .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into())))
+ .saturating_add(T::DbWeight::get().writes(1_u64))
+ .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into())))
+ .saturating_add(Weight::from_parts(0, 2182).saturating_mul(t.into()))
 }
- /// Storage: `Contracts::CodeInfoOf` (r:735 w:0)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:735 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:736 w:736)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:0 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// The range of component `r` is `[0, 800]`.
- fn seal_delegate_call(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `0 + r * (527 ±0)`
- // Estimated: `6444 + r * (2583 ±10)`
- // Minimum execution time: 10_550_000 picoseconds.
- Weight::from_parts(10_667_000, 6444)
- // Standard Error: 147_918
- .saturating_add(Weight::from_parts(242_824_174, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into())))
- .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 2583).saturating_mul(r.into()))
- }
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:2)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
 /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
 /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
 /// Storage: `Contracts::PristineCode` (r:1 w:0)
 /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `System::Account` (r:2 w:2)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `t` is `[0, 1]`.
- /// The range of component `c` is `[0, 1048576]`.
- fn seal_call_per_transfer_clone_byte(t: u32, c: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `696 + t * (277 ±0)`
- // Estimated: `6636 + t * (3457 ±0)`
- // Minimum execution time: 213_206_000 picoseconds.
- Weight::from_parts(120_511_970, 6636)
- // Standard Error: 2_501_856
- .saturating_add(Weight::from_parts(40_016_645, 0).saturating_mul(t.into()))
- // Standard Error: 3
- .saturating_add(Weight::from_parts(420, 0).saturating_mul(c.into()))
- .saturating_add(T::DbWeight::get().reads(5_u64))
- .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(t.into())))
- .saturating_add(T::DbWeight::get().writes(4_u64))
- .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(t.into())))
- .saturating_add(Weight::from_parts(0, 3457).saturating_mul(t.into()))
- }
- /// Storage: `Contracts::CodeInfoOf` (r:800 w:800)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:800 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Contracts::Nonce` (r:1 w:0)
- /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:800 w:801)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `System::Account` (r:802 w:802)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:801 w:801)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `r` is `[1, 800]`.
- fn seal_instantiate(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `1094 + r * (188 ±0)`
- // Estimated: `6987 + r * (2664 ±0)`
- // Minimum execution time: 334_708_000 picoseconds.
- Weight::from_parts(346_676_000, 6987)
- // Standard Error: 236_074
- .saturating_add(Weight::from_parts(330_734_734, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads(4_u64))
- .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(r.into())))
- .saturating_add(T::DbWeight::get().writes(4_u64))
- .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 2664).saturating_mul(r.into()))
+ fn seal_delegate_call() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `430`
+ // Estimated: `3895`
+ // Minimum execution time: 143_384_000 picoseconds.
+ Weight::from_parts(147_554_000, 3895)
+ .saturating_add(T::DbWeight::get().reads(2_u64))
 }
 /// Storage: `Contracts::CodeInfoOf` (r:1 w:1)
 /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
@@ -1127,250 +830,149 @@ impl WeightInfo for SubstrateWeight<T> {
 /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
 /// Storage: `Contracts::Nonce` (r:1 w:0)
 /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:2)
+ /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
 /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `System::Account` (r:3 w:3)
+ /// Storage: `System::Account` (r:1 w:1)
 /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `t` is `[0, 1]`.
 /// The range of component `i` is `[0, 983040]`.
 /// The range of component `s` is `[0, 983040]`.
- fn seal_instantiate_per_transfer_input_salt_byte(t: u32, i: u32, s: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `757 + t * (104 ±0)`
- // Estimated: `6716 + t * (2549 ±1)`
- // Minimum execution time: 1_854_462_000 picoseconds.
- Weight::from_parts(855_253_052, 6716)
- // Standard Error: 13_502_046
- .saturating_add(Weight::from_parts(20_015_409, 0).saturating_mul(t.into()))
- // Standard Error: 21
- .saturating_add(Weight::from_parts(1_060, 0).saturating_mul(i.into()))
- // Standard Error: 21
- .saturating_add(Weight::from_parts(1_201, 0).saturating_mul(s.into()))
- .saturating_add(T::DbWeight::get().reads(8_u64))
- .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into())))
- .saturating_add(T::DbWeight::get().writes(7_u64))
- .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into())))
- .saturating_add(Weight::from_parts(0, 2549).saturating_mul(t.into()))
- }
- /// The range of component `r` is `[0, 1600]`.
- fn seal_hash_sha2_256(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `0`
- // Estimated: `0`
- // Minimum execution time: 10_384_000 picoseconds.
- Weight::from_parts(10_319_961, 0)
- // Standard Error: 293
- .saturating_add(Weight::from_parts(267_788, 0).saturating_mul(r.into()))
+ fn seal_instantiate(t: u32, i: u32, s: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `676`
+ // Estimated: `4138`
+ // Minimum execution time: 1_798_243_000 picoseconds.
+ Weight::from_parts(82_642_573, 4138)
+ // Standard Error: 6_831_260
+ .saturating_add(Weight::from_parts(159_867_027, 0).saturating_mul(t.into()))
+ // Standard Error: 10
+ .saturating_add(Weight::from_parts(1_534, 0).saturating_mul(i.into()))
+ // Standard Error: 10
+ .saturating_add(Weight::from_parts(1_809, 0).saturating_mul(s.into()))
+ .saturating_add(T::DbWeight::get().reads(5_u64))
+ .saturating_add(T::DbWeight::get().writes(3_u64))
 }
 /// The range of component `n` is `[0, 1048576]`.
- fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `0`
- // Estimated: `0`
- // Minimum execution time: 11_991_000 picoseconds.
- Weight::from_parts(792_256, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_071, 0).saturating_mul(n.into())) - } - /// The range of component `r` is `[0, 1600]`. - fn seal_hash_keccak_256(r: u32, ) -> Weight { + fn seal_hash_sha2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_210_000 picoseconds. - Weight::from_parts(8_251_750, 0) - // Standard Error: 584 - .saturating_add(Weight::from_parts(662_961, 0).saturating_mul(r.into())) + // Minimum execution time: 875_000 picoseconds. + Weight::from_parts(904_000, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_145, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. - fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight { + fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_994_000 picoseconds. - Weight::from_parts(6_532_799, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(3_351, 0).saturating_mul(n.into())) - } - /// The range of component `r` is `[0, 1600]`. - fn seal_hash_blake2_256(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 10_209_000 picoseconds. - Weight::from_parts(10_895_450, 0) - // Standard Error: 195 - .saturating_add(Weight::from_parts(328_195, 0).saturating_mul(r.into())) + // Minimum execution time: 1_475_000 picoseconds. + Weight::from_parts(1_551_000, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(3_410, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. - fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight { + fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_493_000 picoseconds. - Weight::from_parts(4_721_812, 0) + // Minimum execution time: 821_000 picoseconds. + Weight::from_parts(850_000, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_195, 0).saturating_mul(n.into())) - } - /// The range of component `r` is `[0, 1600]`. - fn seal_hash_blake2_128(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 10_134_000 picoseconds. - Weight::from_parts(11_712_472, 0) - // Standard Error: 316 - .saturating_add(Weight::from_parts(335_912, 0).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(1_279, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. - fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight { + fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_448_000 picoseconds. - Weight::from_parts(1_407_440, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_205, 0).saturating_mul(n.into())) + // Minimum execution time: 747_000 picoseconds. + Weight::from_parts(773_000, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_276, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 125697]`. - fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 54_644_000 picoseconds. 
- Weight::from_parts(55_793_413, 0)
- // Standard Error: 11
- .saturating_add(Weight::from_parts(4_511, 0).saturating_mul(n.into()))
- }
- /// The range of component `r` is `[0, 160]`.
- fn seal_sr25519_verify(r: u32, ) -> Weight {
+ fn seal_sr25519_verify(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 10_378_000 picoseconds.
- Weight::from_parts(25_185_485, 0)
- // Standard Error: 8_828
- .saturating_add(Weight::from_parts(41_091_818, 0).saturating_mul(r.into()))
+ // Minimum execution time: 43_154_000 picoseconds.
+ Weight::from_parts(45_087_558, 0)
+ // Standard Error: 9
+ .saturating_add(Weight::from_parts(4_628, 0).saturating_mul(n.into()))
 }
- /// The range of component `r` is `[0, 160]`.
- fn seal_ecdsa_recover(r: u32, ) -> Weight {
+ fn seal_ecdsa_recover() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 10_371_000 picoseconds.
- Weight::from_parts(35_350_533, 0)
- // Standard Error: 9_805
- .saturating_add(Weight::from_parts(45_466_060, 0).saturating_mul(r.into()))
+ // Minimum execution time: 47_193_000 picoseconds.
+ Weight::from_parts(48_514_000, 0)
 }
- /// The range of component `r` is `[0, 160]`.
- fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight {
+ fn seal_ecdsa_to_eth_address() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 10_407_000 picoseconds.
- Weight::from_parts(14_375_492, 0)
- // Standard Error: 4_036
- .saturating_add(Weight::from_parts(11_666_630, 0).saturating_mul(r.into()))
+ // Minimum execution time: 13_083_000 picoseconds.
+ Weight::from_parts(13_218_000, 0)
 }
- /// Storage: `Contracts::CodeInfoOf` (r:1536 w:1536)
+ /// Storage: `Contracts::CodeInfoOf` (r:1 w:1)
 /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1535 w:0)
+ /// Storage: `Contracts::PristineCode` (r:1 w:0)
 /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:1537 w:1537)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `r` is `[0, 1600]`.
- fn seal_set_code_hash(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `0 + r * (926 ±0)`
- // Estimated: `8966 + r * (3047 ±10)`
- // Minimum execution time: 10_566_000 picoseconds.
- Weight::from_parts(10_627_000, 8966)
- // Standard Error: 46_429
- .saturating_add(Weight::from_parts(22_435_893, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into())))
- .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 3047).saturating_mul(r.into()))
- }
- /// Storage: `Contracts::CodeInfoOf` (r:32 w:32)
+ fn seal_set_code_hash() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `430`
+ // Estimated: `3895`
+ // Minimum execution time: 19_308_000 picoseconds.
+ Weight::from_parts(20_116_000, 3895)
+ .saturating_add(T::DbWeight::get().reads(2_u64))
+ .saturating_add(T::DbWeight::get().writes(1_u64))
+ }
+ /// Storage: `Contracts::CodeInfoOf` (r:1 w:1)
 /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// The range of component `r` is `[0, 32]`.
- fn lock_delegate_dependency(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `274 + r * (78 ±0)`
- // Estimated: `1265 + r * (2553 ±0)`
- // Minimum execution time: 10_305_000 picoseconds.
- Weight::from_parts(16_073_202, 1265)
- // Standard Error: 8_841
- .saturating_add(Weight::from_parts(5_125_440, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into())))
- .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 2553).saturating_mul(r.into()))
- }
- /// Storage: `Contracts::CodeInfoOf` (r:32 w:32)
+ fn lock_delegate_dependency() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `355`
+ // Estimated: `3820`
+ // Minimum execution time: 9_271_000 picoseconds.
+ Weight::from_parts(9_640_000, 3820)
+ .saturating_add(T::DbWeight::get().reads(1_u64))
+ .saturating_add(T::DbWeight::get().writes(1_u64))
+ }
+ /// Storage: `Contracts::CodeInfoOf` (r:1 w:1)
 /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`)
- /// The range of component `r` is `[0, 32]`.
- fn unlock_delegate_dependency(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `275 + r * (78 ±0)`
- // Estimated: `990 + r * (2568 ±0)`
- // Minimum execution time: 10_389_000 picoseconds.
- Weight::from_parts(16_221_879, 990)
- // Standard Error: 9_409
- .saturating_add(Weight::from_parts(4_235_040, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into())))
- .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 2568).saturating_mul(r.into()))
+ fn unlock_delegate_dependency() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `355`
+ // Estimated: `3558`
+ // Minimum execution time: 8_182_000 picoseconds.
+ Weight::from_parts(8_343_000, 3558)
+ .saturating_add(T::DbWeight::get().reads(1_u64))
+ .saturating_add(T::DbWeight::get().writes(1_u64))
 }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:1 w:0)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `r` is `[0, 1600]`.
- fn seal_reentrance_count(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `858 + r * (3 ±0)`
- // Estimated: `6804 + r * (3 ±0)`
- // Minimum execution time: 265_499_000 picoseconds.
- Weight::from_parts(282_172_889, 6804)
- // Standard Error: 442
- .saturating_add(Weight::from_parts(165_070, 0).saturating_mul(r.into()))
- .saturating_add(T::DbWeight::get().reads(8_u64))
- .saturating_add(T::DbWeight::get().writes(3_u64))
- .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into()))
+ fn seal_reentrance_count() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `0`
+ // Estimated: `0`
+ // Minimum execution time: 320_000 picoseconds.
+ Weight::from_parts(347_000, 0)
 }
- /// The range of component `r` is `[0, 1600]`.
- fn seal_account_reentrance_count(r: u32, ) -> Weight {
+ fn seal_account_reentrance_count() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 10_367_000 picoseconds.
- Weight::from_parts(13_220_303, 0)
- // Standard Error: 151
- .saturating_add(Weight::from_parts(86_117, 0).saturating_mul(r.into()))
+ // Minimum execution time: 345_000 picoseconds.
+ Weight::from_parts(370_000, 0)
 }
 /// Storage: `Contracts::Nonce` (r:1 w:0)
 /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// The range of component `r` is `[0, 1600]`.
- fn seal_instantiation_nonce(r: u32, ) -> Weight {
+ fn seal_instantiation_nonce() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `219`
 // Estimated: `1704`
- // Minimum execution time: 10_223_000 picoseconds.
- Weight::from_parts(14_170_002, 1704)
- // Standard Error: 71
- .saturating_add(Weight::from_parts(76_372, 0).saturating_mul(r.into()))
+ // Minimum execution time: 2_998_000 picoseconds.
+ Weight::from_parts(3_221_000, 1704)
 .saturating_add(T::DbWeight::get().reads(1_u64))
 }
 /// The range of component `r` is `[0, 5000]`.
@@ -1378,10 +980,10 @@ impl WeightInfo for SubstrateWeight<T> {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 754_000 picoseconds.
- Weight::from_parts(1_091_740, 0)
- // Standard Error: 29
- .saturating_add(Weight::from_parts(14_954, 0).saturating_mul(r.into()))
+ // Minimum execution time: 1_002_000 picoseconds.
+ Weight::from_parts(1_094_958, 0)
+ // Standard Error: 12
+ .saturating_add(Weight::from_parts(14_531, 0).saturating_mul(r.into()))
 }
}

@@ -1393,8 +995,8 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `142`
 // Estimated: `1627`
- // Minimum execution time: 2_002_000 picoseconds.
- Weight::from_parts(2_193_000, 1627)
+ // Minimum execution time: 2_000_000 picoseconds.
+ Weight::from_parts(2_142_000, 1627)
 .saturating_add(RocksDbWeight::get().reads(1_u64))
 }
 /// Storage: `Skipped::Metadata` (r:0 w:0)
@@ -1404,10 +1006,10 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `452 + k * (69 ±0)`
 // Estimated: `442 + k * (70 ±0)`
- // Minimum execution time: 12_339_000 picoseconds.
- Weight::from_parts(12_682_000, 442)
- // Standard Error: 1_302
- .saturating_add(Weight::from_parts(1_163_234, 0).saturating_mul(k.into()))
+ // Minimum execution time: 12_095_000 picoseconds.
+ Weight::from_parts(12_699_000, 442)
+ // Standard Error: 891
+ .saturating_add(Weight::from_parts(1_114_063, 0).saturating_mul(k.into()))
 .saturating_add(RocksDbWeight::get().reads(2_u64))
 .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into())))
 .saturating_add(RocksDbWeight::get().writes(2_u64))
@@ -1421,10 +1023,10 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `211 + c * (1 ±0)`
 // Estimated: `6149 + c * (1 ±0)`
- // Minimum execution time: 8_145_000 picoseconds.
- Weight::from_parts(8_747_247, 6149)
+ // Minimum execution time: 8_433_000 picoseconds.
+ Weight::from_parts(8_992_328, 6149)
 // Standard Error: 1
- .saturating_add(Weight::from_parts(1_154, 0).saturating_mul(c.into()))
+ .saturating_add(Weight::from_parts(1_207, 0).saturating_mul(c.into()))
 .saturating_add(RocksDbWeight::get().reads(2_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into()))
@@ -1437,8 +1039,8 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `510`
 // Estimated: `6450`
- // Minimum execution time: 16_950_000 picoseconds.
- Weight::from_parts(17_498_000, 6450)
+ // Minimum execution time: 16_415_000 picoseconds.
+ Weight::from_parts(17_348_000, 6450)
 .saturating_add(RocksDbWeight::get().reads(3_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
@@ -1451,10 +1053,10 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `171 + k * (1 ±0)`
 // Estimated: `3635 + k * (1 ±0)`
- // Minimum execution time: 3_431_000 picoseconds.
- Weight::from_parts(2_161_027, 3635)
- // Standard Error: 949
- .saturating_add(Weight::from_parts(1_219_406, 0).saturating_mul(k.into()))
+ // Minimum execution time: 3_433_000 picoseconds.
+ Weight::from_parts(3_490_000, 3635)
+ // Standard Error: 1_043
+ .saturating_add(Weight::from_parts(1_225_953, 0).saturating_mul(k.into()))
 .saturating_add(RocksDbWeight::get().reads(1_u64))
 .saturating_add(RocksDbWeight::get().writes(2_u64))
 .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into())))
@@ -1473,10 +1075,10 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `325 + c * (1 ±0)`
 // Estimated: `6263 + c * (1 ±0)`
- // Minimum execution time: 16_384_000 picoseconds.
- Weight::from_parts(16_741_331, 6263)
- // Standard Error: 1
- .saturating_add(Weight::from_parts(375, 0).saturating_mul(c.into()))
+ // Minimum execution time: 16_421_000 picoseconds.
+ Weight::from_parts(16_822_963, 6263)
+ // Standard Error: 0
+ .saturating_add(Weight::from_parts(456, 0).saturating_mul(c.into()))
 .saturating_add(RocksDbWeight::get().reads(4_u64))
 .saturating_add(RocksDbWeight::get().writes(2_u64))
 .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into()))
@@ -1487,8 +1089,8 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `440`
 // Estimated: `6380`
- // Minimum execution time: 12_529_000 picoseconds.
- Weight::from_parts(13_319_000, 6380)
+ // Minimum execution time: 12_569_000 picoseconds.
+ Weight::from_parts(13_277_000, 6380)
 .saturating_add(RocksDbWeight::get().reads(2_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
@@ -1502,8 +1104,8 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `352`
 // Estimated: `6292`
- // Minimum execution time: 47_462_000 picoseconds.
- Weight::from_parts(48_784_000, 6292)
+ // Minimum execution time: 46_777_000 picoseconds.
+ Weight::from_parts(47_690_000, 6292) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1515,8 +1117,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 55_712_000 picoseconds. - Weight::from_parts(58_629_000, 6534) + // Minimum execution time: 55_280_000 picoseconds. + Weight::from_parts(57_081_000, 6534) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1526,8 +1128,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `409` // Estimated: `6349` - // Minimum execution time: 11_992_000 picoseconds. - Weight::from_parts(12_686_000, 6349) + // Minimum execution time: 12_077_000 picoseconds. + Weight::from_parts(12_647_000, 6349) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1537,8 +1139,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_498_000 picoseconds. - Weight::from_parts(2_594_000, 1627) + // Minimum execution time: 2_559_000 picoseconds. + Weight::from_parts(2_711_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1550,8 +1152,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 12_179_000 picoseconds. - Weight::from_parts(12_805_000, 3631) + // Minimum execution time: 12_238_000 picoseconds. + Weight::from_parts(12_627_000, 3631) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1561,8 +1163,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_695_000 picoseconds. - Weight::from_parts(5_105_000, 3607) + // Minimum execution time: 4_836_000 picoseconds. + Weight::from_parts(5_086_000, 3607) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -1573,8 +1175,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 6_223_000 picoseconds. - Weight::from_parts(6_509_000, 3632) + // Minimum execution time: 6_147_000 picoseconds. + Weight::from_parts(6_380_000, 3632) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -1585,8 +1187,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 6_073_000 picoseconds. - Weight::from_parts(6_524_000, 3607) + // Minimum execution time: 6_140_000 picoseconds. 
+ Weight::from_parts(6_670_000, 3607)
 .saturating_add(RocksDbWeight::get().reads(2_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
@@ -1602,19 +1204,17 @@ impl WeightInfo for () {
 /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
 /// Storage: `System::Account` (r:1 w:1)
 /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `c` is `[0, 125952]`.
 fn call_with_code_per_byte(c: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `801 + c * (1 ±0)`
- // Estimated: `6739 + c * (1 ±0)`
- // Minimum execution time: 289_627_000 picoseconds.
- Weight::from_parts(281_167_857, 6739)
- // Standard Error: 68
- .saturating_add(Weight::from_parts(33_442, 0).saturating_mul(c.into()))
- .saturating_add(RocksDbWeight::get().reads(8_u64))
- .saturating_add(RocksDbWeight::get().writes(4_u64))
+ // Estimated: `4264 + c * (1 ±0)`
+ // Minimum execution time: 354_459_000 picoseconds.
+ Weight::from_parts(332_397_871, 4264)
+ // Standard Error: 70
+ .saturating_add(Weight::from_parts(33_775, 0).saturating_mul(c.into()))
+ .saturating_add(RocksDbWeight::get().reads(6_u64))
+ .saturating_add(RocksDbWeight::get().writes(2_u64))
 .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into()))
 }
 /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
@@ -1623,8 +1223,6 @@ impl WeightInfo for () {
 /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
 /// Storage: `Balances::Holds` (r:2 w:2)
 /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:3 w:3)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// Storage: `Contracts::Nonce` (r:1 w:1)
 /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
 /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
@@ -1641,17 +1239,17 @@ impl WeightInfo for () {
 fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `323`
- // Estimated: `8737`
- // Minimum execution time: 3_829_638_000 picoseconds.
- Weight::from_parts(744_994_885, 8737)
- // Standard Error: 165
- .saturating_add(Weight::from_parts(68_083, 0).saturating_mul(c.into()))
- // Standard Error: 19
- .saturating_add(Weight::from_parts(1_484, 0).saturating_mul(i.into()))
- // Standard Error: 19
- .saturating_add(Weight::from_parts(1_581, 0).saturating_mul(s.into()))
- .saturating_add(RocksDbWeight::get().reads(11_u64))
- .saturating_add(RocksDbWeight::get().writes(10_u64))
+ // Estimated: `6262`
+ // Minimum execution time: 4_239_452_000 picoseconds.
+ Weight::from_parts(800_849_282, 6262) + // Standard Error: 117 + .saturating_add(Weight::from_parts(68_435, 0).saturating_mul(c.into())) + // Standard Error: 14 + .saturating_add(Weight::from_parts(1_653, 0).saturating_mul(i.into())) + // Standard Error: 14 + .saturating_add(Weight::from_parts(1_668, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -1667,8 +1265,6 @@ impl WeightInfo for () { /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `i` is `[0, 1048576]`. @@ -1676,15 +1272,15 @@ impl WeightInfo for () { fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `560` - // Estimated: `6504` - // Minimum execution time: 1_960_218_000 picoseconds. - Weight::from_parts(1_976_273_000, 6504) - // Standard Error: 25 - .saturating_add(Weight::from_parts(866, 0).saturating_mul(i.into())) - // Standard Error: 25 - .saturating_add(Weight::from_parts(824, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(10_u64)) - .saturating_add(RocksDbWeight::get().writes(7_u64)) + // Estimated: `4029` + // Minimum execution time: 2_085_570_000 picoseconds. + Weight::from_parts(2_112_501_000, 4029) + // Standard Error: 26 + .saturating_add(Weight::from_parts(888, 0).saturating_mul(i.into())) + // Standard Error: 26 + .saturating_add(Weight::from_parts(795, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -1698,16 +1294,14 @@ impl WeightInfo for () { /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: // Measured: `826` - // Estimated: `6766` - // Minimum execution time: 200_542_000 picoseconds. - Weight::from_parts(209_713_000, 6766) - .saturating_add(RocksDbWeight::get().reads(8_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Estimated: `4291` + // Minimum execution time: 201_900_000 picoseconds. 
+ Weight::from_parts(206_738_000, 4291) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -1715,8 +1309,6 @@ impl WeightInfo for () { /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `System::EventTopics` (r:1 w:1) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. @@ -1724,12 +1316,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 258_375_000 picoseconds. - Weight::from_parts(271_214_455, 3607) - // Standard Error: 61 - .saturating_add(Weight::from_parts(32_587, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Minimum execution time: 330_704_000 picoseconds. + Weight::from_parts(345_129_342, 3607) + // Standard Error: 51 + .saturating_add(Weight::from_parts(33_126, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -1737,8 +1329,6 @@ impl WeightInfo for () { /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `System::EventTopics` (r:1 w:1) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. @@ -1746,12 +1336,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 279_363_000 picoseconds. - Weight::from_parts(257_721_413, 3607) - // Standard Error: 81 - .saturating_add(Weight::from_parts(33_850, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Minimum execution time: 343_339_000 picoseconds. 
+ Weight::from_parts(356_479_729, 3607) + // Standard Error: 49 + .saturating_add(Weight::from_parts(33_404, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -1759,18 +1349,16 @@ impl WeightInfo for () { /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `System::EventTopics` (r:1 w:1) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) fn remove_code() -> Weight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 45_096_000 picoseconds. - Weight::from_parts(46_661_000, 3780) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Minimum execution time: 42_241_000 picoseconds. + Weight::from_parts(43_365_000, 3780) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -1778,391 +1366,238 @@ impl WeightInfo for () { /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:2) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `System::EventTopics` (r:3 w:3) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_code() -> Weight { // Proof Size summary in bytes: // Measured: `552` - // Estimated: `8967` - // Minimum execution time: 34_260_000 picoseconds. - Weight::from_parts(35_761_000, 8967) - .saturating_add(RocksDbWeight::get().reads(7_u64)) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + // Estimated: `6492` + // Minimum execution time: 26_318_000 picoseconds. + Weight::from_parts(27_840_000, 6492) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// The range of component `r` is `[0, 1600]`. - fn seal_caller(r: u32, ) -> Weight { + fn noop_host_fn(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_397_000 picoseconds. + Weight::from_parts(9_318_986, 0) + // Standard Error: 72 + .saturating_add(Weight::from_parts(72_994, 0).saturating_mul(r.into())) + } + fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_265_000 picoseconds. - Weight::from_parts(10_174_088, 0) - // Standard Error: 275 - .saturating_add(Weight::from_parts(271_791, 0).saturating_mul(r.into())) + // Minimum execution time: 644_000 picoseconds. 
+ Weight::from_parts(687_000, 0)
 }
- /// Storage: `Contracts::ContractInfoOf` (r:1600 w:0)
+ /// Storage: `Contracts::ContractInfoOf` (r:1 w:0)
 /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// The range of component `r` is `[0, 1600]`.
- fn seal_is_contract(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `509 + r * (77 ±0)`
- // Estimated: `1467 + r * (2552 ±0)`
- // Minimum execution time: 10_498_000 picoseconds.
- Weight::from_parts(10_551_000, 1467)
- // Standard Error: 5_538
- .saturating_add(Weight::from_parts(3_269_462, 0).saturating_mul(r.into()))
- .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 2552).saturating_mul(r.into()))
- }
- /// Storage: `Contracts::ContractInfoOf` (r:1600 w:0)
+ fn seal_is_contract() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `354`
+ // Estimated: `3819`
+ // Minimum execution time: 6_465_000 picoseconds.
+ Weight::from_parts(6_850_000, 3819)
+ .saturating_add(RocksDbWeight::get().reads(1_u64))
+ }
+ /// Storage: `Contracts::ContractInfoOf` (r:1 w:0)
 /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// The range of component `r` is `[0, 1600]`.
- fn seal_code_hash(r: u32, ) -> Weight {
+ fn seal_code_hash() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `517 + r * (170 ±0)`
- // Estimated: `1468 + r * (2645 ±0)`
- // Minimum execution time: 10_289_000 picoseconds.
- Weight::from_parts(10_469_000, 1468)
- // Standard Error: 5_674
- .saturating_add(Weight::from_parts(4_105_274, 0).saturating_mul(r.into()))
- .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 2645).saturating_mul(r.into()))
+ // Measured: `447`
+ // Estimated: `3912`
+ // Minimum execution time: 7_735_000 picoseconds.
+ Weight::from_parts(8_115_000, 3912)
+ .saturating_add(RocksDbWeight::get().reads(1_u64))
 }
- /// The range of component `r` is `[0, 1600]`.
- fn seal_own_code_hash(r: u32, ) -> Weight {
+ fn seal_own_code_hash() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 10_769_000 picoseconds.
- Weight::from_parts(10_389_944, 0)
- // Standard Error: 240
- .saturating_add(Weight::from_parts(350_466, 0).saturating_mul(r.into()))
+ // Minimum execution time: 717_000 picoseconds.
+ Weight::from_parts(791_000, 0)
 }
- /// The range of component `r` is `[0, 1600]`.
- fn seal_caller_is_origin(r: u32, ) -> Weight {
+ fn seal_caller_is_origin() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 10_443_000 picoseconds.
- Weight::from_parts(11_651_820, 0)
- // Standard Error: 91
- .saturating_add(Weight::from_parts(100_579, 0).saturating_mul(r.into()))
+ // Minimum execution time: 365_000 picoseconds.
+ Weight::from_parts(427_000, 0)
 }
- /// The range of component `r` is `[0, 1600]`.
- fn seal_caller_is_root(r: u32, ) -> Weight {
+ fn seal_caller_is_root() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 10_474_000 picoseconds.
- Weight::from_parts(11_313_654, 0)
- // Standard Error: 103
- .saturating_add(Weight::from_parts(85_902, 0).saturating_mul(r.into()))
+ // Minimum execution time: 331_000 picoseconds.
+ Weight::from_parts(363_000, 0)
 }
- /// The range of component `r` is `[0, 1600]`.
- fn seal_address(r: u32, ) -> Weight { + fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_360_000 picoseconds. - Weight::from_parts(11_283_384, 0) - // Standard Error: 163 - .saturating_add(Weight::from_parts(253_111, 0).saturating_mul(r.into())) + // Minimum execution time: 586_000 picoseconds. + Weight::from_parts(625_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_gas_left(r: u32, ) -> Weight { + fn seal_gas_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_289_000 picoseconds. - Weight::from_parts(10_747_872, 0) - // Standard Error: 197 - .saturating_add(Weight::from_parts(299_097, 0).saturating_mul(r.into())) + // Minimum execution time: 680_000 picoseconds. + Weight::from_parts(734_000, 0) } - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_balance(r: u32, ) -> Weight { + fn seal_balance() -> Weight { // Proof Size summary in bytes: // Measured: `140` - // Estimated: `3599` - // Minimum execution time: 10_368_000 picoseconds. - Weight::from_parts(29_685_372, 3599) - // Standard Error: 1_202 - .saturating_add(Weight::from_parts(1_517_645, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Estimated: `0` + // Minimum execution time: 4_732_000 picoseconds. + Weight::from_parts(5_008_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_value_transferred(r: u32, ) -> Weight { + fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_528_000 picoseconds. - Weight::from_parts(11_653_603, 0) - // Standard Error: 203 - .saturating_add(Weight::from_parts(241_937, 0).saturating_mul(r.into())) + // Minimum execution time: 608_000 picoseconds. + Weight::from_parts(635_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_minimum_balance(r: u32, ) -> Weight { + fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_385_000 picoseconds. - Weight::from_parts(11_483_212, 0) - // Standard Error: 227 - .saturating_add(Weight::from_parts(248_076, 0).saturating_mul(r.into())) + // Minimum execution time: 571_000 picoseconds. + Weight::from_parts(606_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_block_number(r: u32, ) -> Weight { + fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_341_000 picoseconds. - Weight::from_parts(12_055_382, 0) - // Standard Error: 1_231 - .saturating_add(Weight::from_parts(249_662, 0).saturating_mul(r.into())) + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(584_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_now(r: u32, ) -> Weight { + fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_467_000 picoseconds. - Weight::from_parts(10_579_667, 0) - // Standard Error: 247 - .saturating_add(Weight::from_parts(246_711, 0).saturating_mul(r.into())) + // Minimum execution time: 552_000 picoseconds. 
+ Weight::from_parts(612_000, 0) } /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_weight_to_fee(r: u32, ) -> Weight { + fn seal_weight_to_fee() -> Weight { // Proof Size summary in bytes: // Measured: `67` // Estimated: `1552` - // Minimum execution time: 10_293_000 picoseconds. - Weight::from_parts(18_229_738, 1552) - // Standard Error: 452 - .saturating_add(Weight::from_parts(655_277, 0).saturating_mul(r.into())) + // Minimum execution time: 4_396_000 picoseconds. + Weight::from_parts(4_630_000, 1552) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// The range of component `r` is `[0, 1600]`. - fn seal_input(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 10_355_000 picoseconds. - Weight::from_parts(11_641_920, 0) - // Standard Error: 166 - .saturating_add(Weight::from_parts(168_271, 0).saturating_mul(r.into())) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 1048576]`. - fn seal_input_per_byte(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `869` - // Estimated: `6809` - // Minimum execution time: 268_424_000 picoseconds. - Weight::from_parts(136_261_773, 6809) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_373, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - } - /// The range of component `r` is `[0, 1]`. - fn seal_return(r: u32, ) -> Weight { + /// The range of component `n` is `[0, 1048572]`. + fn seal_input(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_044_000 picoseconds. - Weight::from_parts(10_550_491, 0) - // Standard Error: 20_456 - .saturating_add(Weight::from_parts(925_808, 0).saturating_mul(r.into())) + // Minimum execution time: 494_000 picoseconds. + Weight::from_parts(510_000, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(303, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 1048576]`. - fn seal_return_per_byte(n: u32, ) -> Weight { + /// The range of component `n` is `[0, 1048572]`. 
+ fn seal_return(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 11_361_000 picoseconds.
- Weight::from_parts(11_935_556, 0)
- // Standard Error: 0
- .saturating_add(Weight::from_parts(315, 0).saturating_mul(n.into()))
+ // Minimum execution time: 311_000 picoseconds.
+ Weight::from_parts(346_000, 0)
+ // Standard Error: 9
+ .saturating_add(Weight::from_parts(480, 0).saturating_mul(n.into()))
 }
- /// Storage: `Contracts::MigrationInProgress` (r:1 w:0)
- /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`)
- /// Storage: `System::Account` (r:3 w:3)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `Contracts::CodeInfoOf` (r:33 w:33)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:1 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Timestamp::Now` (r:1 w:0)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
 /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:1)
 /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:4 w:4)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// Storage: `Balances::Holds` (r:1 w:1)
- /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`)
+ /// Storage: `Contracts::CodeInfoOf` (r:33 w:33)
+ /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
 /// Storage: `Contracts::DeletionQueue` (r:0 w:1)
 /// Proof: `Contracts::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`)
- /// The range of component `r` is `[0, 1]`.
- fn seal_terminate(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `4802 + r * (2121 ±0)`
- // Estimated: `10742 + r * (81321 ±0)`
- // Minimum execution time: 293_793_000 picoseconds.
- Weight::from_parts(314_285_185, 10742)
- // Standard Error: 808_383
- .saturating_add(Weight::from_parts(256_215_014, 0).saturating_mul(r.into()))
- .saturating_add(RocksDbWeight::get().reads(8_u64))
- .saturating_add(RocksDbWeight::get().reads((38_u64).saturating_mul(r.into())))
+ /// The range of component `n` is `[0, 32]`.
+ fn seal_terminate(n: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `319 + n * (78 ±0)`
+ // Estimated: `3784 + n * (2553 ±0)`
+ // Minimum execution time: 14_403_000 picoseconds.
+ Weight::from_parts(16_478_113, 3784)
+ // Standard Error: 6_667
+ .saturating_add(Weight::from_parts(3_641_603, 0).saturating_mul(n.into()))
+ .saturating_add(RocksDbWeight::get().reads(2_u64))
+ .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into())))
 .saturating_add(RocksDbWeight::get().writes(3_u64))
- .saturating_add(RocksDbWeight::get().writes((41_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 81321).saturating_mul(r.into()))
+ .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into())))
+ .saturating_add(Weight::from_parts(0, 2553).saturating_mul(n.into()))
 }
 /// Storage: `RandomnessCollectiveFlip::RandomMaterial` (r:1 w:0)
 /// Proof: `RandomnessCollectiveFlip::RandomMaterial` (`max_values`: Some(1), `max_size`: Some(2594), added: 3089, mode: `Measured`)
- /// The range of component `r` is `[0, 1600]`.
- fn seal_random(r: u32, ) -> Weight {
+ fn seal_random() -> Weight {
 // Proof Size summary in bytes:
 // Measured: `76`
 // Estimated: `1561`
- // Minimum execution time: 10_323_000 picoseconds.
- Weight::from_parts(10_996_645, 1561)
- // Standard Error: 566
- .saturating_add(Weight::from_parts(1_133_870, 0).saturating_mul(r.into()))
+ // Minimum execution time: 3_639_000 picoseconds.
+ Weight::from_parts(3_801_000, 1561)
 .saturating_add(RocksDbWeight::get().reads(1_u64))
 }
- /// The range of component `r` is `[0, 1600]`.
- fn seal_deposit_event(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `0`
- // Estimated: `0`
- // Minimum execution time: 10_122_000 picoseconds.
- Weight::from_parts(17_368_451, 0)
- // Standard Error: 679
- .saturating_add(Weight::from_parts(1_660_129, 0).saturating_mul(r.into()))
- }
 /// Storage: `System::EventTopics` (r:4 w:4)
 /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `t` is `[0, 4]`.
 /// The range of component `n` is `[0, 16384]`.
- fn seal_deposit_event_per_topic_and_byte(t: u32, n: u32, ) -> Weight {
+ fn seal_deposit_event(t: u32, n: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `990 + t * (2475 ±0)`
- // Minimum execution time: 24_515_000 picoseconds.
- Weight::from_parts(16_807_493, 990)
- // Standard Error: 13_923
- .saturating_add(Weight::from_parts(2_315_122, 0).saturating_mul(t.into()))
- // Standard Error: 3
- .saturating_add(Weight::from_parts(573, 0).saturating_mul(n.into()))
+ // Minimum execution time: 4_102_000 picoseconds.
+ Weight::from_parts(4_256_984, 990)
+ // Standard Error: 6_777
+ .saturating_add(Weight::from_parts(2_331_893, 0).saturating_mul(t.into()))
+ // Standard Error: 1
+ .saturating_add(Weight::from_parts(31, 0).saturating_mul(n.into()))
 .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into())))
 .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into())))
 .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into()))
 }
- /// The range of component `r` is `[0, 1600]`.
- fn seal_debug_message(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `0`
- // Estimated: `0`
- // Minimum execution time: 9_596_000 picoseconds.
- Weight::from_parts(9_113_960, 0)
- // Standard Error: 139
- .saturating_add(Weight::from_parts(112_197, 0).saturating_mul(r.into()))
- }
 /// The range of component `i` is `[0, 1048576]`.
- fn seal_debug_message_per_byte(i: u32, ) -> Weight {
+ fn seal_debug_message(i: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 11_260_000 picoseconds.
- Weight::from_parts(11_341_000, 0)
- // Standard Error: 8
- .saturating_add(Weight::from_parts(984, 0).saturating_mul(i.into()))
- }
- /// Storage: `Skipped::Metadata` (r:0 w:0)
- /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `r` is `[0, 800]`.
- fn seal_set_storage(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `108 + r * (150 ±0)`
- // Estimated: `105 + r * (151 ±0)`
- // Minimum execution time: 10_660_000 picoseconds.
- Weight::from_parts(10_762_000, 105)
- // Standard Error: 7_920
- .saturating_add(Weight::from_parts(5_122_380, 0).saturating_mul(r.into()))
- .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into())))
- .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into()))
- }
- /// Storage: `Skipped::Metadata` (r:0 w:0)
- /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `n` is `[0, 16384]`.
- fn seal_set_storage_per_new_byte(n: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `245`
- // Estimated: `245`
- // Minimum execution time: 19_446_000 picoseconds.
- Weight::from_parts(20_166_940, 245)
- // Standard Error: 2
- .saturating_add(Weight::from_parts(287, 0).saturating_mul(n.into()))
- .saturating_add(RocksDbWeight::get().reads(1_u64))
- .saturating_add(RocksDbWeight::get().writes(1_u64))
+ // Minimum execution time: 385_000 picoseconds.
+ Weight::from_parts(427_000, 0)
+ // Standard Error: 10
+ .saturating_add(Weight::from_parts(1_272, 0).saturating_mul(i.into()))
 }
 /// Storage: `Skipped::Metadata` (r:0 w:0)
 /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `n` is `[0, 16384]`.
- fn seal_set_storage_per_old_byte(n: u32, ) -> Weight {
+ /// The range of component `o` is `[0, 16384]`.
+ fn seal_set_storage(n: u32, o: u32, ) -> Weight {
 // Proof Size summary in bytes:
- // Measured: `248 + n * (1 ±0)`
- // Estimated: `248 + n * (1 ±0)`
- // Minimum execution time: 19_249_000 picoseconds.
- Weight::from_parts(20_875_560, 248)
- // Standard Error: 2
- .saturating_add(Weight::from_parts(73, 0).saturating_mul(n.into()))
+ // Measured: `250 + o * (1 ±0)`
+ // Estimated: `249 + o * (1 ±0)`
+ // Minimum execution time: 10_128_000 picoseconds.
+ Weight::from_parts(9_963_519, 249)
+ // Standard Error: 1
+ .saturating_add(Weight::from_parts(327, 0).saturating_mul(n.into()))
+ // Standard Error: 1
+ .saturating_add(Weight::from_parts(58, 0).saturating_mul(o.into()))
 .saturating_add(RocksDbWeight::get().reads(1_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
- .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into()))
- }
- /// Storage: `Skipped::Metadata` (r:0 w:0)
- /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `r` is `[0, 800]`.
- fn seal_clear_storage(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `108 + r * (150 ±0)`
- // Estimated: `105 + r * (151 ±0)`
- // Minimum execution time: 10_477_000 picoseconds.
- Weight::from_parts(10_633_000, 105)
- // Standard Error: 8_552
- .saturating_add(Weight::from_parts(5_159_505, 0).saturating_mul(r.into()))
- .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into())))
- .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into()))
+ .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into()))
 }
 /// Storage: `Skipped::Metadata` (r:0 w:0)
 /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `n` is `[0, 16384]`.
- fn seal_clear_storage_per_byte(n: u32, ) -> Weight {
+ fn seal_clear_storage(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `248 + n * (1 ±0)`
 // Estimated: `248 + n * (1 ±0)`
- // Minimum execution time: 19_265_000 picoseconds.
- Weight::from_parts(20_699_861, 248)
+ // Minimum execution time: 7_921_000 picoseconds.
+ Weight::from_parts(9_290_526, 248)
 // Standard Error: 2
 .saturating_add(Weight::from_parts(77, 0).saturating_mul(n.into()))
 .saturating_add(RocksDbWeight::get().reads(1_u64))
@@ -2171,205 +1606,91 @@ impl WeightInfo for () {
 }
 /// Storage: `Skipped::Metadata` (r:0 w:0)
 /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `r` is `[0, 800]`.
- fn seal_get_storage(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `108 + r * (150 ±0)`
- // Estimated: `105 + r * (151 ±0)`
- // Minimum execution time: 10_336_000 picoseconds.
- Weight::from_parts(10_466_000, 105)
- // Standard Error: 7_699
- .saturating_add(Weight::from_parts(4_542_224, 0).saturating_mul(r.into()))
- .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into()))
- }
- /// Storage: `Skipped::Metadata` (r:0 w:0)
- /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `n` is `[0, 16384]`.
- fn seal_get_storage_per_byte(n: u32, ) -> Weight {
+ fn seal_get_storage(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `248 + n * (1 ±0)`
 // Estimated: `248 + n * (1 ±0)`
- // Minimum execution time: 18_513_000 picoseconds.
- Weight::from_parts(20_357_236, 248)
+ // Minimum execution time: 7_403_000 picoseconds.
+ Weight::from_parts(8_815_037, 248)
 // Standard Error: 3
- .saturating_add(Weight::from_parts(588, 0).saturating_mul(n.into()))
+ .saturating_add(Weight::from_parts(701, 0).saturating_mul(n.into()))
 .saturating_add(RocksDbWeight::get().reads(1_u64))
 .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into()))
 }
 /// Storage: `Skipped::Metadata` (r:0 w:0)
 /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `r` is `[0, 800]`.
- fn seal_contains_storage(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `108 + r * (150 ±0)`
- // Estimated: `105 + r * (151 ±0)`
- // Minimum execution time: 10_432_000 picoseconds.
- Weight::from_parts(10_658_000, 105)
- // Standard Error: 7_129
- .saturating_add(Weight::from_parts(4_423_298, 0).saturating_mul(r.into()))
- .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into()))
- }
- /// Storage: `Skipped::Metadata` (r:0 w:0)
- /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `n` is `[0, 16384]`.
- fn seal_contains_storage_per_byte(n: u32, ) -> Weight {
+ fn seal_contains_storage(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `248 + n * (1 ±0)`
 // Estimated: `248 + n * (1 ±0)`
- // Minimum execution time: 17_663_000 picoseconds.
- Weight::from_parts(19_107_828, 248)
+ // Minimum execution time: 6_590_000 picoseconds.
+ Weight::from_parts(7_949_861, 248)
 // Standard Error: 2
- .saturating_add(Weight::from_parts(86, 0).saturating_mul(n.into()))
+ .saturating_add(Weight::from_parts(76, 0).saturating_mul(n.into()))
 .saturating_add(RocksDbWeight::get().reads(1_u64))
 .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into()))
 }
 /// Storage: `Skipped::Metadata` (r:0 w:0)
 /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `r` is `[0, 800]`.
- fn seal_take_storage(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `108 + r * (150 ±0)`
- // Estimated: `105 + r * (151 ±0)`
- // Minimum execution time: 10_254_000 picoseconds.
- Weight::from_parts(10_332_000, 105)
- // Standard Error: 9_485
- .saturating_add(Weight::from_parts(5_242_433, 0).saturating_mul(r.into()))
- .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into())))
- .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into()))
- }
- /// Storage: `Skipped::Metadata` (r:0 w:0)
- /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `n` is `[0, 16384]`.
- fn seal_take_storage_per_byte(n: u32, ) -> Weight {
+ fn seal_take_storage(n: u32, ) -> Weight {
 // Proof Size summary in bytes:
 // Measured: `248 + n * (1 ±0)`
 // Estimated: `248 + n * (1 ±0)`
- // Minimum execution time: 19_410_000 picoseconds.
- Weight::from_parts(21_347_311, 248)
+ // Minimum execution time: 7_900_000 picoseconds.
+ Weight::from_parts(9_988_151, 248)
 // Standard Error: 3
- .saturating_add(Weight::from_parts(607, 0).saturating_mul(n.into()))
+ .saturating_add(Weight::from_parts(703, 0).saturating_mul(n.into()))
 .saturating_add(RocksDbWeight::get().reads(1_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into()))
 }
- /// Storage: `System::Account` (r:1601 w:1601)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// The range of component `r` is `[0, 1600]`.
- fn seal_transfer(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `770`
- // Estimated: `4221 + r * (2475 ±0)`
- // Minimum execution time: 10_365_000 picoseconds.
- Weight::from_parts(10_514_000, 4221)
- // Standard Error: 18_360
- .saturating_add(Weight::from_parts(33_433_850, 0).saturating_mul(r.into()))
- .saturating_add(RocksDbWeight::get().reads(1_u64))
- .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into())))
- .saturating_add(RocksDbWeight::get().writes(1_u64))
- .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 2475).saturating_mul(r.into()))
+ fn seal_transfer() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `140`
+ // Estimated: `0`
+ // Minimum execution time: 9_023_000 picoseconds.
+ Weight::from_parts(9_375_000, 0)
 }
- /// Storage: `Contracts::ContractInfoOf` (r:800 w:801)
+ /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
 /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
 /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
 /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
 /// Storage: `Contracts::PristineCode` (r:1 w:0)
 /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:801 w:801)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `r` is `[0, 800]`.
- fn seal_call(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `517 + r * (170 ±0)`
- // Estimated: `3985 + r * (2646 ±0)`
- // Minimum execution time: 10_332_000 picoseconds.
- Weight::from_parts(10_424_000, 3985)
- // Standard Error: 117_754
- .saturating_add(Weight::from_parts(242_191_645, 0).saturating_mul(r.into()))
+ /// Storage: `System::Account` (r:1 w:1)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
+ /// The range of component `t` is `[0, 1]`.
+ /// The range of component `i` is `[0, 1048576]`.
+ fn seal_call(t: u32, i: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `620 + t * (280 ±0)`
+ // Estimated: `4085 + t * (2182 ±0)`
+ // Minimum execution time: 157_109_000 picoseconds.
+ Weight::from_parts(159_458_069, 4085)
+ // Standard Error: 339_702
+ .saturating_add(Weight::from_parts(44_066_869, 0).saturating_mul(t.into()))
+ // Standard Error: 0
+ .saturating_add(Weight::from_parts(6, 0).saturating_mul(i.into()))
 .saturating_add(RocksDbWeight::get().reads(3_u64))
- .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(r.into())))
- .saturating_add(RocksDbWeight::get().writes(2_u64))
- .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 2646).saturating_mul(r.into()))
+ .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into())))
+ .saturating_add(RocksDbWeight::get().writes(1_u64))
+ .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into())))
+ .saturating_add(Weight::from_parts(0, 2182).saturating_mul(t.into()))
 }
- /// Storage: `Contracts::CodeInfoOf` (r:735 w:0)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:735 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:736 w:736)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:0 w:1)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// The range of component `r` is `[0, 800]`.
- fn seal_delegate_call(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `0 + r * (527 ±0)`
- // Estimated: `6444 + r * (2583 ±10)`
- // Minimum execution time: 10_550_000 picoseconds.
- Weight::from_parts(10_667_000, 6444)
- // Standard Error: 147_918
- .saturating_add(Weight::from_parts(242_824_174, 0).saturating_mul(r.into()))
- .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into())))
- .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 2583).saturating_mul(r.into()))
- }
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:2)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
 /// Storage: `Contracts::CodeInfoOf` (r:1 w:0)
 /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
 /// Storage: `Contracts::PristineCode` (r:1 w:0)
 /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `System::Account` (r:2 w:2)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `t` is `[0, 1]`.
- /// The range of component `c` is `[0, 1048576]`.
- fn seal_call_per_transfer_clone_byte(t: u32, c: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `696 + t * (277 ±0)`
- // Estimated: `6636 + t * (3457 ±0)`
- // Minimum execution time: 213_206_000 picoseconds.
- Weight::from_parts(120_511_970, 6636)
- // Standard Error: 2_501_856
- .saturating_add(Weight::from_parts(40_016_645, 0).saturating_mul(t.into()))
- // Standard Error: 3
- .saturating_add(Weight::from_parts(420, 0).saturating_mul(c.into()))
- .saturating_add(RocksDbWeight::get().reads(5_u64))
- .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(t.into())))
- .saturating_add(RocksDbWeight::get().writes(4_u64))
- .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(t.into())))
- .saturating_add(Weight::from_parts(0, 3457).saturating_mul(t.into()))
- }
- /// Storage: `Contracts::CodeInfoOf` (r:800 w:800)
- /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
- /// Storage: `Contracts::PristineCode` (r:800 w:0)
- /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
- /// Storage: `Contracts::Nonce` (r:1 w:0)
- /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:800 w:801)
- /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `System::Account` (r:802 w:802)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:801 w:801)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// The range of component `r` is `[1, 800]`.
- fn seal_instantiate(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `1094 + r * (188 ±0)`
- // Estimated: `6987 + r * (2664 ±0)`
- // Minimum execution time: 334_708_000 picoseconds.
- Weight::from_parts(346_676_000, 6987)
- // Standard Error: 236_074
- .saturating_add(Weight::from_parts(330_734_734, 0).saturating_mul(r.into()))
- .saturating_add(RocksDbWeight::get().reads(4_u64))
- .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(r.into())))
- .saturating_add(RocksDbWeight::get().writes(4_u64))
- .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(r.into())))
- .saturating_add(Weight::from_parts(0, 2664).saturating_mul(r.into()))
+ fn seal_delegate_call() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `430`
+ // Estimated: `3895`
+ // Minimum execution time: 143_384_000 picoseconds.
+ Weight::from_parts(147_554_000, 3895)
+ .saturating_add(RocksDbWeight::get().reads(2_u64))
 }
 /// Storage: `Contracts::CodeInfoOf` (r:1 w:1)
 /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`)
@@ -2377,250 +1698,149 @@ impl WeightInfo for () {
 /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`)
 /// Storage: `Contracts::Nonce` (r:1 w:0)
 /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`)
- /// Storage: `Contracts::ContractInfoOf` (r:1 w:2)
+ /// Storage: `Contracts::ContractInfoOf` (r:1 w:1)
 /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`)
- /// Storage: `System::Account` (r:3 w:3)
+ /// Storage: `System::Account` (r:1 w:1)
 /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`)
- /// Storage: `System::EventTopics` (r:2 w:2)
- /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`)
 /// The range of component `t` is `[0, 1]`.
 /// The range of component `i` is `[0, 983040]`.
 /// The range of component `s` is `[0, 983040]`.
- fn seal_instantiate_per_transfer_input_salt_byte(t: u32, i: u32, s: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `757 + t * (104 ±0)`
- // Estimated: `6716 + t * (2549 ±1)`
- // Minimum execution time: 1_854_462_000 picoseconds.
- Weight::from_parts(855_253_052, 6716)
- // Standard Error: 13_502_046
- .saturating_add(Weight::from_parts(20_015_409, 0).saturating_mul(t.into()))
- // Standard Error: 21
- .saturating_add(Weight::from_parts(1_060, 0).saturating_mul(i.into()))
- // Standard Error: 21
- .saturating_add(Weight::from_parts(1_201, 0).saturating_mul(s.into()))
- .saturating_add(RocksDbWeight::get().reads(8_u64))
- .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into())))
- .saturating_add(RocksDbWeight::get().writes(7_u64))
- .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into())))
- .saturating_add(Weight::from_parts(0, 2549).saturating_mul(t.into()))
- }
- /// The range of component `r` is `[0, 1600]`.
- fn seal_hash_sha2_256(r: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `0`
- // Estimated: `0`
- // Minimum execution time: 10_384_000 picoseconds.
- Weight::from_parts(10_319_961, 0)
- // Standard Error: 293
- .saturating_add(Weight::from_parts(267_788, 0).saturating_mul(r.into()))
+ fn seal_instantiate(t: u32, i: u32, s: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `676`
+ // Estimated: `4138`
+ // Minimum execution time: 1_798_243_000 picoseconds.
+ Weight::from_parts(82_642_573, 4138)
+ // Standard Error: 6_831_260
+ .saturating_add(Weight::from_parts(159_867_027, 0).saturating_mul(t.into()))
+ // Standard Error: 10
+ .saturating_add(Weight::from_parts(1_534, 0).saturating_mul(i.into()))
+ // Standard Error: 10
+ .saturating_add(Weight::from_parts(1_809, 0).saturating_mul(s.into()))
+ .saturating_add(RocksDbWeight::get().reads(5_u64))
+ .saturating_add(RocksDbWeight::get().writes(3_u64))
 }
 /// The range of component `n` is `[0, 1048576]`.
- fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `0`
- // Estimated: `0`
- // Minimum execution time: 11_991_000 picoseconds.
- Weight::from_parts(792_256, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_071, 0).saturating_mul(n.into())) - } - /// The range of component `r` is `[0, 1600]`. - fn seal_hash_keccak_256(r: u32, ) -> Weight { + fn seal_hash_sha2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_210_000 picoseconds. - Weight::from_parts(8_251_750, 0) - // Standard Error: 584 - .saturating_add(Weight::from_parts(662_961, 0).saturating_mul(r.into())) + // Minimum execution time: 875_000 picoseconds. + Weight::from_parts(904_000, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_145, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. - fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight { + fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_994_000 picoseconds. - Weight::from_parts(6_532_799, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(3_351, 0).saturating_mul(n.into())) - } - /// The range of component `r` is `[0, 1600]`. - fn seal_hash_blake2_256(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 10_209_000 picoseconds. - Weight::from_parts(10_895_450, 0) - // Standard Error: 195 - .saturating_add(Weight::from_parts(328_195, 0).saturating_mul(r.into())) + // Minimum execution time: 1_475_000 picoseconds. + Weight::from_parts(1_551_000, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(3_410, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. - fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight { + fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_493_000 picoseconds. - Weight::from_parts(4_721_812, 0) + // Minimum execution time: 821_000 picoseconds. + Weight::from_parts(850_000, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_195, 0).saturating_mul(n.into())) - } - /// The range of component `r` is `[0, 1600]`. - fn seal_hash_blake2_128(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 10_134_000 picoseconds. - Weight::from_parts(11_712_472, 0) - // Standard Error: 316 - .saturating_add(Weight::from_parts(335_912, 0).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(1_279, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. - fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight { + fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_448_000 picoseconds. - Weight::from_parts(1_407_440, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_205, 0).saturating_mul(n.into())) + // Minimum execution time: 747_000 picoseconds. + Weight::from_parts(773_000, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_276, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 125697]`. - fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 54_644_000 picoseconds. 
- Weight::from_parts(55_793_413, 0) - // Standard Error: 11 - .saturating_add(Weight::from_parts(4_511, 0).saturating_mul(n.into())) - } - /// The range of component `r` is `[0, 160]`. - fn seal_sr25519_verify(r: u32, ) -> Weight { + fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_378_000 picoseconds. - Weight::from_parts(25_185_485, 0) - // Standard Error: 8_828 - .saturating_add(Weight::from_parts(41_091_818, 0).saturating_mul(r.into())) + // Minimum execution time: 43_154_000 picoseconds. + Weight::from_parts(45_087_558, 0) + // Standard Error: 9 + .saturating_add(Weight::from_parts(4_628, 0).saturating_mul(n.into())) } - /// The range of component `r` is `[0, 160]`. - fn seal_ecdsa_recover(r: u32, ) -> Weight { + fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_371_000 picoseconds. - Weight::from_parts(35_350_533, 0) - // Standard Error: 9_805 - .saturating_add(Weight::from_parts(45_466_060, 0).saturating_mul(r.into())) + // Minimum execution time: 47_193_000 picoseconds. + Weight::from_parts(48_514_000, 0) } - /// The range of component `r` is `[0, 160]`. - fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { + fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_407_000 picoseconds. - Weight::from_parts(14_375_492, 0) - // Standard Error: 4_036 - .saturating_add(Weight::from_parts(11_666_630, 0).saturating_mul(r.into())) + // Minimum execution time: 13_083_000 picoseconds. + Weight::from_parts(13_218_000, 0) } - /// Storage: `Contracts::CodeInfoOf` (r:1536 w:1536) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1535 w:0) + /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `System::EventTopics` (r:1537 w:1537) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_set_code_hash(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + r * (926 ยฑ0)` - // Estimated: `8966 + r * (3047 ยฑ10)` - // Minimum execution time: 10_566_000 picoseconds. - Weight::from_parts(10_627_000, 8966) - // Standard Error: 46_429 - .saturating_add(Weight::from_parts(22_435_893, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 3047).saturating_mul(r.into())) - } - /// Storage: `Contracts::CodeInfoOf` (r:32 w:32) + fn seal_set_code_hash() -> Weight { + // Proof Size summary in bytes: + // Measured: `430` + // Estimated: `3895` + // Minimum execution time: 19_308_000 picoseconds. + Weight::from_parts(20_116_000, 3895) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// The range of component `r` is `[0, 32]`. 
- fn lock_delegate_dependency(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `274 + r * (78 ±0)` - // Estimated: `1265 + r * (2553 ±0)` - // Minimum execution time: 10_305_000 picoseconds. - Weight::from_parts(16_073_202, 1265) - // Standard Error: 8_841 - .saturating_add(Weight::from_parts(5_125_440, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2553).saturating_mul(r.into())) - } - /// Storage: `Contracts::CodeInfoOf` (r:32 w:32) + fn lock_delegate_dependency() -> Weight { + // Proof Size summary in bytes: + // Measured: `355` + // Estimated: `3820` + // Minimum execution time: 9_271_000 picoseconds. + Weight::from_parts(9_640_000, 3820) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) - /// The range of component `r` is `[0, 32]`. - fn unlock_delegate_dependency(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `275 + r * (78 ±0)` - // Estimated: `990 + r * (2568 ±0)` - // Minimum execution time: 10_389_000 picoseconds. - Weight::from_parts(16_221_879, 990) - // Standard Error: 9_409 - .saturating_add(Weight::from_parts(4_235_040, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2568).saturating_mul(r.into())) + fn unlock_delegate_dependency() -> Weight { + // Proof Size summary in bytes: + // Measured: `355` + // Estimated: `3558` + // Minimum execution time: 8_182_000 picoseconds. + Weight::from_parts(8_343_000, 3558) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_reentrance_count(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `858 + r * (3 ±0)` - // Estimated: `6804 + r * (3 ±0)` - // Minimum execution time: 265_499_000 picoseconds. 
- Weight::from_parts(282_172_889, 6804) - // Standard Error: 442 - .saturating_add(Weight::from_parts(165_070, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) + fn seal_reentrance_count() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 320_000 picoseconds. + Weight::from_parts(347_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_account_reentrance_count(r: u32, ) -> Weight { + fn seal_account_reentrance_count() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_367_000 picoseconds. - Weight::from_parts(13_220_303, 0) - // Standard Error: 151 - .saturating_add(Weight::from_parts(86_117, 0).saturating_mul(r.into())) + // Minimum execution time: 345_000 picoseconds. + Weight::from_parts(370_000, 0) } /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_instantiation_nonce(r: u32, ) -> Weight { + fn seal_instantiation_nonce() -> Weight { // Proof Size summary in bytes: // Measured: `219` // Estimated: `1704` - // Minimum execution time: 10_223_000 picoseconds. - Weight::from_parts(14_170_002, 1704) - // Standard Error: 71 - .saturating_add(Weight::from_parts(76_372, 0).saturating_mul(r.into())) + // Minimum execution time: 2_998_000 picoseconds. + Weight::from_parts(3_221_000, 1704) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 5000]`. @@ -2628,9 +1848,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 754_000 picoseconds. - Weight::from_parts(1_091_740, 0) - // Standard Error: 29 - .saturating_add(Weight::from_parts(14_954, 0).saturating_mul(r.into())) + // Minimum execution time: 1_002_000 picoseconds. + Weight::from_parts(1_094_958, 0) + // Standard Error: 12 + .saturating_add(Weight::from_parts(14_531, 0).saturating_mul(r.into())) } } -- GitLab From 03bbc17e92d1d04b6b4b9aef7669c403d08bc28c Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Thu, 23 May 2024 15:38:31 +0300 Subject: [PATCH 055/106] Define `OpaqueValue` (#4550) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Define `OpaqueValue` and use it instead of `grandpa::OpaqueKeyOwnershipProof` and `beefy::OpaqueKeyOwnershipProof` Related to https://github.com/paritytech/polkadot-sdk/pull/4522#discussion_r1608278279 We'll need to introduce a runtime API method that calls the `report_fork_voting_unsigned()` extrinsic. This method will need to receive the ancestry proof as a parameter. I'm still not sure, but there is a chance that we'll send the ancestry proof as an opaque type. So let's introduce this `OpaqueValue`. We can already use it to replace `grandpa::OpaqueKeyOwnershipProof` and `beefy::OpaqueKeyOwnershipProof`, and maybe we'll need it for the ancestry proof as well. 
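For illustration, here is a minimal sketch of the round-trip this enables (the `KeyOwnershipProof` struct and the standalone `main` are assumptions made for the example, not part of this patch):

```rust
// Sketch only: round-tripping a concrete type through `OpaqueValue`.
use codec::{Decode, Encode};
use sp_runtime::OpaqueValue;

// Hypothetical concrete proof type; any `Encode + Decode` type would do.
#[derive(Encode, Decode, PartialEq, Debug)]
struct KeyOwnershipProof {
    session: u32,
}

fn main() {
    let proof = KeyOwnershipProof { session: 7 };
    // Erase the concrete type before crossing the runtime API boundary.
    let opaque = OpaqueValue::new(proof.encode());
    // The side that knows the concrete type decodes it back.
    let decoded: Option<KeyOwnershipProof> = opaque.decode();
    assert_eq!(decoded, Some(proof));
}
```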
--------- Co-authored-by: Bastian Köcher --- .../primitives/consensus/beefy/src/lib.rs | 21 +++++-------------- .../primitives/consensus/grandpa/src/lib.rs | 19 ++--------------- substrate/primitives/runtime/src/lib.rs | 15 +++++++++++++ 3 files changed, 22 insertions(+), 33 deletions(-) diff --git a/substrate/primitives/consensus/beefy/src/lib.rs b/substrate/primitives/consensus/beefy/src/lib.rs index 390c0ff7127..f70434beab3 100644 --- a/substrate/primitives/consensus/beefy/src/lib.rs +++ b/substrate/primitives/consensus/beefy/src/lib.rs @@ -52,7 +52,10 @@ use core::fmt::{Debug, Display}; use scale_info::TypeInfo; use sp_application_crypto::{AppCrypto, AppPublic, ByteArray, RuntimeAppPublic}; use sp_core::H256; -use sp_runtime::traits::{Hash, Keccak256, NumberFor}; +use sp_runtime::{ + traits::{Hash, Keccak256, NumberFor}, + OpaqueValue, +}; /// Key type for BEEFY module. pub const KEY_TYPE: sp_core::crypto::KeyTypeId = sp_application_crypto::key_types::BEEFY; @@ -399,21 +402,7 @@ impl<AuthorityId> OnNewValidatorSet<AuthorityId> for () { /// the runtime API boundary this type is unknown and as such we keep this /// opaque representation, implementors of the runtime API will have to make /// sure that all usages of `OpaqueKeyOwnershipProof` refer to the same type. -#[derive(Decode, Encode, PartialEq, TypeInfo)] -pub struct OpaqueKeyOwnershipProof(Vec<u8>); -impl OpaqueKeyOwnershipProof { - /// Create a new `OpaqueKeyOwnershipProof` using the given encoded - /// representation. - pub fn new(inner: Vec<u8>) -> OpaqueKeyOwnershipProof { - OpaqueKeyOwnershipProof(inner) - } - - /// Try to decode this `OpaqueKeyOwnershipProof` into the given concrete key - /// ownership proof type. - pub fn decode<T: Decode>(self) -> Option<T> { - codec::Decode::decode(&mut &self.0[..]).ok() - } -} +pub type OpaqueKeyOwnershipProof = OpaqueValue; sp_api::decl_runtime_apis! { /// API necessary for BEEFY voters. diff --git a/substrate/primitives/consensus/grandpa/src/lib.rs b/substrate/primitives/consensus/grandpa/src/lib.rs index 75ed81894c2..5320c943404 100644 --- a/substrate/primitives/consensus/grandpa/src/lib.rs +++ b/substrate/primitives/consensus/grandpa/src/lib.rs @@ -31,7 +31,7 @@ use scale_info::TypeInfo; use sp_keystore::KeystorePtr; use sp_runtime::{ traits::{Header as HeaderT, NumberFor}, - ConsensusEngineId, RuntimeDebug, + ConsensusEngineId, OpaqueValue, RuntimeDebug, }; /// The log target to be used by client code. @@ -465,22 +465,7 @@ where /// the runtime API boundary this type is unknown and as such we keep this /// opaque representation, implementors of the runtime API will have to make /// sure that all usages of `OpaqueKeyOwnershipProof` refer to the same type. -#[derive(Decode, Encode, PartialEq, TypeInfo)] -pub struct OpaqueKeyOwnershipProof(Vec<u8>); - -impl OpaqueKeyOwnershipProof { - /// Create a new `OpaqueKeyOwnershipProof` using the given encoded - /// representation. - pub fn new(inner: Vec<u8>) -> OpaqueKeyOwnershipProof { - OpaqueKeyOwnershipProof(inner) - } - - /// Try to decode this `OpaqueKeyOwnershipProof` into the given concrete key - /// ownership proof type. - pub fn decode<T: Decode>(self) -> Option<T> { - codec::Decode::decode(&mut &self.0[..]).ok() - } -} +pub type OpaqueKeyOwnershipProof = OpaqueValue; sp_api::decl_runtime_apis! { /// APIs for integrating the GRANDPA finality gadget into runtimes. 
diff --git a/substrate/primitives/runtime/src/lib.rs b/substrate/primitives/runtime/src/lib.rs index e4e6b98ff77..046909b9a38 100644 --- a/substrate/primitives/runtime/src/lib.rs +++ b/substrate/primitives/runtime/src/lib.rs @@ -1009,6 +1009,21 @@ pub enum ExtrinsicInclusionMode { OnlyInherents, } +/// Simple blob that holds a value in an encoded form without committing to its type. +#[derive(Decode, Encode, PartialEq, TypeInfo)] +pub struct OpaqueValue(Vec<u8>); +impl OpaqueValue { + /// Create a new `OpaqueValue` using the given encoded representation. + pub fn new(inner: Vec<u8>) -> OpaqueValue { + OpaqueValue(inner) + } + + /// Try to decode this `OpaqueValue` into the given concrete type. + pub fn decode<T: Decode>(&self) -> Option<T> { + Decode::decode(&mut &self.0[..]).ok() + } +} + #[cfg(test)] mod tests { use crate::traits::BlakeTwo256; -- GitLab From 48d4f654612a67787426de426e462bd40f6f70f6 Mon Sep 17 00:00:00 2001 From: Francisco Aguirre Date: Thu, 23 May 2024 23:04:41 +0200 Subject: [PATCH 056/106] Mention new XCM docs in sdk docs (#4558) The XCM docs were pretty much moved to the new rust docs format in https://github.com/paritytech/polkadot-sdk/pull/2633, with the addition of the XCM cookbook, which I plan to add more examples to shortly. These docs were not mentioned in the polkadot-sdk rust docs; this PR just mentions them there, so people can actually find them. --- Cargo.lock | 1 + docs/sdk/Cargo.toml | 1 + docs/sdk/src/polkadot_sdk/xcm.rs | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index e3a72ca23d8..55f78cc4cd3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13984,6 +13984,7 @@ dependencies = [ "staging-xcm", "subkey", "substrate-wasm-builder", + "xcm-docs", ] [[package]] diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index 4a4f333de79..f9812dbd044 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -100,3 +100,4 @@ sp-version = { path = "../../substrate/primitives/version" } # XCM xcm = { package = "staging-xcm", path = "../../polkadot/xcm" } +xcm-docs = { path = "../../polkadot/xcm/docs" } diff --git a/docs/sdk/src/polkadot_sdk/xcm.rs b/docs/sdk/src/polkadot_sdk/xcm.rs index 5dcdc9e1de0..58f54068642 100644 --- a/docs/sdk/src/polkadot_sdk/xcm.rs +++ b/docs/sdk/src/polkadot_sdk/xcm.rs @@ -50,7 +50,7 @@ //! //! ## Get started //! -//! To learn how it works and to get started, go to the [XCM docs](https://paritytech.github.io/xcm-docs/). +//! To learn how it works and to get started, go to the [XCM docs](xcm_docs). 
#[cfg(test)] mod tests { -- GitLab From 700d5910580fdc17a0737925d4fe2472eb265f82 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Fri, 24 May 2024 10:43:02 +0300 Subject: [PATCH 057/106] Use polkadot-ckb-merkle-mountain-range dependency (#4562) We need to use the `polkadot-ckb-merkle-mountain-range` dependency published on `crates.io` in order to unblock the release of the `sp-mmr-primitives` crate. --- Cargo.lock | 23 ++++++++++--------- .../merkle-mountain-range/Cargo.toml | 2 +- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 55f78cc4cd3..a9cc4f9202a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2605,15 +2605,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "ckb-merkle-mountain-range" -version = "0.6.0" -source = "git+https://github.com/paritytech/merkle-mountain-range.git?branch=master#537f0e3f67c5adf7afff0800bbb81f02f17570a1" -dependencies = [ - "cfg-if", - "itertools 0.10.5", -] - [[package]] name = "clang-sys" version = "1.6.1" @@ -9796,7 +9787,7 @@ dependencies = [ "bp-beefy", "bp-runtime", "bp-test-utils", - "ckb-merkle-mountain-range 0.5.2", + "ckb-merkle-mountain-range", "frame-support", "frame-system", "log", @@ -12757,6 +12748,16 @@ dependencies = [ "tracing-gum", ] +[[package]] +name = "polkadot-ckb-merkle-mountain-range" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4b44320e5f7ce2c18227537a3032ae5b2c476a7e8eddba45333e1011fc31b92" +dependencies = [ + "cfg-if", + "itertools 0.10.5", +] + [[package]] name = "polkadot-cli" version = "7.0.0" @@ -19674,9 +19675,9 @@ name = "sp-mmr-primitives" version = "26.0.0" dependencies = [ "array-bytes", - "ckb-merkle-mountain-range 0.6.0", "log", "parity-scale-codec", + "polkadot-ckb-merkle-mountain-range", "scale-info", "serde", "sp-api", diff --git a/substrate/primitives/merkle-mountain-range/Cargo.toml b/substrate/primitives/merkle-mountain-range/Cargo.toml index 23efc1b687c..7b043355c72 100644 --- a/substrate/primitives/merkle-mountain-range/Cargo.toml +++ b/substrate/primitives/merkle-mountain-range/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } -mmr-lib = { package = "ckb-merkle-mountain-range", git = "https://github.com/paritytech/merkle-mountain-range.git", branch = "master", default-features = false } +mmr-lib = { package = "polkadot-ckb-merkle-mountain-range", version = "0.7.0", default-features = false } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-api = { path = "../api", default-features = false } sp-core = { path = "../core", default-features = false } -- GitLab From ef144b1a88c6478e5d6dac945ffe12053f05d96a Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Fri, 24 May 2024 12:01:10 +0200 Subject: [PATCH 058/106] Attempt to avoid specifying `BlockHashCount` for different `mocking::{MockBlock, MockBlockU32, MockBlockU128}` (#4543) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While doing some migration/rebase I came into a situation where I needed to change `mocking::MockBlock` to `mocking::MockBlockU32`: ``` #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for TestRuntime { type Block = frame_system::mocking::MockBlockU32; type AccountData = pallet_balances::AccountData; } ``` But 
the actual `TestDefaultConfig` for `frame_system` is using `ConstU64` for `type BlockHashCount = frame_support::traits::ConstU64<10>;` [here](https://github.com/paritytech/polkadot-sdk/blob/master/substrate/frame/system/src/lib.rs#L303). Because of this, it forced me to specify and add an override for `type BlockHashCount = ConstU32<10>`. This PR tries to fix this with a `TestBlockHashCount` implementation for `TestDefaultConfig`, which supports `u32`, `u64` and `u128` as a `BlockNumber`. ### How to simulate the error Just by removing `type BlockHashCount = ConstU32<250>;` [here](https://github.com/paritytech/polkadot-sdk/blob/master/substrate/frame/multisig/src/tests.rs#L44) ``` :~/parity/olkadot-sdk$ cargo test -p pallet-multisig Compiling pallet-multisig v28.0.0 (/home/bparity/parity/aaa/polkadot-sdk/substrate/frame/multisig) error[E0277]: the trait bound `ConstU64<10>: frame_support::traits::Get<u32>` is not satisfied --> substrate/frame/multisig/src/tests.rs:41:1 | 41 | #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `frame_support::traits::Get<u32>` is not implemented for `ConstU64<10>` | = help: the following other types implement trait `frame_support::traits::Get`: as frame_support::traits::Get> as frame_support::traits::Get>> note: required by a bound in `frame_system::Config::BlockHashCount` --> /home/bparity/parity/aaa/polkadot-sdk/substrate/frame/system/src/lib.rs:535:24 | 535 | type BlockHashCount: Get<BlockNumberFor<Self>>; | ^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `Config::BlockHashCount` = note: this error originates in the attribute macro `derive_impl` which comes from the expansion of the macro `frame_support::macro_magic::forward_tokens_verbatim` (in Nightly builds, run with -Z macro-backtrace for more info) For more information about this error, try `rustc --explain E0277`. error: could not compile `pallet-multisig` (lib test) due to 1 previous error ``` ## For reviewers: (If there is a better solution, please let me know!) The first commit contains the actual attempt to fix the problem: https://github.com/paritytech/polkadot-sdk/commit/3c5499e539f2218503fbd6ce9be085b03c31ee13. The second commit is just the removal of `BlockHashCount` from all other places where it is not needed by default. 
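To make the conversion trick concrete, here is a self-contained sketch of the adapter idea (the local `Get` trait and `ConstU32` stand-ins are assumptions so the snippet compiles on its own; the real trait and constants live in `frame_support::traits`):

```rust
use core::marker::PhantomData;

// Local stand-in for `frame_support::traits::Get`.
trait Get<T> {
    fn get() -> T;
}

// Local stand-in for `frame_support::traits::ConstU32`.
struct ConstU32<const N: u32>;
impl<const N: u32> Get<u32> for ConstU32<N> {
    fn get() -> u32 {
        N
    }
}

// The adapter: anything that yields a `u32` can serve any block-number
// type that is `From<u32>` (covers u32, u64 and u128).
struct TestBlockHashCount<C: Get<u32>>(PhantomData<C>);
impl<I: From<u32>, C: Get<u32>> Get<I> for TestBlockHashCount<C> {
    fn get() -> I {
        C::get().into()
    }
}

fn main() {
    let as_u32: u32 = <TestBlockHashCount<ConstU32<10>> as Get<u32>>::get();
    let as_u64: u64 = <TestBlockHashCount<ConstU32<10>> as Get<u64>>::get();
    let as_u128: u128 = <TestBlockHashCount<ConstU32<10>> as Get<u128>>::get();
    assert_eq!((as_u32, as_u64, as_u128), (10, 10, 10));
}
```

With such an adapter in place, a mock runtime can switch between `MockBlock`, `MockBlockU32` and `MockBlockU128` without overriding `BlockHashCount`.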
Closes: https://github.com/paritytech/polkadot-sdk/issues/1657 --------- Co-authored-by: Bastian Köcher --- bridges/bin/runtime-common/src/mock.rs | 1 - .../pallets/inbound-queue/src/mock.rs | 5 ----- .../pallets/outbound-queue/src/mock.rs | 5 ----- bridges/snowbridge/pallets/system/src/mock.rs | 3 +-- cumulus/pallets/collator-selection/src/mock.rs | 2 -- cumulus/pallets/parachain-system/src/mock.rs | 2 -- cumulus/pallets/xcmp-queue/src/mock.rs | 2 -- cumulus/parachains/common/src/impls.rs | 2 -- .../pallets/collective-content/src/mock.rs | 5 +---- .../runtime/common/src/assigned_slots/mod.rs | 5 ----- polkadot/runtime/common/src/auctions.rs | 5 ----- polkadot/runtime/common/src/crowdloan/mod.rs | 5 ----- polkadot/runtime/common/src/impls.rs | 2 -- .../runtime/common/src/integration_tests.rs | 2 -- .../runtime/common/src/paras_registrar/mod.rs | 2 -- polkadot/runtime/common/src/purchase.rs | 5 ----- polkadot/runtime/common/src/slots/mod.rs | 5 ----- polkadot/runtime/parachains/src/mock.rs | 2 -- polkadot/xcm/pallet-xcm/src/mock.rs | 5 ----- polkadot/xcm/xcm-builder/src/tests/pay/mock.rs | 1 - polkadot/xcm/xcm-builder/tests/mock/mod.rs | 5 ----- substrate/frame/alliance/src/mock.rs | 1 - .../contracts/mock-network/src/parachain.rs | 5 ----- .../contracts/mock-network/src/relay_chain.rs | 5 ----- .../test-staking-e2e/src/mock.rs | 1 - substrate/frame/examples/basic/src/tests.rs | 1 - substrate/frame/examples/dev-mode/src/tests.rs | 1 - .../examples/offchain-worker/src/tests.rs | 1 - substrate/frame/im-online/src/mock.rs | 1 - substrate/frame/indices/src/mock.rs | 1 - substrate/frame/multisig/src/tests.rs | 1 - substrate/frame/offences/src/mock.rs | 3 +-- substrate/frame/paged-list/src/mock.rs | 6 +----- substrate/frame/safe-mode/src/mock.rs | 1 - .../frame/state-trie-migration/src/lib.rs | 7 +------ .../frame/support/test/tests/final_keys.rs | 3 +-- .../frame/support/test/tests/genesisconfig.rs | 3 +-- substrate/frame/support/test/tests/instance.rs | 1 - .../frame/support/test/tests/issue2219.rs | 1 - substrate/frame/support/test/tests/origin.rs | 2 -- substrate/frame/support/test/tests/pallet.rs | 1 - .../support/test/tests/pallet_instance.rs | 1 - .../test/tests/pallet_outer_enums_explicit.rs | 3 +-- .../test/tests/pallet_outer_enums_implicit.rs | 3 +-- .../tests/pallet_ui/pass/dev_mode_valid.rs | 1 - substrate/frame/support/test/tests/runtime.rs | 3 +-- .../test/tests/runtime_legacy_ordering.rs | 3 +-- .../support/test/tests/runtime_metadata.rs | 1 - .../frame/support/test/tests/storage_layers.rs | 1 - .../support/test/tests/storage_transaction.rs | 3 +-- .../support/test/tests/versioned_migration.rs | 2 -- substrate/frame/system/benches/bench.rs | 6 +----- substrate/frame/system/src/lib.rs | 18 +++++++++++++++--- .../asset-conversion-tx-payment/src/mock.rs | 1 - .../asset-tx-payment/src/mock.rs | 1 - .../frame/transaction-payment/src/mock.rs | 1 - .../frame/transaction-storage/src/mock.rs | 3 +-- substrate/frame/tx-pause/src/mock.rs | 4 ---- .../parachain/pallets/template/src/mock.rs | 2 -- .../solochain/pallets/template/src/mock.rs | 6 +----- 60 files changed, 30 insertions(+), 150 deletions(-) diff --git a/bridges/bin/runtime-common/src/mock.rs b/bridges/bin/runtime-common/src/mock.rs index e323f1edfc7..f4947466789 100644 --- a/bridges/bin/runtime-common/src/mock.rs +++ b/bridges/bin/runtime-common/src/mock.rs @@ -148,7 +148,6 @@ impl frame_system::Config for TestRuntime { type AccountId = ThisChainAccountId; type Block = ThisChainBlock; type AccountData = 
pallet_balances::AccountData; - type BlockHashCount = ConstU32<250>; } impl pallet_utility::Config for TestRuntime { diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs index c96c868bc26..05481ca2f6b 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs @@ -43,10 +43,6 @@ frame_support::construct_runtime!( pub type Signature = MultiSignature; pub type AccountId = <::Signer as IdentifyAccount>::AccountId; -parameter_types! { - pub const BlockHashCount: u64 = 250; -} - type Balance = u128; #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] @@ -60,7 +56,6 @@ impl frame_system::Config for Test { type AccountId = AccountId; type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type Nonce = u64; diff --git a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs index 5eeeeead140..d65a96e2702 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs @@ -33,10 +33,6 @@ frame_support::construct_runtime!( } ); -parameter_types! { - pub const BlockHashCount: u64 = 250; -} - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; @@ -48,7 +44,6 @@ impl frame_system::Config for Test { type AccountId = AccountId; type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type PalletInfo = PalletInfo; type Nonce = u64; type Block = Block; diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index 687072a49e2..d7fc4152b37 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -3,7 +3,7 @@ use crate as snowbridge_system; use frame_support::{ derive_impl, parameter_types, - traits::{tokens::fungible::Mutate, ConstU128, ConstU64, ConstU8}, + traits::{tokens::fungible::Mutate, ConstU128, ConstU8}, weights::IdentityFee, PalletId, }; @@ -106,7 +106,6 @@ impl frame_system::Config for Test { type AccountId = AccountId; type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type Nonce = u64; diff --git a/cumulus/pallets/collator-selection/src/mock.rs b/cumulus/pallets/collator-selection/src/mock.rs index 4a440dfe1e9..196184d6278 100644 --- a/cumulus/pallets/collator-selection/src/mock.rs +++ b/cumulus/pallets/collator-selection/src/mock.rs @@ -46,7 +46,6 @@ frame_support::construct_runtime!( ); parameter_types! 
{ - pub const BlockHashCount: u64 = 250; pub const SS58Prefix: u8 = 42; } @@ -65,7 +64,6 @@ impl system::Config for Test { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs index e8d2eb70e26..da904c0079a 100644 --- a/cumulus/pallets/parachain-system/src/mock.rs +++ b/cumulus/pallets/parachain-system/src/mock.rs @@ -55,7 +55,6 @@ frame_support::construct_runtime!( ); parameter_types! { - pub const BlockHashCount: u64 = 250; pub Version: RuntimeVersion = RuntimeVersion { spec_name: sp_version::create_runtime_str!("test"), impl_name: sp_version::create_runtime_str!("system-test"), @@ -74,7 +73,6 @@ parameter_types! { #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type Block = Block; - type BlockHashCount = BlockHashCount; type Version = Version; type OnSetCode = ParachainSetCode; } diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index 97121aa78e9..dd87e07c33f 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -52,7 +52,6 @@ frame_support::construct_runtime!( ); parameter_types! { - pub const BlockHashCount: u64 = 250; pub const SS58Prefix: u8 = 42; } @@ -73,7 +72,6 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/cumulus/parachains/common/src/impls.rs b/cumulus/parachains/common/src/impls.rs index d70fdfeb709..ed9c5c483fa 100644 --- a/cumulus/parachains/common/src/impls.rs +++ b/cumulus/parachains/common/src/impls.rs @@ -222,7 +222,6 @@ mod tests { ); parameter_types! 
{ - pub const BlockHashCount: u64 = 250; pub BlockLength: limits::BlockLength = limits::BlockLength::max(2 * 1024); pub const AvailableBlockRatio: Perbill = Perbill::one(); pub const MaxReserves: u32 = 50; @@ -240,7 +239,6 @@ mod tests { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type BlockLength = BlockLength; type BlockWeights = (); type DbWeight = (); diff --git a/cumulus/parachains/pallets/collective-content/src/mock.rs b/cumulus/parachains/pallets/collective-content/src/mock.rs index 5cb0126425e..91f9c29933d 100644 --- a/cumulus/parachains/pallets/collective-content/src/mock.rs +++ b/cumulus/parachains/pallets/collective-content/src/mock.rs @@ -18,9 +18,7 @@ pub use crate as pallet_collective_content; use crate::WeightInfo; use frame_support::{ - derive_impl, ord_parameter_types, parameter_types, - traits::{ConstU32, ConstU64}, - weights::Weight, + derive_impl, ord_parameter_types, parameter_types, traits::ConstU32, weights::Weight, }; use frame_system::EnsureSignedBy; use sp_runtime::{traits::IdentityLookup, BuildStorage}; @@ -70,7 +68,6 @@ impl frame_system::Config for Test { type AccountId = AccountId; type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = (); diff --git a/polkadot/runtime/common/src/assigned_slots/mod.rs b/polkadot/runtime/common/src/assigned_slots/mod.rs index 9b24b99cfbe..92a8e46f5f9 100644 --- a/polkadot/runtime/common/src/assigned_slots/mod.rs +++ b/polkadot/runtime/common/src/assigned_slots/mod.rs @@ -671,10 +671,6 @@ mod tests { type OverarchingCall = RuntimeCall; } - parameter_types! { - pub const BlockHashCount: u32 = 250; - } - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; @@ -689,7 +685,6 @@ mod tests { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type DbWeight = (); type Version = (); type PalletInfo = PalletInfo; diff --git a/polkadot/runtime/common/src/auctions.rs b/polkadot/runtime/common/src/auctions.rs index aa4caac96f1..e7b7c081ae4 100644 --- a/polkadot/runtime/common/src/auctions.rs +++ b/polkadot/runtime/common/src/auctions.rs @@ -699,10 +699,6 @@ mod tests { } ); - parameter_types! { - pub const BlockHashCount: u32 = 250; - } - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; @@ -718,7 +714,6 @@ mod tests { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/polkadot/runtime/common/src/crowdloan/mod.rs b/polkadot/runtime/common/src/crowdloan/mod.rs index 477530467fa..0aecbcd531c 100644 --- a/polkadot/runtime/common/src/crowdloan/mod.rs +++ b/polkadot/runtime/common/src/crowdloan/mod.rs @@ -890,10 +890,6 @@ mod tests { } ); - parameter_types! 
{ - pub const BlockHashCount: u32 = 250; - } - type BlockNumber = u64; #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] @@ -911,7 +907,6 @@ mod tests { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/polkadot/runtime/common/src/impls.rs b/polkadot/runtime/common/src/impls.rs index 85531e9c04f..a92a05219cf 100644 --- a/polkadot/runtime/common/src/impls.rs +++ b/polkadot/runtime/common/src/impls.rs @@ -276,7 +276,6 @@ mod tests { ); parameter_types! { - pub const BlockHashCount: u64 = 250; pub BlockWeights: limits::BlockWeights = limits::BlockWeights::builder() .base_block(Weight::from_parts(10, 0)) .for_class(DispatchClass::all(), |weight| { @@ -302,7 +301,6 @@ mod tests { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type BlockLength = BlockLength; type BlockWeights = BlockWeights; type DbWeight = (); diff --git a/polkadot/runtime/common/src/integration_tests.rs b/polkadot/runtime/common/src/integration_tests.rs index 3e9ac1fc1b1..2122e75f3e2 100644 --- a/polkadot/runtime/common/src/integration_tests.rs +++ b/polkadot/runtime/common/src/integration_tests.rs @@ -109,7 +109,6 @@ where use crate::{auctions::Error as AuctionsError, crowdloan::Error as CrowdloanError}; parameter_types! { - pub const BlockHashCount: u32 = 250; pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max( Weight::from_parts(4 * 1024 * 1024, u64::MAX), @@ -131,7 +130,6 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs index a49ebab3e26..c90802a4012 100644 --- a/polkadot/runtime/common/src/paras_registrar/mod.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -761,7 +761,6 @@ mod tests { const NORMAL_RATIO: Perbill = Perbill::from_percent(75); parameter_types! { - pub const BlockHashCount: u32 = 250; pub BlockWeights: limits::BlockWeights = frame_system::limits::BlockWeights::simple_max(Weight::from_parts(1024, u64::MAX)); pub BlockLength: limits::BlockLength = @@ -780,7 +779,6 @@ mod tests { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type DbWeight = (); type BlockWeights = BlockWeights; type BlockLength = BlockLength; diff --git a/polkadot/runtime/common/src/purchase.rs b/polkadot/runtime/common/src/purchase.rs index b90bbb3a7cf..3920a2c68c5 100644 --- a/polkadot/runtime/common/src/purchase.rs +++ b/polkadot/runtime/common/src/purchase.rs @@ -508,10 +508,6 @@ mod tests { type AccountId = AccountId32; - parameter_types! 
{ - pub const BlockHashCount: u32 = 250; - } - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; @@ -527,7 +523,6 @@ mod tests { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/polkadot/runtime/common/src/slots/mod.rs b/polkadot/runtime/common/src/slots/mod.rs index 738569ff441..9da345beea3 100644 --- a/polkadot/runtime/common/src/slots/mod.rs +++ b/polkadot/runtime/common/src/slots/mod.rs @@ -525,10 +525,6 @@ mod tests { } ); - parameter_types! { - pub const BlockHashCount: u32 = 250; - } - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; @@ -543,7 +539,6 @@ mod tests { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type DbWeight = (); type Version = (); type PalletInfo = PalletInfo; diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index a32c9d11b36..75b835b1754 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -100,7 +100,6 @@ where } parameter_types! { - pub const BlockHashCount: u32 = 250; pub static BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max( Weight::from_parts(4 * 1024 * 1024, u64::MAX), @@ -125,7 +124,6 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index b3b7529217f..ead98e1d046 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -238,10 +238,6 @@ impl SendXcm for TestPaidForPara3000SendXcm { } } -parameter_types! 
{ - pub const BlockHashCount: u64 = 250; -} - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type RuntimeOrigin = RuntimeOrigin; @@ -253,7 +249,6 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type BlockWeights = (); type BlockLength = (); type Version = (); diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs index 34b204b434d..076ff4184f0 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs @@ -54,7 +54,6 @@ impl frame_system::Config for Test { type Block = Block; type AccountData = pallet_balances::AccountData; type AccountId = AccountId; - type BlockHashCount = ConstU32<256>; type Lookup = sp_runtime::traits::IdentityLookup; } diff --git a/polkadot/xcm/xcm-builder/tests/mock/mod.rs b/polkadot/xcm/xcm-builder/tests/mock/mod.rs index 45bfba23556..7f7ff17e211 100644 --- a/polkadot/xcm/xcm-builder/tests/mock/mod.rs +++ b/polkadot/xcm/xcm-builder/tests/mock/mod.rs @@ -74,10 +74,6 @@ pub type TestXcmRouter = EnsureDecodableXcm; pub const UNITS: Balance = 1_000_000_000_000; pub const CENTS: Balance = UNITS / 30_000; -parameter_types! { - pub const BlockHashCount: u64 = 250; -} - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; @@ -89,7 +85,6 @@ impl frame_system::Config for Runtime { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type BlockWeights = (); type BlockLength = (); type Version = (); diff --git a/substrate/frame/alliance/src/mock.rs b/substrate/frame/alliance/src/mock.rs index 7116e69efa1..a9cfd6d0fde 100644 --- a/substrate/frame/alliance/src/mock.rs +++ b/substrate/frame/alliance/src/mock.rs @@ -42,7 +42,6 @@ type BlockNumber = u64; type AccountId = u64; parameter_types! { - pub const BlockHashCount: BlockNumber = 250; pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max(Weight::MAX); } diff --git a/substrate/frame/contracts/mock-network/src/parachain.rs b/substrate/frame/contracts/mock-network/src/parachain.rs index b46d7df6c2b..f35846ba32c 100644 --- a/substrate/frame/contracts/mock-network/src/parachain.rs +++ b/substrate/frame/contracts/mock-network/src/parachain.rs @@ -49,10 +49,6 @@ use xcm_executor::{traits::JustTry, Config, XcmExecutor}; pub type SovereignAccountOf = (AccountId32Aliases, ParentIsPreset); -parameter_types! { - pub const BlockHashCount: u64 = 250; -} - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; @@ -64,7 +60,6 @@ impl frame_system::Config for Runtime { type AccountId = AccountId; type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type BlockWeights = (); type BlockLength = (); type Version = (); diff --git a/substrate/frame/contracts/mock-network/src/relay_chain.rs b/substrate/frame/contracts/mock-network/src/relay_chain.rs index 36a7de499ba..8829fff3d04 100644 --- a/substrate/frame/contracts/mock-network/src/relay_chain.rs +++ b/substrate/frame/contracts/mock-network/src/relay_chain.rs @@ -43,10 +43,6 @@ use super::{ primitives::{AccountId, Balance}, }; -parameter_types! 
{ - pub const BlockHashCount: u64 = 250; -} - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; @@ -58,7 +54,6 @@ impl frame_system::Config for Runtime { type AccountId = AccountId; type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type BlockWeights = (); type BlockLength = (); type Version = (); diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index a9512bef2d5..e5987ec33f0 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -90,7 +90,6 @@ pub(crate) type Moment = u32; impl frame_system::Config for Runtime { type Block = Block; type AccountData = pallet_balances::AccountData; - type BlockHashCount = ConstU32<10>; } const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); diff --git a/substrate/frame/examples/basic/src/tests.rs b/substrate/frame/examples/basic/src/tests.rs index de37bcf7556..d351b27eecd 100644 --- a/substrate/frame/examples/basic/src/tests.rs +++ b/substrate/frame/examples/basic/src/tests.rs @@ -60,7 +60,6 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/substrate/frame/examples/dev-mode/src/tests.rs b/substrate/frame/examples/dev-mode/src/tests.rs index 1c79b5f5fa6..e8a18ec13fe 100644 --- a/substrate/frame/examples/dev-mode/src/tests.rs +++ b/substrate/frame/examples/dev-mode/src/tests.rs @@ -54,7 +54,6 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/substrate/frame/examples/offchain-worker/src/tests.rs b/substrate/frame/examples/offchain-worker/src/tests.rs index 3525b3b67ed..e2c57a8c1e1 100644 --- a/substrate/frame/examples/offchain-worker/src/tests.rs +++ b/substrate/frame/examples/offchain-worker/src/tests.rs @@ -61,7 +61,6 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = (); diff --git a/substrate/frame/im-online/src/mock.rs b/substrate/frame/im-online/src/mock.rs index cc448dc1ae1..2aff9a0e26d 100644 --- a/substrate/frame/im-online/src/mock.rs +++ b/substrate/frame/im-online/src/mock.rs @@ -127,7 +127,6 @@ impl frame_system::Config for Runtime { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = (); diff --git a/substrate/frame/indices/src/mock.rs b/substrate/frame/indices/src/mock.rs index 9f8bf8c3758..87b8d79a7f8 100644 --- a/substrate/frame/indices/src/mock.rs +++ b/substrate/frame/indices/src/mock.rs @@ -53,7 +53,6 @@ impl frame_system::Config for Test { type Lookup = Indices; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type 
AccountData = pallet_balances::AccountData; diff --git a/substrate/frame/multisig/src/tests.rs b/substrate/frame/multisig/src/tests.rs index 0d73e3db661..cfdd33f7dfc 100644 --- a/substrate/frame/multisig/src/tests.rs +++ b/substrate/frame/multisig/src/tests.rs @@ -41,7 +41,6 @@ frame_support::construct_runtime!( #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type Block = Block; - type BlockHashCount = ConstU32<250>; type AccountData = pallet_balances::AccountData; // This pallet wishes to overwrite this. type BaseCallFilter = TestBaseCallFilter; diff --git a/substrate/frame/offences/src/mock.rs b/substrate/frame/offences/src/mock.rs index 9a3120e41ea..1725f4158d3 100644 --- a/substrate/frame/offences/src/mock.rs +++ b/substrate/frame/offences/src/mock.rs @@ -24,7 +24,7 @@ use crate::Config; use codec::Encode; use frame_support::{ derive_impl, parameter_types, - traits::{ConstU32, ConstU64}, + traits::ConstU32, weights::{constants::RocksDbWeight, Weight}, }; use sp_core::H256; @@ -88,7 +88,6 @@ impl frame_system::Config for Runtime { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = (); diff --git a/substrate/frame/paged-list/src/mock.rs b/substrate/frame/paged-list/src/mock.rs index 5d06170aae7..e086b4ba2b2 100644 --- a/substrate/frame/paged-list/src/mock.rs +++ b/substrate/frame/paged-list/src/mock.rs @@ -20,10 +20,7 @@ #![cfg(feature = "std")] use crate::{paged_list::StoragePagedListMeta, Config, ListPrefix}; -use frame_support::{ - derive_impl, - traits::{ConstU16, ConstU64}, -}; +use frame_support::{derive_impl, traits::ConstU16}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -56,7 +53,6 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = (); diff --git a/substrate/frame/safe-mode/src/mock.rs b/substrate/frame/safe-mode/src/mock.rs index fbfc16f4aa2..0beb911267d 100644 --- a/substrate/frame/safe-mode/src/mock.rs +++ b/substrate/frame/safe-mode/src/mock.rs @@ -47,7 +47,6 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type DbWeight = (); type Version = (); type PalletInfo = PalletInfo; diff --git a/substrate/frame/state-trie-migration/src/lib.rs b/substrate/frame/state-trie-migration/src/lib.rs index 5c54c27966c..4ec649f9080 100644 --- a/substrate/frame/state-trie-migration/src/lib.rs +++ b/substrate/frame/state-trie-migration/src/lib.rs @@ -1103,11 +1103,7 @@ mod benchmarks { mod mock { use super::*; use crate as pallet_state_trie_migration; - use frame_support::{ - derive_impl, parameter_types, - traits::{ConstU32, Hooks}, - weights::Weight, - }; + use frame_support::{derive_impl, parameter_types, traits::Hooks, weights::Weight}; use frame_system::{EnsureRoot, EnsureSigned}; use sp_core::{ storage::{ChildInfo, StateVersion}, @@ -1134,7 +1130,6 @@ mod mock { #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type Block = Block; - type BlockHashCount = ConstU32<250>; type AccountData = pallet_balances::AccountData; } diff --git a/substrate/frame/support/test/tests/final_keys.rs 
b/substrate/frame/support/test/tests/final_keys.rs index a777c20a1e9..64f56d52003 100644 --- a/substrate/frame/support/test/tests/final_keys.rs +++ b/substrate/frame/support/test/tests/final_keys.rs @@ -19,7 +19,7 @@ use codec::Encode; use frame_support::{derive_impl, storage::unhashed, StoragePrefixedMap}; use frame_system::pallet_prelude::BlockNumberFor; -use sp_core::{sr25519, ConstU32}; +use sp_core::sr25519; use sp_io::{ hashing::{blake2_128, twox_128, twox_64}, TestExternalities, @@ -213,7 +213,6 @@ frame_support::construct_runtime!( impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type Block = Block; - type BlockHashCount = ConstU32<10>; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/support/test/tests/genesisconfig.rs b/substrate/frame/support/test/tests/genesisconfig.rs index a82425cf6be..0673bcfdff3 100644 --- a/substrate/frame/support/test/tests/genesisconfig.rs +++ b/substrate/frame/support/test/tests/genesisconfig.rs @@ -17,7 +17,7 @@ use frame_support::derive_impl; use frame_system::pallet_prelude::BlockNumberFor; -use sp_core::{sr25519, ConstU32}; +use sp_core::sr25519; use sp_runtime::{ generic, traits::{BlakeTwo256, Verify}, @@ -83,7 +83,6 @@ frame_support::construct_runtime!( impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type Block = Block; - type BlockHashCount = ConstU32<10>; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/support/test/tests/instance.rs b/substrate/frame/support/test/tests/instance.rs index 332f5725e05..30b8338bc5c 100644 --- a/substrate/frame/support/test/tests/instance.rs +++ b/substrate/frame/support/test/tests/instance.rs @@ -293,7 +293,6 @@ frame_support::construct_runtime!( impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type Block = Block; - type BlockHashCount = ConstU32<10>; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/support/test/tests/issue2219.rs b/substrate/frame/support/test/tests/issue2219.rs index 1542c4a6c43..20c2773406f 100644 --- a/substrate/frame/support/test/tests/issue2219.rs +++ b/substrate/frame/support/test/tests/issue2219.rs @@ -165,7 +165,6 @@ pub type Block = generic::Block; impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type Block = Block; - type BlockHashCount = ConstU64<10>; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/support/test/tests/origin.rs b/substrate/frame/support/test/tests/origin.rs index a25c575cc51..4f14bda184c 100644 --- a/substrate/frame/support/test/tests/origin.rs +++ b/substrate/frame/support/test/tests/origin.rs @@ -23,7 +23,6 @@ use frame_support::{ derive_impl, traits::{Contains, OriginTrait}, }; -use sp_core::ConstU32; use sp_runtime::{generic, traits::BlakeTwo256}; mod nested { @@ -174,7 +173,6 @@ frame_support::construct_runtime!( impl frame_system::Config for RuntimeOriginTest { type BaseCallFilter = BaseCallFilter; type Block = Block; - type BlockHashCount = ConstU32<10>; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/support/test/tests/pallet.rs 
b/substrate/frame/support/test/tests/pallet.rs index f41e606ad7c..c441d4c371a 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -705,7 +705,6 @@ impl frame_system::Config for Runtime { type Lookup = sp_runtime::traits::IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU32<250>; type BlockWeights = (); type BlockLength = (); type DbWeight = (); diff --git a/substrate/frame/support/test/tests/pallet_instance.rs b/substrate/frame/support/test/tests/pallet_instance.rs index c79cdf93e97..dfe4caa476d 100644 --- a/substrate/frame/support/test/tests/pallet_instance.rs +++ b/substrate/frame/support/test/tests/pallet_instance.rs @@ -308,7 +308,6 @@ impl frame_system::Config for Runtime { type Lookup = sp_runtime::traits::IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU32<250>; type BlockWeights = (); type BlockLength = (); type DbWeight = (); diff --git a/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs b/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs index 6c71b544426..326f3530e26 100644 --- a/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs +++ b/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use frame_support::{derive_impl, traits::ConstU32}; +use frame_support::derive_impl; mod common; @@ -29,7 +29,6 @@ pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs b/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs index 79828119742..4149c4880cc 100644 --- a/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs +++ b/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use frame_support::{derive_impl, traits::ConstU32}; +use frame_support::derive_impl; mod common; @@ -29,7 +29,6 @@ pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs b/substrate/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs index e4ea094d069..3386632c13a 100644 --- a/substrate/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs +++ b/substrate/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs @@ -82,7 +82,6 @@ impl frame_system::Config for Runtime { type Lookup = sp_runtime::traits::IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU32<250>; type BlockWeights = (); type BlockLength = (); type DbWeight = (); diff --git a/substrate/frame/support/test/tests/runtime.rs b/substrate/frame/support/test/tests/runtime.rs index 7c2a8139a13..1f4d9110a24 100644 --- a/substrate/frame/support/test/tests/runtime.rs +++ b/substrate/frame/support/test/tests/runtime.rs @@ -27,7 +27,7 @@ use frame_support::{ }; use frame_system::limits::{BlockLength, BlockWeights}; use scale_info::TypeInfo; -use sp_core::{sr25519, ConstU64}; +use sp_core::sr25519; use sp_runtime::{ generic, traits::{BlakeTwo256, ValidateUnsigned, Verify}, @@ -351,7 +351,6 @@ impl frame_system::Config for Runtime { type PalletInfo = PalletInfo; type OnSetCode = (); type Block = Block; - type BlockHashCount = ConstU64<10>; } impl module1::Config for Runtime { diff --git a/substrate/frame/support/test/tests/runtime_legacy_ordering.rs b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs index 4c7012dca14..5b74cc172c6 100644 --- a/substrate/frame/support/test/tests/runtime_legacy_ordering.rs +++ b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs @@ -27,7 +27,7 @@ use frame_support::{ }; use frame_system::limits::{BlockLength, BlockWeights}; use scale_info::TypeInfo; -use sp_core::{sr25519, ConstU64}; +use sp_core::sr25519; use sp_runtime::{ generic, traits::{BlakeTwo256, ValidateUnsigned, Verify}, @@ -351,7 +351,6 @@ impl frame_system::Config for Runtime { type PalletInfo = PalletInfo; type OnSetCode = (); type Block = Block; - type BlockHashCount = ConstU64<10>; } impl module1::Config for Runtime { diff --git a/substrate/frame/support/test/tests/runtime_metadata.rs b/substrate/frame/support/test/tests/runtime_metadata.rs index 819ec176d2b..48e4d975eb0 100644 --- a/substrate/frame/support/test/tests/runtime_metadata.rs +++ b/substrate/frame/support/test/tests/runtime_metadata.rs @@ -42,7 +42,6 @@ impl frame_system::Config for Runtime { type Lookup = sp_runtime::traits::IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU32<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = (); diff --git a/substrate/frame/support/test/tests/storage_layers.rs b/substrate/frame/support/test/tests/storage_layers.rs index caa125153e9..0e8ef668531 100644 --- a/substrate/frame/support/test/tests/storage_layers.rs +++ b/substrate/frame/support/test/tests/storage_layers.rs @@ -78,7 +78,6 @@ impl frame_system::Config for Runtime { type Lookup = sp_runtime::traits::IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU32<250>; type DbWeight = (); type Version = (); type PalletInfo = PalletInfo; diff --git 
a/substrate/frame/support/test/tests/storage_transaction.rs b/substrate/frame/support/test/tests/storage_transaction.rs index a5bbfd24ab0..7f66a43b9af 100644 --- a/substrate/frame/support/test/tests/storage_transaction.rs +++ b/substrate/frame/support/test/tests/storage_transaction.rs @@ -24,7 +24,7 @@ use frame_support::{ storage::{with_transaction, TransactionOutcome::*}, transactional, }; -use sp_core::{sr25519, ConstU32}; +use sp_core::sr25519; use sp_io::TestExternalities; use sp_runtime::{ generic, @@ -91,7 +91,6 @@ frame_support::construct_runtime!( impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type Block = Block; - type BlockHashCount = ConstU32<10>; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/support/test/tests/versioned_migration.rs b/substrate/frame/support/test/tests/versioned_migration.rs index e7d146940cb..c83dd6b71de 100644 --- a/substrate/frame/support/test/tests/versioned_migration.rs +++ b/substrate/frame/support/test/tests/versioned_migration.rs @@ -27,7 +27,6 @@ use frame_support::{ weights::constants::RocksDbWeight, }; use frame_system::Config; -use sp_core::ConstU64; use sp_runtime::BuildStorage; type Block = frame_system::mocking::MockBlock; @@ -75,7 +74,6 @@ construct_runtime!( impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type Block = Block; - type BlockHashCount = ConstU64<10>; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/system/benches/bench.rs b/substrate/frame/system/benches/bench.rs index 87c5581b2a3..b3029630409 100644 --- a/substrate/frame/system/benches/bench.rs +++ b/substrate/frame/system/benches/bench.rs @@ -16,10 +16,7 @@ // limitations under the License. use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use frame_support::{ - derive_impl, - traits::{ConstU32, ConstU64}, -}; +use frame_support::{derive_impl, traits::ConstU32}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -75,7 +72,6 @@ impl frame_system::Config for Runtime { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = (); diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 7ed954d83aa..84d00a1e917 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -262,7 +262,19 @@ pub mod pallet { /// Default implementations of [`DefaultConfig`], which can be used to implement [`Config`]. pub mod config_preludes { use super::{inject_runtime_type, DefaultConfig}; - use frame_support::derive_impl; + use frame_support::{derive_impl, traits::Get}; + + /// A predefined adapter that covers `BlockNumberFor` for `Config::Block::BlockNumber` of + /// the types `u32`, `u64`, and `u128`. + /// + /// NOTE: Avoids overriding `BlockHashCount` when using `mocking::{MockBlock, MockBlockU32, + /// MockBlockU128}`. 
+ pub struct TestBlockHashCount<C: Get<u32>>(sp_std::marker::PhantomData<C>); + impl<I: From<u32>, C: Get<u32>> Get<I> for TestBlockHashCount<C> { + fn get() -> I { + C::get().into() + } + } /// Provides a viable default config that can be used with /// [`derive_impl`](`frame_support::derive_impl`) to derive a testing pallet config @@ -300,7 +312,7 @@ pub mod pallet { #[inject_runtime_type] type RuntimeTask = (); type BaseCallFilter = frame_support::traits::Everything; - type BlockHashCount = frame_support::traits::ConstU64<10>; + type BlockHashCount = TestBlockHashCount<frame_support::traits::ConstU32<10>>; type OnSetCode = (); type SingleBlockMigrations = (); type MultiBlockMigrator = (); @@ -397,7 +409,7 @@ pub mod pallet { /// Maximum number of block number to block hash mappings to keep (oldest pruned first). /// Using 256 as default. - type BlockHashCount = frame_support::traits::ConstU32<256>; + type BlockHashCount = TestBlockHashCount<frame_support::traits::ConstU32<256>>; /// The set code logic, just the default since we're not a parachain. type OnSetCode = (); diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs index 9a2b22b8170..0cafb35d52e 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs @@ -100,7 +100,6 @@ impl frame_system::Config for Runtime { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs index b04d4ffd9e0..f27fcd53fec 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs @@ -86,7 +86,6 @@ impl frame_system::Config for Runtime { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/substrate/frame/transaction-payment/src/mock.rs b/substrate/frame/transaction-payment/src/mock.rs index c1bb05ab5c7..1ef95128f2a 100644 --- a/substrate/frame/transaction-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/src/mock.rs @@ -85,7 +85,6 @@ impl frame_system::Config for Runtime { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/substrate/frame/transaction-storage/src/mock.rs b/substrate/frame/transaction-storage/src/mock.rs index f1e9e0591f6..73174b73dba 100644 --- a/substrate/frame/transaction-storage/src/mock.rs +++ b/substrate/frame/transaction-storage/src/mock.rs @@ -23,7 +23,7 @@ use crate::{ }; use frame_support::{ derive_impl, - traits::{ConstU32, ConstU64, OnFinalize, OnInitialize}, + traits::{ConstU32, OnFinalize, OnInitialize}, }; use sp_runtime::{traits::IdentityLookup, BuildStorage}; @@ -44,7 +44,6 @@ impl frame_system::Config for Test { type Block = Block; type AccountData = pallet_balances::AccountData; type AccountId = u64; - type BlockHashCount = ConstU64<250>; type Lookup = IdentityLookup; } diff --git a/substrate/frame/tx-pause/src/mock.rs
b/substrate/frame/tx-pause/src/mock.rs index 5206023838b..7245fe7d5d7 100644 --- a/substrate/frame/tx-pause/src/mock.rs +++ b/substrate/frame/tx-pause/src/mock.rs @@ -33,9 +33,6 @@ use sp_runtime::{ BuildStorage, }; -parameter_types! { - pub const BlockHashCount: u64 = 250; -} #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = InsideBoth; @@ -50,7 +47,6 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; type Block = Block; - type BlockHashCount = BlockHashCount; type DbWeight = (); type Version = (); type PalletInfo = PalletInfo; diff --git a/templates/parachain/pallets/template/src/mock.rs b/templates/parachain/pallets/template/src/mock.rs index 8a88be3e3e9..9a907f61660 100644 --- a/templates/parachain/pallets/template/src/mock.rs +++ b/templates/parachain/pallets/template/src/mock.rs @@ -18,7 +18,6 @@ frame_support::construct_runtime!( ); parameter_types! { - pub const BlockHashCount: u64 = 250; pub const SS58Prefix: u8 = 42; } @@ -37,7 +36,6 @@ impl system::Config for Test { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type Version = (); type PalletInfo = PalletInfo; type AccountData = (); diff --git a/templates/solochain/pallets/template/src/mock.rs b/templates/solochain/pallets/template/src/mock.rs index 3f1fd2dd6d4..09081dae062 100644 --- a/templates/solochain/pallets/template/src/mock.rs +++ b/templates/solochain/pallets/template/src/mock.rs @@ -1,8 +1,5 @@ use crate as pallet_template; -use frame_support::{ - derive_impl, - traits::{ConstU16, ConstU64}, -}; +use frame_support::{derive_impl, traits::ConstU16}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -35,7 +32,6 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = (); -- GitLab From 49bd6a6e94b8b6f4ef3497e930cfb493b8ec0fd0 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Fri, 24 May 2024 13:55:58 +0200 Subject: [PATCH 059/106] Remove litep2p git dependency (#4560) @serban300 could you please do the same for the MMR crate? Am not sure what commit was released since there are no release tags in the repo. 
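For context, the functional change is just swapping the dependency source from a pinned git revision to a published release in the affected `Cargo.toml` files. A minimal sketch of the before/after forms, with the crate name, revision, and version as they appear in the diff below:

```toml
# Before: pinned to a git revision, since no release tag was available.
litep2p = { git = "https://github.com/paritytech/litep2p", rev = "e03a6023882db111beeb24d8c0ceaac0721d3f0f" }

# After: a regular registry dependency on the released version.
litep2p = "0.4.0"
```

Cargo then resolves the crate through crates.io, which is why `Cargo.lock` gains a registry `source` and a `checksum` entry in the diff.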
--------- Signed-off-by: Oliver Tale-Yazdi --- Cargo.lock | 5 +++-- substrate/client/network/Cargo.toml | 2 +- substrate/client/network/types/Cargo.toml | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a9cc4f9202a..0bdbcfd02eb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8065,8 +8065,9 @@ dependencies = [ [[package]] name = "litep2p" -version = "0.3.0" -source = "git+https://github.com/paritytech/litep2p?rev=e03a6023882db111beeb24d8c0ceaac0721d3f0f#e03a6023882db111beeb24d8c0ceaac0721d3f0f" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adf107268459b653df189050c9ae2301253b9c62ceafa993dc69dad29870155c" dependencies = [ "async-trait", "bs58 0.4.0", diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml index 5a469469539..b06d9c73540 100644 --- a/substrate/client/network/Cargo.toml +++ b/substrate/client/network/Cargo.toml @@ -59,7 +59,7 @@ sp-blockchain = { path = "../../primitives/blockchain" } sp-core = { path = "../../primitives/core" } sp-runtime = { path = "../../primitives/runtime" } wasm-timer = "0.2" -litep2p = { git = "https://github.com/paritytech/litep2p", rev = "e03a6023882db111beeb24d8c0ceaac0721d3f0f" } +litep2p = "0.4.0" once_cell = "1.18.0" void = "1.0.2" schnellru = "0.2.1" diff --git a/substrate/client/network/types/Cargo.toml b/substrate/client/network/types/Cargo.toml index f9d9330a439..ed89eca2dd1 100644 --- a/substrate/client/network/types/Cargo.toml +++ b/substrate/client/network/types/Cargo.toml @@ -13,7 +13,7 @@ documentation = "https://docs.rs/sc-network-types" bs58 = "0.5.0" ed25519-dalek = "2.1" libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } -litep2p = { git = "https://github.com/paritytech/litep2p", rev = "e03a6023882db111beeb24d8c0ceaac0721d3f0f" } +litep2p = "0.4.0" multiaddr = "0.17.0" multihash = { version = "0.17.0", default-features = false, features = ["identity", "multihash-impl", "sha2", "std"] } rand = "0.8.5" -- GitLab From 1c7a1a58f0c9591014b271d645c18811227bead1 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Fri, 24 May 2024 15:19:20 +0200 Subject: [PATCH 060/106] Polkadot-SDK Umbrella Crate (#3935) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Umbrella Crate The Polkadot-SDK "umbrella" is a crate that re-exports all other published crates. This makes it possible to have a very small `Cargo.toml` file that only has one dependency, the umbrella crate. This helps with selecting the right combination of crate versions, since otherwise 3rd party tools are needed to select a compatible set of versions. ## Features The umbrella crate supports no-std builds and can therefore be used in the runtime and node. There are two main features: `runtime` and `node`. The `runtime` feature enables all `no-std` crates, while the `node` feature enables all `std` crates. It should be used like any other crate in the repo, with `default-features = false`. For more fine-grained control, additionally, each crate can be enabled selectively. The umbrella exposes one feature per dependency. For example, if you only want to use the `frame-support` crate, you can enable the `frame-support` feature. The umbrella exposes a few more general features: - `tuples-96`: Needs to be enabled for runtimes that have more than 64 pallets. - `serde`: Specifically enable `serde` en/decoding support. 
- `experimental`: Enable experimental features - these should not yet be used in production. - `with-tracing`: Enable tracing support. - `try-runtime`, `runtime-benchmarks` and `std`: These follow the standard conventions. - `runtime`: As described above, enable all `no-std` crates. - `node`: As described above, enable all `std` crates. - There does *not* exist a dedicated docs feature. To generate docs, enable the `runtime` and `node` features. For docs.rs the manifest contains specific configuration to make it show all re-exports. There is a specific `zepter` check in place to ensure that the features of the umbrella are correctly configured. This check is run in CI and locally when running `zepter`. ## Generation The umbrella crate needs to be updated every time a new crate is added to or removed from the workspace. It is checked in CI by calling its generation script. The generation script is located in `./scripts/generate-umbrella.py` and needs the `cargo_workspace` dependency. Example: `python3 scripts/generate-umbrella.py --sdk . --version 1.9.0` ## Usage > Note: You can see a live example in the `staging-node-cli` and `kitchensink-runtime` crates. The umbrella crate can be added to your runtime crate like this: `polkadot-sdk = { path = "../../../../umbrella", features = ["runtime"], default-features = false }` or for a node: `polkadot-sdk = { path = "../../../../umbrella", features = ["node"], default-features = false }` In the code, it is then possible to bring all dependencies into scope via: `use polkadot_sdk::*;` (see also the feature-selection sketch at the end of this description). ### Known Issues The only known issue so far is the fact that the `use` statement brings the dependencies only into the outer module scope - not the global crate scope. For example, the following code would need to be adjusted: ```rust use polkadot_sdk::*; mod foo { // This does sadly not compile: frame_support::parameter_types! { } // Instead, we need to do this (or add an equivalent `use` statement): polkadot_sdk::frame_support::parameter_types! { } } ``` Apart from this, no issues are known. There could be some bugs with how macros locate their own re-exports. Please report compile issues that arise from using this crate. ## Dependencies The umbrella crate re-exports all published crates, with a few exceptions: - Runtime crates like `rococo-runtime` etc. are not exported. This otherwise leads to very weird compile errors and should not be needed anyway. - Example and fuzzing crates are not exported. This is currently detected by checking the name of the crate for these magic words. In the future, it will utilize custom metadata, as it is done in the `rococo-runtime` crate. - The umbrella crate itself. Should be obvious :) ## Follow Ups - [ ] Re-writing the generator in Rust - the Python script is at its limit. - [ ] Using custom metadata to exclude some crates instead of filtering by names. - [ ] Finding a way to set the version properly. Currently it's locked in the CI script.
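To make the fine-grained feature selection described above concrete, here is a minimal sketch of a `Cargo.toml` for a crate that only needs two of the re-exported crates. The relative path and the `std` forwarding are illustrative assumptions, not taken from this PR:

```toml
[dependencies]
# Enable only the re-exports this crate needs, instead of `runtime`/`node`:
polkadot-sdk = { path = "../umbrella", default-features = false, features = [
    "frame-support",
    "frame-system",
] }

[features]
default = ["std"]
# Forward `std` to the umbrella, as with any other dependency:
std = ["polkadot-sdk/std"]
```

The selected crates are then reachable in code as `polkadot_sdk::frame_support` and `polkadot_sdk::frame_system`, or via `use polkadot_sdk::*;` as in the Usage section above.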
--------- Signed-off-by: Oliver Tale-Yazdi --- .config/lychee.toml | 1 + .config/zepter.yaml | 6 +- .github/workflows/check-features.yml | 2 +- .github/workflows/checks-quick.yml | 30 + Cargo.lock | 595 ++-- Cargo.toml | 14 +- .../src/validate_block/implementation.rs | 2 + docs/sdk/Cargo.toml | 1 + docs/sdk/src/reference_docs/mod.rs | 3 + docs/sdk/src/reference_docs/umbrella_crate.rs | 89 + .../runtime/parachains/src/inclusion/mod.rs | 1 + prdoc/pr_3935.prdoc | 30 + scripts/generate-umbrella.py | 204 ++ substrate/bin/node/cli/Cargo.toml | 164 +- .../bin/node/cli/benches/block_production.rs | 2 + substrate/bin/node/cli/benches/executor.rs | 2 + .../bin/node/cli/benches/transaction_pool.rs | 1 + substrate/bin/node/cli/bin/main.rs | 1 + substrate/bin/node/cli/build.rs | 4 +- substrate/bin/node/cli/src/benchmarking.rs | 2 + substrate/bin/node/cli/src/chain_spec.rs | 6 +- substrate/bin/node/cli/src/cli.rs | 2 + substrate/bin/node/cli/src/command.rs | 4 +- substrate/bin/node/cli/src/service.rs | 5 +- substrate/bin/node/cli/tests/basic.rs | 1 + substrate/bin/node/cli/tests/common.rs | 3 +- substrate/bin/node/cli/tests/fees.rs | 1 + .../bin/node/cli/tests/submit_transaction.rs | 1 + substrate/bin/node/runtime/Cargo.toml | 385 +-- substrate/bin/node/runtime/src/assets_api.rs | 2 + substrate/bin/node/runtime/src/impls.rs | 3 + substrate/bin/node/runtime/src/lib.rs | 8 +- .../client/chain-spec/derive/src/impls.rs | 25 +- .../solution-type/src/lib.rs | 11 +- .../frame/staking/reward-curve/src/lib.rs | 8 +- .../frame/support/procedural/tools/src/lib.rs | 18 + .../primitives/api/proc-macro/src/utils.rs | 4 + umbrella/Cargo.toml | 2449 +++++++++++++++++ umbrella/src/lib.rs | 1564 +++++++++++ 39 files changed, 4907 insertions(+), 747 deletions(-) create mode 100644 docs/sdk/src/reference_docs/umbrella_crate.rs create mode 100644 prdoc/pr_3935.prdoc create mode 100644 scripts/generate-umbrella.py create mode 100644 umbrella/Cargo.toml create mode 100644 umbrella/src/lib.rs diff --git a/.config/lychee.toml b/.config/lychee.toml index b7b5b83f35b..1de9fcd559d 100644 --- a/.config/lychee.toml +++ b/.config/lychee.toml @@ -51,5 +51,6 @@ exclude = [ "https://www.reddit.com/r/rust/comments/3spfh1/does_collect_allocate_more_than_once_while/", # 403 rate limited: "https://etherscan.io/block/11090290", + "https://subscan.io/", "https://substrate.stackexchange.com/.*", ] diff --git a/.config/zepter.yaml b/.config/zepter.yaml index f701392d16b..9b3bd9d618c 100644 --- a/.config/zepter.yaml +++ b/.config/zepter.yaml @@ -25,9 +25,13 @@ workflows: '--show-path', '--quiet', ] - # Same as `check`, but with the `--fix` flag. + # The umbrella crate uses more features, so we need to check those too: + check_umbrella: + - [ $check.0, '--features=serde,experimental,with-tracing,tuples-96', '-p=polkadot-sdk' ] + # Same as `check_*`, but with the `--fix` flag.
default: - [ $check.0, '--fix' ] + - [ $check_umbrella.0, '--fix' ] # Will be displayed when any workflow fails: help: diff --git a/.github/workflows/check-features.yml b/.github/workflows/check-features.yml index 53d6ac6b4db..d34b3d52c53 100644 --- a/.github/workflows/check-features.yml +++ b/.github/workflows/check-features.yml @@ -13,7 +13,7 @@ jobs: - name: Check uses: hack-ink/cargo-featalign-action@bea88a864d6ca7d0c53c26f1391ce1d431dc7f34 # v0.1.1 with: - crate: substrate/bin/node/runtime + crate: templates/parachain/runtime/ features: std,runtime-benchmarks,try-runtime ignore: sc-executor default-std: true diff --git a/.github/workflows/checks-quick.yml b/.github/workflows/checks-quick.yml index 217adf40a39..3888928311a 100644 --- a/.github/workflows/checks-quick.yml +++ b/.github/workflows/checks-quick.yml @@ -116,3 +116,33 @@ jobs: run: | echo "Checking markdown formatting. More info: docs/contributor/markdown_linting.md" markdownlint --config "$CONFIG" --ignore target . + check-umbrella: + runs-on: arc-runners-polkadot-sdk + timeout-minutes: 10 + needs: [set-image] + container: + image: ${{ needs.set-image.outputs.IMAGE }} + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.0 (22. Sep 2023) + - name: install python deps + run: | + sudo apt-get update && sudo apt-get install -y python3-pip python3 + pip3 install "cargo-workspace>=1.2.4" toml + - name: check umbrella correctness + run: | + python3 scripts/generate-umbrella.py --sdk . --version 0.1.0 + cargo +nightly fmt --all + if [ -n "$(git status --porcelain)" ]; then + cat <, @@ -186,6 +187,7 @@ where ) .expect("Invalid relay chain state proof"); + #[allow(deprecated)] let res = CI::check_inherents(&block, &relay_chain_proof); if !res.ok() { diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index f9812dbd044..a8c873be556 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -30,6 +30,7 @@ simple-mermaid = "0.1.1" docify = "0.2.8" # Polkadot SDK deps, typically all should only be in scope such that we can link to their doc item. +polkadot-sdk = { path = "../../umbrella", features = ["runtime"] } node-cli = { package = "staging-node-cli", path = "../../substrate/bin/node/cli" } kitchensink-runtime = { path = "../../substrate/bin/node/runtime" } chain-spec-builder = { package = "staging-chain-spec-builder", path = "../../substrate/bin/utils/chain-spec-builder" } diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs index 145df8844f2..6fa25bf36e1 100644 --- a/docs/sdk/src/reference_docs/mod.rs +++ b/docs/sdk/src/reference_docs/mod.rs @@ -106,3 +106,6 @@ pub mod frame_offchain_workers; /// Learn about the different ways through which multiple [`frame`] pallets can be combined to work /// together. pub mod frame_pallet_coupling; + +/// Learn about the Polkadot Umbrella crate that re-exports all other crates. +pub mod umbrella_crate; diff --git a/docs/sdk/src/reference_docs/umbrella_crate.rs b/docs/sdk/src/reference_docs/umbrella_crate.rs new file mode 100644 index 00000000000..9751b0ad5ad --- /dev/null +++ b/docs/sdk/src/reference_docs/umbrella_crate.rs @@ -0,0 +1,89 @@ +//! # Umbrella Crate +//! +//! The Polkadot-SDK "umbrella" is a crate that re-exports all other published crates. This makes it +//! possible to have a very small `Cargo.toml` file that only has one dependency, the umbrella +//! crate. This helps with selecting the right combination of crate versions, since otherwise 3rd +//! party tools are needed to select a compatible set of versions. +//! 
+//! ## Features +//! +//! The umbrella crate supports no-std builds and can therefore be used in the runtime and node. +//! There are two main features: `runtime` and `node`. The `runtime` feature enables all `no-std` +//! crates, while the `node` feature enables all `std` crates. It should be used like any other +//! crate in the repo, with `default-features = false`. +//! +//! For more fine-grained control, additionally, each crate can be enabled selectively. The umbrella +//! exposes one feature per dependency. For example, if you only want to use the `frame-support` +//! crate, you can enable the `frame-support` feature. +//! +//! The umbrella exposes a few more general features: +//! - `tuples-96`: Needs to be enabled for runtimes that have more than 64 pallets. +//! - `serde`: Specifically enable `serde` en/decoding support. +//! - `experimental`: Enable experimental features - these should not yet be used in production. +//! - `with-tracing`: Enable tracing support. +//! - `try-runtime`, `runtime-benchmarks` and `std`: These follow the standard conventions. +//! - `runtime`: As described above, enable all `no-std` crates. +//! - `node`: As described above, enable all `std` crates. +//! - There does *not* exist a dedicated docs feature. To generate docs, enable the `runtime` and +//! `node` features. For docs.rs the manifest contains specific configuration to make it show +//! all re-exports. +//! +//! There is a specific `zepter` check in place to ensure that the features of the umbrella are +//! correctly configured. This check is run in CI and locally when running `zepter`. +//! +//! ## Generation +//! +//! The umbrella crate needs to be updated every time a new crate is added to or removed from the +//! workspace. It is checked in CI by calling its generation script. The generation script is +//! located in `./scripts/generate-umbrella.py` and needs the `cargo_workspace` dependency. +//! +//! Example: `python3 scripts/generate-umbrella.py --sdk . --version 1.9.0` +//! +//! ## Usage +//! +//! > Note: You can see a live example in the `staging-node-cli` and `kitchensink-runtime` crates. +//! +//! The umbrella crate can be added to your runtime crate like this: +//! +//! `polkadot-sdk = { path = "../../../../umbrella", features = ["runtime"], default-features = +//! false }` +//! +//! or for a node: +//! +//! `polkadot-sdk = { path = "../../../../umbrella", features = ["node"], default-features = false +//! }` +//! +//! In the code, it is then possible to bring all dependencies into scope via: +//! +//! `use polkadot_sdk::*;` +//! +//! ### Known Issues +//! +//! The only known issue so far is the fact that the `use` statement brings the dependencies only +//! into the outer module scope - not the global crate scope. For example, the following code would +//! need to be adjusted: +//! +//! ```rust +//! use polkadot_sdk::*; +//! +//! mod foo { +//! // This does sadly not compile: +//! frame_support::parameter_types! { } +//! +//! // Instead, we need to do this (or add an equivalent `use` statement): +//! polkadot_sdk::frame_support::parameter_types! { } +//! } +//! ``` +//! +//! Apart from this, no issues are known. There could be some bugs with how macros locate their own +//! re-exports. Please report compile issues that arise from using this crate. +//! +//! ## Dependencies +//! +//! The umbrella crate re-exports all published crates, with a few exceptions: +//! - Runtime crates like `rococo-runtime` etc. are not exported. This otherwise leads to very weird +//!
compile errors and should not be needed anyway. +//! - Example and fuzzing crates are not exported. This is currently detected by checking the name +//! of the crate for these magic words. In the future, it will utilize custom metadata, as it is +//! done in the `rococo-runtime` crate. +//! - The umbrella crate itself. Should be obvious :) diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 31befefa322..0c727498408 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -421,6 +421,7 @@ impl From for AcceptanceCheckErr { /// An error returned by [`Pallet::check_upward_messages`] that indicates a violation of one of /// acceptance criteria rules. #[cfg_attr(test, derive(PartialEq))] +#[allow(dead_code)] pub(crate) enum UmpAcceptanceCheckErr { /// The maximal number of messages that can be submitted in one batch was exceeded. MoreMessagesThanPermitted { sent: u32, permitted: u32 }, diff --git a/prdoc/pr_3935.prdoc b/prdoc/pr_3935.prdoc new file mode 100644 index 00000000000..93b0fba5d99 --- /dev/null +++ b/prdoc/pr_3935.prdoc @@ -0,0 +1,30 @@ +title: "Introduce Polkadot-SDK umbrella crate" + +doc: + - audience: Runtime Dev + description: | + Introduces a new "umbrella" crate that re-exports all published crates of the Polkadot-SDK. + This helps developers to select a valid set of versions for all underlying dependencies. + + You can now use this crate and remove lots of dependencies from your runtime and node crates. + The `staging-node-cli` and `kitchensink-runtime` both adopt this pattern as an example. + + Full docs in `docs/sdk/src/reference_docs/umbrella_crate.rs`. + +crates: + - name: cumulus-pallet-parachain-system + bump: patch + - name: sc-chain-spec-derive + bump: patch + - name: frame-election-provider-solution-type + bump: patch + - name: pallet-staking-reward-curve + bump: patch + - name: frame-support-procedural-tools + bump: patch + - name: sp-api-proc-macro + bump: patch + - name: polkadot-runtime-parachains + bump: patch + - name: polkadot-sdk + bump: major diff --git a/scripts/generate-umbrella.py b/scripts/generate-umbrella.py new file mode 100644 index 00000000000..0bdf160e63b --- /dev/null +++ b/scripts/generate-umbrella.py @@ -0,0 +1,204 @@ +""" + +Creates the Polkadot-SDK umbrella crate that re-exports all other crates. + +This re-creates the `umbrella/` folder. Ensure that it does not contain any changes you want to keep. + +Usage: + python3 polkadot-sdk-umbrella-crate.py --sdk <path> --version <version> + +Example: + python3 polkadot-sdk-umbrella-crate.py --sdk ../polkadot-sdk --version 1.11.0 +""" + +import argparse +import os +import re +import toml
+import shutil + +from cargo_workspace import Workspace + +""" +Crate names that should be excluded from the umbrella crate. +""" +def exclude(crate): + name = crate.name + if crate.metadata.get("polkadot-sdk.skip-umbrella", False): + return True + + # No fuzzers or examples: + if "example" in name or name.endswith("fuzzer"): + return True + # No runtime crates: + if name.endswith("-runtime"): + # Note: this is a bit hacky. We should use custom crate metadata instead. + return name != "sp-runtime" and name != "bp-runtime" and name != "frame-try-runtime" + + return False + +def main(path, version): + delete_umbrella(path) + workspace = Workspace.from_path(path) + print(f'Indexed {workspace}') + + std_crates = [] # name -> path.
use list for sorting + nostd_crates = [] + for crate in workspace.crates: + if crate.name == 'polkadot-sdk': + continue + if not crate.publish: + print(f"Skipping {crate.name} as it is not published") + continue + + lib_path = os.path.dirname(crate.abs_path) + manifest_path = os.path.join(lib_path, "Cargo.toml") + lib_path = os.path.join(lib_path, "src", "lib.rs") + path = os.path.dirname(crate.rel_path) + + # Guess which crates support no_std. Proc-macro crates are always no_std: + with open(manifest_path, "r") as f: + manifest = toml.load(f) + if 'lib' in manifest and 'proc-macro' in manifest['lib']: + if manifest['lib']['proc-macro']: + nostd_crates.append((crate, path)) + continue + + # Crates without a lib.rs cannot be no_std + if not os.path.exists(lib_path): + print(f"Skipping {crate.name} as it does not have a 'src/lib.rs'") + continue + if exclude(crate): + print(f"Skipping {crate.name} as it is in the exclude list") + continue + + # Now search for a no_std attribute: + with open(lib_path, "r") as f: + content = f.read() + if "#![no_std]" in content or '#![cfg_attr(not(feature = "std"), no_std)]' in content: + nostd_crates.append((crate, path)) + elif 'no_std' in content: + raise Exception(f"Found 'no_std' in {lib_path} without knowing how to handle it") + else: + std_crates.append((crate, path)) + + # Sort by name + std_crates.sort(key=lambda x: x[0].name) + nostd_crates.sort(key=lambda x: x[0].name) + all_crates = std_crates + nostd_crates + all_crates.sort(key=lambda x: x[0].name) + dependencies = {} + + for (crate, path) in nostd_crates: + dependencies[crate.name] = {"path": f"../{path}", "default-features": False, "optional": True} + + for (crate, path) in std_crates: + dependencies[crate.name] = {"path": f"../{path}", "default-features": False, "optional": True} + + # The empty features are filled by Zepter + features = { + "default": [ "std" ], + "std": [], + "runtime-benchmarks": [], + "try-runtime": [], + "serde": [], + "experimental": [], + "with-tracing": [], + "runtime": list([f"{d.name}" for d, _ in nostd_crates]), + "node": ["std"] + list([f"{d.name}" for d, _ in std_crates]), + "tuples-96": [], + } + + manifest = { + "package": { + "name": "polkadot-sdk", + "version": version, + "edition": { "workspace": True }, + "authors": { "workspace": True }, + "description": "Polkadot SDK umbrella crate.", + "license": "Apache-2.0", + "metadata": { "docs": { "rs": { + "features": ["runtime", "node"], + "targets": ["x86_64-unknown-linux-gnu"] + }}} + }, + "dependencies": dependencies, + "features": features, + } + + umbrella_dir = os.path.join(workspace.path, "umbrella") + manifest_path = os.path.join(umbrella_dir, "Cargo.toml") + lib_path = os.path.join(umbrella_dir, "src", "lib.rs") + # create all dirs + os.makedirs(os.path.dirname(lib_path), exist_ok=True) + # Write the manifest + with open(manifest_path, "w") as f: + toml_manifest = toml.dumps(manifest) + f.write(toml_manifest) + print(f"Wrote {manifest_path}") + # and the lib.rs + with open(lib_path, "w") as f: + f.write('''// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +#![cfg_attr(not(feature = "std"), no_std)] + +//! Polkadot SDK umbrella crate re-exporting all other published crates. +//! +//! This helps to set a single version number for all your dependencies. Docs are in the +//! `polkadot-sdk-docs` crate. + +// This file is auto-generated and checked by the CI. You can edit it manually, but it must be +// exactly the way that the CI expects it.
+''') + + for crate, _ in all_crates: + use = crate.name.replace("-", "_") + desc = crate.description if crate.description.endswith(".") else crate.description + "." + f.write(f'\n/// {desc}') + f.write(f'\n#[cfg(feature = "{crate.name}")]\n') + f.write(f"pub use {use};\n") + + print(f"Wrote {lib_path}") + + add_to_workspace(workspace.path) + +""" +Delete the umbrella folder and remove the umbrella crate from the workspace. +""" +def delete_umbrella(path): + umbrella_dir = os.path.join(path, "umbrella") + # remove the umbrella crate from the workspace + manifest = os.path.join(path, "Cargo.toml") + manifest = open(manifest, "r").read() + manifest = re.sub(r'\s+"umbrella",\n', "", manifest) + with open(os.path.join(path, "Cargo.toml"), "w") as f: + f.write(manifest) + if os.path.exists(umbrella_dir): + print(f"Deleting {umbrella_dir}") + shutil.rmtree(umbrella_dir) + +""" +Create the umbrella crate and add it to the workspace. +""" +def add_to_workspace(path): + manifest = os.path.join(path, "Cargo.toml") + manifest = open(manifest, "r").read() + manifest = re.sub(r'^members = \[', 'members = [\n "umbrella",', manifest, flags=re.M) + with open(os.path.join(path, "Cargo.toml"), "w") as f: + f.write(manifest) + + os.chdir(path) # hack + os.system("cargo metadata --format-version 1 > /dev/null") # update the lockfile + os.system(f"zepter") # enable the features + os.system(f"taplo format --config .config/taplo.toml Cargo.toml umbrella/Cargo.toml") + +def parse_args(): + parser = argparse.ArgumentParser(description="Create a polkadot-sdk crate") + parser.add_argument("--sdk", type=str, default="polkadot-sdk", help="Path to the polkadot-sdk crate") + parser.add_argument("--version", type=str, help="Version of the polkadot-sdk crate") + return parser.parse_args() + +if __name__ == "__main__": + args = parse_args() + main(args.sdk, args.version) diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 9c49fd7b362..929cd6a29e3 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -23,13 +23,10 @@ wasm-opt = false targets = ["x86_64-unknown-linux-gnu"] [badges] -travis-ci = { repository = "paritytech/substrate" } maintenance = { status = "actively-developed" } -is-it-maintained-issue-resolution = { repository = "paritytech/substrate" } -is-it-maintained-open-issues = { repository = "paritytech/substrate" } +is-it-maintained-issue-resolution = { repository = "paritytech/polkadot-sdk" } +is-it-maintained-open-issues = { repository = "paritytech/polkadot-sdk" } -# The same node binary as the `substrate` (defined in the workspace `Cargo.toml`) binary, -# but just exposed by this crate here. 
[[bin]] name = "substrate-node" path = "bin/main.rs" @@ -40,7 +37,7 @@ crate-type = ["cdylib", "rlib"] [dependencies] # third-party dependencies -array-bytes = "6.2.2" +array-bytes = "6.1" clap = { version = "4.5.3", features = ["derive"], optional = true } codec = { package = "parity-scale-codec", version = "3.6.12" } serde = { features = ["derive"], workspace = true, default-features = true } @@ -48,88 +45,18 @@ jsonrpsee = { version = "0.22", features = ["server"] } futures = "0.3.30" log = { workspace = true, default-features = true } rand = "0.8" +serde_json = { workspace = true, default-features = true } + +# The Polkadot-SDK: +polkadot-sdk = { path = "../../../../umbrella", features = ["node"] } -# primitives -sp-authority-discovery = { path = "../../../primitives/authority-discovery" } -sp-consensus-babe = { path = "../../../primitives/consensus/babe" } -beefy-primitives = { package = "sp-consensus-beefy", path = "../../../primitives/consensus/beefy" } -grandpa-primitives = { package = "sp-consensus-grandpa", path = "../../../primitives/consensus/grandpa" } -sp-api = { path = "../../../primitives/api" } -sp-core = { path = "../../../primitives/core" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-timestamp = { path = "../../../primitives/timestamp" } -sp-genesis-builder = { path = "../../../primitives/genesis-builder" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-keyring = { path = "../../../primitives/keyring" } -sp-keystore = { path = "../../../primitives/keystore" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-transaction-storage-proof = { path = "../../../primitives/transaction-storage-proof" } -sp-io = { path = "../../../primitives/io" } -sp-mixnet = { path = "../../../primitives/mixnet" } -sp-mmr-primitives = { path = "../../../primitives/merkle-mountain-range" } -sp-statement-store = { path = "../../../primitives/statement-store" } - -# client dependencies -sc-client-api = { path = "../../../client/api" } -sc-chain-spec = { path = "../../../client/chain-spec" } -sc-consensus = { path = "../../../client/consensus/common" } -sc-transaction-pool = { path = "../../../client/transaction-pool" } -sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } -sc-statement-store = { path = "../../../client/statement-store" } -sc-network = { path = "../../../client/network" } -sc-network-common = { path = "../../../client/network/common" } -sc-network-sync = { path = "../../../client/network/sync" } -sc-network-statement = { path = "../../../client/network/statement" } -sc-consensus-slots = { path = "../../../client/consensus/slots" } -sc-consensus-babe = { path = "../../../client/consensus/babe" } -beefy = { package = "sc-consensus-beefy", path = "../../../client/consensus/beefy" } -grandpa = { package = "sc-consensus-grandpa", path = "../../../client/consensus/grandpa" } -mmr-gadget = { path = "../../../client/merkle-mountain-range" } -sc-rpc = { path = "../../../client/rpc" } -sc-basic-authorship = { path = "../../../client/basic-authorship" } -sc-service = { path = "../../../client/service", default-features = false } -sc-telemetry = { path = "../../../client/telemetry" } -sc-executor = { path = "../../../client/executor" } -sc-authority-discovery = { path = "../../../client/authority-discovery" } -sc-mixnet = { path = "../../../client/mixnet" } -sc-sync-state-rpc = { path = "../../../client/sync-state-rpc" } -sc-sysinfo = { path = "../../../client/sysinfo" } -sc-storage-monitor = { path = 
"../../../client/storage-monitor" } -sc-offchain = { path = "../../../client/offchain" } - -# frame dependencies -frame-benchmarking = { path = "../../../frame/benchmarking" } -frame-metadata-hash-extension = { path = "../../../frame/metadata-hash-extension" } -frame-system = { path = "../../../frame/system" } -frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api" } -pallet-assets = { path = "../../../frame/assets" } -pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment" } -pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment" } -pallet-im-online = { path = "../../../frame/im-online", default-features = false } -pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment", default-features = false } - -# node-specific dependencies +# Shared code between the staging node and kitchensink runtime: kitchensink-runtime = { path = "../runtime" } node-rpc = { path = "../rpc" } node-primitives = { path = "../primitives" } - -# CLI-specific dependencies -sc-cli = { path = "../../../client/cli", optional = true } -frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true } node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true } -serde_json = { workspace = true, default-features = true } [dev-dependencies] -sc-keystore = { path = "../../../client/keystore" } -sc-client-db = { path = "../../../client/db" } -sc-consensus = { path = "../../../client/consensus/common" } -sc-consensus-babe = { path = "../../../client/consensus/babe" } -sc-consensus-epochs = { path = "../../../client/consensus/epochs" } -sc-service-test = { path = "../../../client/service/test" } -sc-block-builder = { path = "../../../client/block-builder" } -sp-tracing = { path = "../../../primitives/tracing" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" } futures = "0.3.30" tempfile = "3.1.0" assert_cmd = "2.0.2" @@ -141,92 +68,39 @@ criterion = { version = "0.5.1", features = ["async_tokio"] } tokio = { version = "1.22.0", features = ["macros", "parking_lot", "time"] } tokio-util = { version = "0.7.4", features = ["compat"] } wait-timeout = "0.2" -substrate-rpc-client = { path = "../../../utils/frame/rpc/client" } -pallet-timestamp = { path = "../../../frame/timestamp" } -substrate-cli-test-utils = { path = "../../../test-utils/cli" } - wat = "1.0" -frame-support = { path = "../../../frame/support" } -node-testing = { path = "../testing" } -pallet-balances = { path = "../../../frame/balances" } -pallet-contracts = { path = "../../../frame/contracts" } -pallet-glutton = { path = "../../../frame/glutton" } -pallet-sudo = { path = "../../../frame/sudo" } -pallet-treasury = { path = "../../../frame/treasury" } -pallet-transaction-payment = { path = "../../../frame/transaction-payment" } -sp-application-crypto = { path = "../../../primitives/application-crypto" } -pallet-root-testing = { path = "../../../frame/root-testing" } -sp-consensus-babe = { path = "../../../primitives/consensus/babe" } -sp-externalities = { path = "../../../primitives/externalities" } -sp-keyring = { path = "../../../primitives/keyring" } -sp-runtime = { path = "../../../primitives/runtime" } serde_json = { workspace = true, default-features = true } scale-info = { version = "2.11.1", features = ["derive", "serde"] } -sp-trie = { path = "../../../primitives/trie" } 
-sp-state-machine = { path = "../../../primitives/state-machine" } + +# These testing-only dependencies are not exported by the Polkadot-SDK crate: +node-testing = { path = "../testing" } +substrate-cli-test-utils = { path = "../../../test-utils/cli" } +sc-service-test = { path = "../../../client/service/test" } [build-dependencies] clap = { version = "4.5.3", optional = true } clap_complete = { version = "4.0.2", optional = true } + node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true } -frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true } -substrate-build-script-utils = { path = "../../../utils/build-script-utils", optional = true } -sc-cli = { path = "../../../client/cli", optional = true } -pallet-balances = { path = "../../../frame/balances" } -sc-storage-monitor = { path = "../../../client/storage-monitor" } + +polkadot-sdk = { path = "../../../../umbrella", features = ["frame-benchmarking-cli", "sc-cli", "sc-storage-monitor", "substrate-build-script-utils"], optional = true } [features] default = ["cli"] cli = [ "clap", "clap_complete", - "frame-benchmarking-cli", "node-inspect", - "sc-cli", - "sc-service/rocksdb", - "substrate-build-script-utils", + "polkadot-sdk", ] runtime-benchmarks = [ - "frame-benchmarking-cli/runtime-benchmarks", - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", "kitchensink-runtime/runtime-benchmarks", "node-inspect?/runtime-benchmarks", - "pallet-asset-tx-payment/runtime-benchmarks", - "pallet-assets/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "pallet-contracts/runtime-benchmarks", - "pallet-glutton/runtime-benchmarks", - "pallet-im-online/runtime-benchmarks", - "pallet-skip-feeless-payment/runtime-benchmarks", - "pallet-sudo/runtime-benchmarks", - "pallet-timestamp/runtime-benchmarks", - "pallet-treasury/runtime-benchmarks", - "sc-client-db/runtime-benchmarks", - "sc-service/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", + "polkadot-sdk/runtime-benchmarks", ] -# Enable features that allow the runtime to be tried and debugged. Name might be subject to change -# in the near future. try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", "kitchensink-runtime/try-runtime", - "pallet-asset-conversion-tx-payment/try-runtime", - "pallet-asset-tx-payment/try-runtime", - "pallet-assets/try-runtime", - "pallet-balances/try-runtime", - "pallet-contracts/try-runtime", - "pallet-glutton/try-runtime", - "pallet-im-online/try-runtime", - "pallet-root-testing/try-runtime", - "pallet-skip-feeless-payment/try-runtime", - "pallet-sudo/try-runtime", - "pallet-timestamp/try-runtime", - "pallet-transaction-payment/try-runtime", - "pallet-treasury/try-runtime", - "sp-runtime/try-runtime", + "polkadot-sdk/try-runtime", "substrate-cli-test-utils/try-runtime", ] diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs index ef7ae4fdf26..c16b25187e5 100644 --- a/substrate/bin/node/cli/benches/block_production.rs +++ b/substrate/bin/node/cli/benches/block_production.rs @@ -16,6 +16,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use polkadot_sdk::*; + use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput}; use kitchensink_runtime::{constants::currency::*, BalancesCall}; diff --git a/substrate/bin/node/cli/benches/executor.rs b/substrate/bin/node/cli/benches/executor.rs index 30b52b9ecf6..fa4da5c13d4 100644 --- a/substrate/bin/node/cli/benches/executor.rs +++ b/substrate/bin/node/cli/benches/executor.rs @@ -15,6 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +use polkadot_sdk::*; + use codec::{Decode, Encode}; use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; use frame_support::Hashable; diff --git a/substrate/bin/node/cli/benches/transaction_pool.rs b/substrate/bin/node/cli/benches/transaction_pool.rs index c4488415b98..6618f4b1132 100644 --- a/substrate/bin/node/cli/benches/transaction_pool.rs +++ b/substrate/bin/node/cli/benches/transaction_pool.rs @@ -16,6 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use polkadot_sdk::*; use std::time::Duration; use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput}; diff --git a/substrate/bin/node/cli/bin/main.rs b/substrate/bin/node/cli/bin/main.rs index ccc7d7b6b11..b18d0888055 100644 --- a/substrate/bin/node/cli/bin/main.rs +++ b/substrate/bin/node/cli/bin/main.rs @@ -20,6 +20,7 @@ #![warn(missing_docs)] +use polkadot_sdk::*; use staging_node_cli as node_cli; fn main() -> sc_cli::Result<()> { diff --git a/substrate/bin/node/cli/build.rs b/substrate/bin/node/cli/build.rs index 033f1e3349e..c25d15de057 100644 --- a/substrate/bin/node/cli/build.rs +++ b/substrate/bin/node/cli/build.rs @@ -27,8 +27,10 @@ mod cli { use clap::{CommandFactory, ValueEnum}; use clap_complete::{generate_to, Shell}; + use polkadot_sdk::substrate_build_script_utils::{ + generate_cargo_keys, rerun_if_git_head_changed, + }; use std::{env, fs, path::Path}; - use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; pub fn main() { build_shell_completion(); diff --git a/substrate/bin/node/cli/src/benchmarking.rs b/substrate/bin/node/cli/src/benchmarking.rs index 333f855f2d7..a2b28a0f317 100644 --- a/substrate/bin/node/cli/src/benchmarking.rs +++ b/substrate/bin/node/cli/src/benchmarking.rs @@ -22,6 +22,8 @@ use crate::service::{create_extrinsic, FullClient}; +use polkadot_sdk::*; + use kitchensink_runtime::{BalancesCall, SystemCall}; use node_primitives::{AccountId, Balance}; use sc_cli::Result; diff --git a/substrate/bin/node/cli/src/chain_spec.rs b/substrate/bin/node/cli/src/chain_spec.rs index d48d4a50f85..a3b536e5434 100644 --- a/substrate/bin/node/cli/src/chain_spec.rs +++ b/substrate/bin/node/cli/src/chain_spec.rs @@ -18,8 +18,8 @@ //! Substrate chain configurations. 
-use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; -use grandpa_primitives::AuthorityId as GrandpaId; +use polkadot_sdk::*; + use kitchensink_runtime::{ constants::currency::*, wasm_binary_unwrap, Block, MaxNominations, SessionKeys, StakerStatus, }; @@ -30,6 +30,8 @@ use sc_telemetry::TelemetryEndpoints; use serde::{Deserialize, Serialize}; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; +use sp_consensus_beefy::ecdsa_crypto::AuthorityId as BeefyId; +use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_core::{crypto::UncheckedInto, sr25519, Pair, Public}; use sp_mixnet::types::AuthorityId as MixnetId; use sp_runtime::{ diff --git a/substrate/bin/node/cli/src/cli.rs b/substrate/bin/node/cli/src/cli.rs index 1d1af6e03e9..c0dcacb2e4b 100644 --- a/substrate/bin/node/cli/src/cli.rs +++ b/substrate/bin/node/cli/src/cli.rs @@ -16,6 +16,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use polkadot_sdk::*; + /// An overarching CLI command definition. #[derive(Debug, clap::Parser)] pub struct Cli { diff --git a/substrate/bin/node/cli/src/command.rs b/substrate/bin/node/cli/src/command.rs index d869b77e912..51fbf0904cf 100644 --- a/substrate/bin/node/cli/src/command.rs +++ b/substrate/bin/node/cli/src/command.rs @@ -16,6 +16,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use polkadot_sdk::*; + use super::benchmarking::{inherent_benchmark_data, RemarkBuilder, TransferKeepAliveBuilder}; use crate::{ chain_spec, service, @@ -215,7 +217,7 @@ pub fn run() -> Result<()> { new_partial(&config, None)?; let aux_revert = Box::new(|client: Arc, backend, blocks| { sc_consensus_babe::revert(client.clone(), backend, blocks)?; - grandpa::revert(client, blocks)?; + sc_consensus_grandpa::revert(client, blocks)?; Ok(()) }); Ok((cmd.run(client, backend, Some(aux_revert)), task_manager)) diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 938d73d91b1..84903bd9b87 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -20,6 +20,8 @@ //! Service implementation. Specialized wrapper over substrate service. 
+use polkadot_sdk::{sc_consensus_beefy as beefy, sc_consensus_grandpa as grandpa, *}; + use crate::Cli; use codec::Encode; use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE; @@ -670,7 +672,7 @@ pub fn new_full_base::Hash>>( let beefy_params = beefy::BeefyParams { client: client.clone(), backend: backend.clone(), - payload_provider: beefy_primitives::mmr::MmrRootProvider::new(client.clone()), + payload_provider: sp_consensus_beefy::mmr::MmrRootProvider::new(client.clone()), runtime: client.clone(), key_store: keystore.clone(), network_params, @@ -844,6 +846,7 @@ mod tests { Address, BalancesCall, RuntimeCall, UncheckedExtrinsic, }; use node_primitives::{Block, DigestItem, Signature}; + use polkadot_sdk::*; use sc_client_api::BlockBackend; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use sc_consensus_babe::{BabeIntermediate, CompatibleDigestItem, INTERMEDIATE_KEY}; diff --git a/substrate/bin/node/cli/tests/basic.rs b/substrate/bin/node/cli/tests/basic.rs index a9eea84d926..b1f737ce399 100644 --- a/substrate/bin/node/cli/tests/basic.rs +++ b/substrate/bin/node/cli/tests/basic.rs @@ -22,6 +22,7 @@ use frame_support::{ weights::Weight, }; use frame_system::{self, AccountInfo, EventRecord, Phase}; +use polkadot_sdk::*; use sp_core::{storage::well_known_keys, traits::Externalities}; use sp_runtime::{ traits::Hash as HashT, transaction_validity::InvalidTransaction, ApplyExtrinsicResult, diff --git a/substrate/bin/node/cli/tests/common.rs b/substrate/bin/node/cli/tests/common.rs index 8de87c8b76e..95583395f73 100644 --- a/substrate/bin/node/cli/tests/common.rs +++ b/substrate/bin/node/cli/tests/common.rs @@ -18,6 +18,7 @@ use codec::{Decode, Encode}; use frame_support::Hashable; use frame_system::offchain::AppCrypto; +use polkadot_sdk::*; use sc_executor::error::Result; use sp_consensus_babe::{ digests::{PreDigest, SecondaryPlainPreDigest}, @@ -48,7 +49,7 @@ pub const TEST_KEY_TYPE_ID: KeyTypeId = KeyTypeId(*b"test"); pub mod sr25519 { mod app_sr25519 { use super::super::TEST_KEY_TYPE_ID; - use sp_application_crypto::{app_crypto, sr25519}; + use polkadot_sdk::sp_application_crypto::{app_crypto, sr25519}; app_crypto!(sr25519, TEST_KEY_TYPE_ID); } diff --git a/substrate/bin/node/cli/tests/fees.rs b/substrate/bin/node/cli/tests/fees.rs index 69c96bf63a6..9f82338b4fb 100644 --- a/substrate/bin/node/cli/tests/fees.rs +++ b/substrate/bin/node/cli/tests/fees.rs @@ -28,6 +28,7 @@ use kitchensink_runtime::{ }; use node_primitives::Balance; use node_testing::keyring::*; +use polkadot_sdk::*; use sp_runtime::{traits::One, Perbill}; pub mod common; diff --git a/substrate/bin/node/cli/tests/submit_transaction.rs b/substrate/bin/node/cli/tests/submit_transaction.rs index 5cbb0103d47..18826e7e90a 100644 --- a/substrate/bin/node/cli/tests/submit_transaction.rs +++ b/substrate/bin/node/cli/tests/submit_transaction.rs @@ -18,6 +18,7 @@ use codec::Decode; use frame_system::offchain::{SendSignedTransaction, Signer, SubmitTransaction}; use kitchensink_runtime::{Executive, Indices, Runtime, UncheckedExtrinsic}; +use polkadot_sdk::*; use sp_application_crypto::AppCrypto; use sp_core::offchain::{testing::TestTransactionPoolExt, TransactionPoolExt}; use sp_keyring::sr25519::Keyring::Alice; diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index a96576e17e1..e8cc7b3482b 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -31,411 +31,44 @@ serde_json = { features = ["alloc", "arbitrary_precision"], 
workspace = true } # pallet-asset-conversion: turn on "num-traits" feature primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "num-traits", "scale-info"] } -# primitives -sp-authority-discovery = { path = "../../../primitives/authority-discovery", default-features = false, features = ["serde"] } -sp-consensus-babe = { path = "../../../primitives/consensus/babe", default-features = false, features = ["serde"] } -sp-consensus-beefy = { path = "../../../primitives/consensus/beefy", default-features = false } -sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa", default-features = false, features = ["serde"] } -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false } -sp-genesis-builder = { default-features = false, path = "../../../primitives/genesis-builder" } -sp-inherents = { path = "../../../primitives/inherents", default-features = false } +polkadot-sdk = { path = "../../../../umbrella", features = ["runtime", "tuples-96"], default-features = false } + +# shared code between runtime and node node-primitives = { path = "../primitives", default-features = false } -sp-mixnet = { path = "../../../primitives/mixnet", default-features = false } -sp-offchain = { path = "../../../primitives/offchain", default-features = false } -sp-core = { path = "../../../primitives/core", default-features = false, features = ["serde"] } -sp-std = { path = "../../../primitives/std", default-features = false } -sp-api = { path = "../../../primitives/api", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false, features = ["serde"] } -sp-staking = { path = "../../../primitives/staking", default-features = false, features = ["serde"] } -sp-storage = { path = "../../../primitives/storage", default-features = false } -sp-session = { path = "../../../primitives/session", default-features = false } -sp-transaction-pool = { path = "../../../primitives/transaction-pool", default-features = false } -sp-statement-store = { path = "../../../primitives/statement-store", default-features = false, features = ["serde"] } -sp-version = { path = "../../../primitives/version", default-features = false, features = ["serde"] } -sp-io = { path = "../../../primitives/io", default-features = false } -# frame dependencies -frame-executive = { path = "../../../frame/executive", default-features = false } -frame-benchmarking = { path = "../../../frame/benchmarking", default-features = false } -frame-benchmarking-pallet-pov = { path = "../../../frame/benchmarking/pov", default-features = false } -frame-metadata-hash-extension = { path = "../../../frame/metadata-hash-extension", default-features = false } -frame-support = { path = "../../../frame/support", default-features = false, features = ["experimental", "tuples-96"] } -frame-system = { path = "../../../frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../frame/system/benchmarking", default-features = false, optional = true } -frame-election-provider-support = { path = "../../../frame/election-provider-support", default-features = false } -frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../frame/try-runtime", default-features = false, optional = true } -pallet-alliance = { path = "../../../frame/alliance", default-features = false } -pallet-asset-conversion = { path = "../../../frame/asset-conversion", 
default-features = false }
-pallet-asset-conversion-ops = { path = "../../../frame/asset-conversion/ops", default-features = false }
-pallet-asset-rate = { path = "../../../frame/asset-rate", default-features = false }
-pallet-assets = { path = "../../../frame/assets", default-features = false }
-pallet-authority-discovery = { path = "../../../frame/authority-discovery", default-features = false }
-pallet-authorship = { path = "../../../frame/authorship", default-features = false }
-pallet-babe = { path = "../../../frame/babe", default-features = false }
-pallet-bags-list = { path = "../../../frame/bags-list", default-features = false }
-pallet-balances = { path = "../../../frame/balances", default-features = false }
-pallet-beefy = { path = "../../../frame/beefy", default-features = false }
-pallet-beefy-mmr = { path = "../../../frame/beefy-mmr", default-features = false }
-pallet-bounties = { path = "../../../frame/bounties", default-features = false }
-pallet-broker = { path = "../../../frame/broker", default-features = false }
-pallet-child-bounties = { path = "../../../frame/child-bounties", default-features = false }
-pallet-collective = { path = "../../../frame/collective", default-features = false }
-pallet-contracts = { path = "../../../frame/contracts", default-features = false }
-pallet-conviction-voting = { path = "../../../frame/conviction-voting", default-features = false }
-pallet-core-fellowship = { path = "../../../frame/core-fellowship", default-features = false }
-pallet-democracy = { path = "../../../frame/democracy", default-features = false }
-pallet-election-provider-multi-phase = { path = "../../../frame/election-provider-multi-phase", default-features = false }
-pallet-election-provider-support-benchmarking = { path = "../../../frame/election-provider-support/benchmarking", default-features = false, optional = true }
-pallet-elections-phragmen = { path = "../../../frame/elections-phragmen", default-features = false }
-pallet-example-tasks = { path = "../../../frame/examples/tasks", default-features = false }
-pallet-fast-unstake = { path = "../../../frame/fast-unstake", default-features = false }
-pallet-migrations = { path = "../../../frame/migrations", default-features = false }
+# Example pallets that are not published:
 pallet-example-mbm = { path = "../../../frame/examples/multi-block-migrations", default-features = false }
-pallet-nis = { path = "../../../frame/nis", default-features = false }
-pallet-grandpa = { path = "../../../frame/grandpa", default-features = false }
-pallet-im-online = { path = "../../../frame/im-online", default-features = false }
-pallet-indices = { path = "../../../frame/indices", default-features = false }
-pallet-identity = { path = "../../../frame/identity", default-features = false }
-pallet-lottery = { path = "../../../frame/lottery", default-features = false }
-pallet-membership = { path = "../../../frame/membership", default-features = false }
-pallet-message-queue = { path = "../../../frame/message-queue", default-features = false }
-pallet-mixnet = { path = "../../../frame/mixnet", default-features = false }
-pallet-mmr = { path = "../../../frame/merkle-mountain-range", default-features = false }
-pallet-multisig = { path = "../../../frame/multisig", default-features = false }
-pallet-nfts = { path = "../../../frame/nfts", default-features = false }
-pallet-nfts-runtime-api = { path = "../../../frame/nfts/runtime-api", default-features = false }
-pallet-nft-fractionalization = { path = "../../../frame/nft-fractionalization", default-features = false }
-pallet-nomination-pools = { path = "../../../frame/nomination-pools", default-features = false }
-pallet-nomination-pools-benchmarking = { path = "../../../frame/nomination-pools/benchmarking", default-features = false, optional = true }
-pallet-nomination-pools-runtime-api = { path = "../../../frame/nomination-pools/runtime-api", default-features = false }
-pallet-offences = { path = "../../../frame/offences", default-features = false }
-pallet-offences-benchmarking = { path = "../../../frame/offences/benchmarking", default-features = false, optional = true }
-pallet-glutton = { path = "../../../frame/glutton", default-features = false }
-pallet-preimage = { path = "../../../frame/preimage", default-features = false }
-pallet-proxy = { path = "../../../frame/proxy", default-features = false }
-pallet-insecure-randomness-collective-flip = { path = "../../../frame/insecure-randomness-collective-flip", default-features = false }
-pallet-ranked-collective = { path = "../../../frame/ranked-collective", default-features = false }
-pallet-recovery = { path = "../../../frame/recovery", default-features = false }
-pallet-referenda = { path = "../../../frame/referenda", default-features = false }
-pallet-remark = { path = "../../../frame/remark", default-features = false }
-pallet-root-testing = { path = "../../../frame/root-testing", default-features = false }
-pallet-salary = { path = "../../../frame/salary", default-features = false }
-pallet-session = { path = "../../../frame/session", default-features = false, features = ["historical"] }
-pallet-session-benchmarking = { path = "../../../frame/session/benchmarking", default-features = false, optional = true }
-pallet-staking = { path = "../../../frame/staking", default-features = false }
-pallet-staking-reward-curve = { path = "../../../frame/staking/reward-curve", default-features = false }
-pallet-staking-runtime-api = { path = "../../../frame/staking/runtime-api", default-features = false }
-pallet-state-trie-migration = { path = "../../../frame/state-trie-migration", default-features = false }
-pallet-statement = { path = "../../../frame/statement", default-features = false }
-pallet-scheduler = { path = "../../../frame/scheduler", default-features = false }
-pallet-society = { path = "../../../frame/society", default-features = false }
-pallet-sudo = { path = "../../../frame/sudo", default-features = false }
-pallet-timestamp = { path = "../../../frame/timestamp", default-features = false }
-pallet-tips = { path = "../../../frame/tips", default-features = false }
-pallet-treasury = { path = "../../../frame/treasury", default-features = false }
-pallet-utility = { path = "../../../frame/utility", default-features = false }
-pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false }
-pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false }
-pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment", default-features = false }
-pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment", default-features = false }
-pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment", default-features = false }
-pallet-transaction-storage = { path = "../../../frame/transaction-storage", default-features = false }
-pallet-uniques = { path = "../../../frame/uniques", default-features = false }
-pallet-vesting = { path = "../../../frame/vesting", default-features = false }
-pallet-whitelist = { path = "../../../frame/whitelist", default-features = false }
-pallet-tx-pause = { path = "../../../frame/tx-pause", default-features = false }
-pallet-safe-mode = { path = "../../../frame/safe-mode", default-features = false }
-pallet-parameters = { path = "../../../frame/parameters", default-features = false }
+pallet-example-tasks = { path = "../../../frame/examples/tasks", default-features = false }
 
 [build-dependencies]
 substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true }
 
 [features]
 default = ["std"]
-with-tracing = ["frame-executive/with-tracing"]
+with-tracing = ["polkadot-sdk/with-tracing"]
 std = [
 	"codec/std",
-	"frame-benchmarking-pallet-pov/std",
-	"frame-benchmarking/std",
-	"frame-election-provider-support/std",
-	"frame-executive/std",
-	"frame-metadata-hash-extension/std",
-	"frame-support/std",
-	"frame-system-benchmarking?/std",
-	"frame-system-rpc-runtime-api/std",
-	"frame-system/std",
-	"frame-try-runtime?/std",
 	"log/std",
 	"node-primitives/std",
-	"pallet-alliance/std",
-	"pallet-asset-conversion-ops/std",
-	"pallet-asset-conversion-tx-payment/std",
-	"pallet-asset-conversion/std",
-	"pallet-asset-rate/std",
-	"pallet-asset-tx-payment/std",
-	"pallet-assets/std",
-	"pallet-authority-discovery/std",
-	"pallet-authorship/std",
-	"pallet-babe/std",
-	"pallet-bags-list/std",
-	"pallet-balances/std",
-	"pallet-beefy-mmr/std",
-	"pallet-beefy/std",
-	"pallet-bounties/std",
-	"pallet-broker/std",
-	"pallet-child-bounties/std",
-	"pallet-collective/std",
-	"pallet-contracts/std",
-	"pallet-conviction-voting/std",
-	"pallet-core-fellowship/std",
-	"pallet-democracy/std",
-	"pallet-election-provider-multi-phase/std",
-	"pallet-election-provider-support-benchmarking?/std",
-	"pallet-elections-phragmen/std",
 	"pallet-example-mbm/std",
 	"pallet-example-tasks/std",
-	"pallet-fast-unstake/std",
-	"pallet-glutton/std",
-	"pallet-grandpa/std",
-	"pallet-identity/std",
-	"pallet-im-online/std",
-	"pallet-indices/std",
-	"pallet-insecure-randomness-collective-flip/std",
-	"pallet-lottery/std",
-	"pallet-membership/std",
-	"pallet-message-queue/std",
-	"pallet-migrations/std",
-	"pallet-mixnet/std",
-	"pallet-mmr/std",
-	"pallet-multisig/std",
-	"pallet-nft-fractionalization/std",
-	"pallet-nfts-runtime-api/std",
-	"pallet-nfts/std",
-	"pallet-nis/std",
-	"pallet-nomination-pools-benchmarking?/std",
-	"pallet-nomination-pools-runtime-api/std",
-	"pallet-nomination-pools/std",
-	"pallet-offences-benchmarking?/std",
-	"pallet-offences/std",
-	"pallet-parameters/std",
-	"pallet-preimage/std",
-	"pallet-proxy/std",
-	"pallet-ranked-collective/std",
-	"pallet-recovery/std",
-	"pallet-referenda/std",
-	"pallet-remark/std",
-	"pallet-root-testing/std",
-	"pallet-safe-mode/std",
-	"pallet-salary/std",
-	"pallet-scheduler/std",
-	"pallet-session-benchmarking?/std",
-	"pallet-session/std",
-	"pallet-skip-feeless-payment/std",
-	"pallet-society/std",
-	"pallet-staking-runtime-api/std",
-	"pallet-staking/std",
-	"pallet-state-trie-migration/std",
-	"pallet-statement/std",
-	"pallet-sudo/std",
-	"pallet-timestamp/std",
-	"pallet-tips/std",
-	"pallet-transaction-payment-rpc-runtime-api/std",
-	"pallet-transaction-payment/std",
-	"pallet-transaction-storage/std",
-	"pallet-treasury/std",
-	"pallet-tx-pause/std",
-	"pallet-uniques/std",
-	"pallet-utility/std",
-	"pallet-vesting/std",
-	"pallet-whitelist/std",
+	"polkadot-sdk/std",
 	"primitive-types/std",
 	"scale-info/std",
 	"serde_json/std",
-	"sp-api/std",
-	"sp-authority-discovery/std",
-	"sp-block-builder/std",
-	"sp-consensus-babe/std",
-	"sp-consensus-beefy/std",
-	"sp-consensus-grandpa/std",
-	"sp-core/std",
-	"sp-genesis-builder/std",
-	"sp-inherents/std",
-	"sp-io/std",
-	"sp-mixnet/std",
-	"sp-offchain/std",
-	"sp-runtime/std",
-	"sp-session/std",
-	"sp-staking/std",
-	"sp-statement-store/std",
-	"sp-std/std",
-	"sp-storage/std",
-	"sp-transaction-pool/std",
-	"sp-version/std",
 	"substrate-wasm-builder",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking-pallet-pov/runtime-benchmarks",
-	"frame-benchmarking/runtime-benchmarks",
-	"frame-election-provider-support/runtime-benchmarks",
-	"frame-support/runtime-benchmarks",
-	"frame-system-benchmarking/runtime-benchmarks",
-	"frame-system/runtime-benchmarks",
-	"pallet-alliance/runtime-benchmarks",
-	"pallet-asset-conversion-ops/runtime-benchmarks",
-	"pallet-asset-conversion/runtime-benchmarks",
-	"pallet-asset-rate/runtime-benchmarks",
-	"pallet-asset-tx-payment/runtime-benchmarks",
-	"pallet-assets/runtime-benchmarks",
-	"pallet-babe/runtime-benchmarks",
-	"pallet-bags-list/runtime-benchmarks",
-	"pallet-balances/runtime-benchmarks",
-	"pallet-bounties/runtime-benchmarks",
-	"pallet-broker/runtime-benchmarks",
-	"pallet-child-bounties/runtime-benchmarks",
-	"pallet-collective/runtime-benchmarks",
-	"pallet-contracts/runtime-benchmarks",
-	"pallet-conviction-voting/runtime-benchmarks",
-	"pallet-core-fellowship/runtime-benchmarks",
-	"pallet-democracy/runtime-benchmarks",
-	"pallet-election-provider-multi-phase/runtime-benchmarks",
-	"pallet-election-provider-support-benchmarking/runtime-benchmarks",
-	"pallet-elections-phragmen/runtime-benchmarks",
 	"pallet-example-mbm/runtime-benchmarks",
 	"pallet-example-tasks/runtime-benchmarks",
-	"pallet-fast-unstake/runtime-benchmarks",
-	"pallet-glutton/runtime-benchmarks",
-	"pallet-grandpa/runtime-benchmarks",
-	"pallet-identity/runtime-benchmarks",
-	"pallet-im-online/runtime-benchmarks",
-	"pallet-indices/runtime-benchmarks",
-	"pallet-lottery/runtime-benchmarks",
-	"pallet-membership/runtime-benchmarks",
-	"pallet-message-queue/runtime-benchmarks",
-	"pallet-migrations/runtime-benchmarks",
-	"pallet-mixnet/runtime-benchmarks",
-	"pallet-mmr/runtime-benchmarks",
-	"pallet-multisig/runtime-benchmarks",
-	"pallet-nft-fractionalization/runtime-benchmarks",
-	"pallet-nfts/runtime-benchmarks",
-	"pallet-nis/runtime-benchmarks",
-	"pallet-nomination-pools-benchmarking/runtime-benchmarks",
-	"pallet-nomination-pools/runtime-benchmarks",
-	"pallet-offences-benchmarking/runtime-benchmarks",
-	"pallet-offences/runtime-benchmarks",
-	"pallet-parameters/runtime-benchmarks",
-	"pallet-preimage/runtime-benchmarks",
-	"pallet-proxy/runtime-benchmarks",
-	"pallet-ranked-collective/runtime-benchmarks",
-	"pallet-recovery/runtime-benchmarks",
-	"pallet-referenda/runtime-benchmarks",
-	"pallet-remark/runtime-benchmarks",
-	"pallet-safe-mode/runtime-benchmarks",
-	"pallet-salary/runtime-benchmarks",
-	"pallet-scheduler/runtime-benchmarks",
-	"pallet-session-benchmarking/runtime-benchmarks",
-	"pallet-skip-feeless-payment/runtime-benchmarks",
-	"pallet-society/runtime-benchmarks",
-	"pallet-staking/runtime-benchmarks",
-	"pallet-state-trie-migration/runtime-benchmarks",
-	"pallet-sudo/runtime-benchmarks",
-	"pallet-timestamp/runtime-benchmarks",
-	"pallet-tips/runtime-benchmarks",
-	"pallet-transaction-storage/runtime-benchmarks",
-	"pallet-treasury/runtime-benchmarks",
-	"pallet-tx-pause/runtime-benchmarks",
-	"pallet-uniques/runtime-benchmarks",
-	"pallet-utility/runtime-benchmarks",
-	"pallet-vesting/runtime-benchmarks",
-	"pallet-whitelist/runtime-benchmarks",
-	"sp-runtime/runtime-benchmarks",
-	"sp-staking/runtime-benchmarks",
+	"polkadot-sdk/runtime-benchmarks",
 ]
 try-runtime = [
-	"frame-benchmarking-pallet-pov/try-runtime",
-	"frame-election-provider-support/try-runtime",
-	"frame-executive/try-runtime",
-	"frame-support/try-runtime",
-	"frame-system/try-runtime",
-	"frame-try-runtime/try-runtime",
-	"pallet-alliance/try-runtime",
-	"pallet-asset-conversion-ops/try-runtime",
-	"pallet-asset-conversion-tx-payment/try-runtime",
-	"pallet-asset-conversion/try-runtime",
-	"pallet-asset-rate/try-runtime",
-	"pallet-asset-tx-payment/try-runtime",
-	"pallet-assets/try-runtime",
-	"pallet-authority-discovery/try-runtime",
-	"pallet-authorship/try-runtime",
-	"pallet-babe/try-runtime",
-	"pallet-bags-list/try-runtime",
-	"pallet-balances/try-runtime",
-	"pallet-beefy-mmr/try-runtime",
-	"pallet-beefy/try-runtime",
-	"pallet-bounties/try-runtime",
-	"pallet-broker/try-runtime",
-	"pallet-child-bounties/try-runtime",
-	"pallet-collective/try-runtime",
-	"pallet-contracts/try-runtime",
-	"pallet-conviction-voting/try-runtime",
-	"pallet-core-fellowship/try-runtime",
-	"pallet-democracy/try-runtime",
-	"pallet-election-provider-multi-phase/try-runtime",
-	"pallet-elections-phragmen/try-runtime",
 	"pallet-example-mbm/try-runtime",
 	"pallet-example-tasks/try-runtime",
-	"pallet-fast-unstake/try-runtime",
-	"pallet-glutton/try-runtime",
-	"pallet-grandpa/try-runtime",
-	"pallet-identity/try-runtime",
-	"pallet-im-online/try-runtime",
-	"pallet-indices/try-runtime",
-	"pallet-insecure-randomness-collective-flip/try-runtime",
-	"pallet-lottery/try-runtime",
-	"pallet-membership/try-runtime",
-	"pallet-message-queue/try-runtime",
-	"pallet-migrations/try-runtime",
-	"pallet-mixnet/try-runtime",
-	"pallet-mmr/try-runtime",
-	"pallet-multisig/try-runtime",
-	"pallet-nft-fractionalization/try-runtime",
-	"pallet-nfts/try-runtime",
-	"pallet-nis/try-runtime",
-	"pallet-nomination-pools/try-runtime",
-	"pallet-offences/try-runtime",
-	"pallet-parameters/try-runtime",
-	"pallet-preimage/try-runtime",
-	"pallet-proxy/try-runtime",
-	"pallet-ranked-collective/try-runtime",
-	"pallet-recovery/try-runtime",
-	"pallet-referenda/try-runtime",
-	"pallet-remark/try-runtime",
-	"pallet-root-testing/try-runtime",
-	"pallet-safe-mode/try-runtime",
-	"pallet-salary/try-runtime",
-	"pallet-scheduler/try-runtime",
-	"pallet-session/try-runtime",
-	"pallet-skip-feeless-payment/try-runtime",
-	"pallet-society/try-runtime",
-	"pallet-staking/try-runtime",
-	"pallet-state-trie-migration/try-runtime",
-	"pallet-statement/try-runtime",
-	"pallet-sudo/try-runtime",
-	"pallet-timestamp/try-runtime",
-	"pallet-tips/try-runtime",
-	"pallet-transaction-payment/try-runtime",
-	"pallet-transaction-storage/try-runtime",
-	"pallet-treasury/try-runtime",
-	"pallet-tx-pause/try-runtime",
-	"pallet-uniques/try-runtime",
-	"pallet-utility/try-runtime",
-	"pallet-vesting/try-runtime",
-	"pallet-whitelist/try-runtime",
-	"sp-runtime/try-runtime",
+	"polkadot-sdk/try-runtime",
 ]
 experimental = [
-	"frame-support/experimental",
-	"frame-system/experimental",
 	"pallet-example-tasks/experimental",
 ]
diff --git a/substrate/bin/node/runtime/src/assets_api.rs b/substrate/bin/node/runtime/src/assets_api.rs
index 792ed7c6576..38ec5650711 100644
--- a/substrate/bin/node/runtime/src/assets_api.rs
+++ b/substrate/bin/node/runtime/src/assets_api.rs
@@ -18,6 +18,8 @@
 
 //! Runtime API definition for assets.
 
+use polkadot_sdk::*;
+
 use codec::Codec;
 use sp_std::vec::Vec;
diff --git a/substrate/bin/node/runtime/src/impls.rs b/substrate/bin/node/runtime/src/impls.rs
index 34f043b33a4..dbe562857c9 100644
--- a/substrate/bin/node/runtime/src/impls.rs
+++ b/substrate/bin/node/runtime/src/impls.rs
@@ -17,6 +17,8 @@
 
 //! Some configurable implementations as associated type for the substrate runtime.
 
+use polkadot_sdk::*;
+
 use frame_support::{
 	pallet_prelude::*,
 	traits::{
@@ -118,6 +120,7 @@ mod multiplier_tests {
 		weights::{Weight, WeightToFee},
 	};
 	use pallet_transaction_payment::{Multiplier, TargetedFeeAdjustment};
+	use polkadot_sdk::*;
 	use sp_runtime::{
 		assert_eq_error_rate,
 		traits::{Convert, One, Zero},
diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs
index 617088ffe1f..7d9128bb940 100644
--- a/substrate/bin/node/runtime/src/lib.rs
+++ b/substrate/bin/node/runtime/src/lib.rs
@@ -22,6 +22,8 @@
 // `construct_runtime!` does a lot of recursion and requires us to increase the limits.
 #![recursion_limit = "1024"]
 
+use polkadot_sdk::*;
+
 use codec::{Decode, Encode, MaxEncodedLen};
 use frame_election_provider_support::{
 	bounds::{ElectionBounds, ElectionBoundsBuilder},
@@ -2229,6 +2231,8 @@ impl pallet_parameters::Config for Runtime {
 
 #[frame_support::runtime]
 mod runtime {
+	use super::*;
+
 	#[runtime::runtime]
 	#[runtime::derive(
 		RuntimeCall,
@@ -2571,7 +2575,7 @@ impl pallet_beefy::Config for Runtime {
 
 /// MMR helper types.
 mod mmr {
-	use super::Runtime;
+	use super::*;
 	pub use pallet_mmr::primitives::*;
 
 	pub type Leaf = <<Runtime as pallet_mmr::Config>::LeafData as LeafDataProvider>::LeafData;
@@ -2581,7 +2585,7 @@ mod mmr {
 
 #[cfg(feature = "runtime-benchmarks")]
 mod benches {
-	frame_benchmarking::define_benchmarks!(
+	polkadot_sdk::frame_benchmarking::define_benchmarks!(
 		[frame_benchmarking, BaselineBench::<Runtime>]
 		[frame_benchmarking_pallet_pov, Pov]
 		[pallet_alliance, Alliance]
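All of these source changes follow the same pattern: the umbrella crate re-exports every SDK crate under its snake_case name, so a single glob import restores the paths that the removed `[dependencies]` entries used to provide. Schematically (a sketch of the pattern, not code from this patch):

```rust
// The umbrella re-exports each crate by its snake_case name, e.g.
// `polkadot_sdk::frame_support`, `polkadot_sdk::frame_system`, ...
use polkadot_sdk::*;

// These paths keep resolving, now through the re-exports above
// rather than through direct Cargo.toml dependencies:
use frame_support::traits::Get;
use frame_system::pallet_prelude::BlockNumberFor;
```

Note the extra `use polkadot_sdk::*;` inside `mod runtime` and the test module: glob imports are per-module, so every module that names SDK crates needs its own (or a `use super::*;`).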
diff --git a/substrate/client/chain-spec/derive/src/impls.rs b/substrate/client/chain-spec/derive/src/impls.rs
index c0624897c13..d8b20c5c2a8 100644
--- a/substrate/client/chain-spec/derive/src/impls.rs
+++ b/substrate/client/chain-spec/derive/src/impls.rs
@@ -19,7 +19,7 @@
 use proc_macro2::{Span, TokenStream};
 use proc_macro_crate::{crate_name, FoundCrate};
 use quote::quote;
-use syn::{DeriveInput, Error, Ident};
+use syn::{DeriveInput, Error, Ident, Path};
 
 const CRATE_NAME: &str = "sc-chain-spec";
 const ATTRIBUTE_NAME: &str = "forks";
@@ -143,7 +143,7 @@ pub fn group_derive(ast: &DeriveInput) -> proc_macro::TokenStream {
 pub fn derive(
 	ast: &DeriveInput,
 	derive: impl Fn(
-		&Ident,
+		&Path,
 		&Ident,
 		&syn::Generics,
 		Vec<&Ident>,
@@ -171,25 +171,28 @@ pub fn derive(
 	};
 
 	let name = &ast.ident;
-	let crate_name = match crate_name(CRATE_NAME) {
+	let crate_path = match crate_name(CRATE_NAME) {
 		Ok(FoundCrate::Itself) => CRATE_NAME.replace("-", "_"),
 		Ok(FoundCrate::Name(chain_spec_name)) => chain_spec_name,
-		Err(e) => {
-			let err = Error::new(Span::call_site(), &e).to_compile_error();
-			return quote!( #err ).into()
+		Err(e) => match crate_name("polkadot-sdk") {
+			Ok(FoundCrate::Name(sdk)) => format!("{sdk}::{CRATE_NAME}").replace("-", "_"),
+			_ => {
+				return Error::new(Span::call_site(), &e).to_compile_error().into();
+			},
 		},
 	};
-	let crate_name = Ident::new(&crate_name, Span::call_site());
+	let crate_path =
+		syn::parse_str::<Path>(&crate_path).expect("crate_name returns valid path; qed");
 
 	let field_names = fields.named.iter().flat_map(|x| x.ident.as_ref()).collect::<Vec<_>>();
 	let field_types = fields.named.iter().map(|x| &x.ty).collect::<Vec<_>>();
 
-	derive(&crate_name, name, &ast.generics, field_names, field_types, fields).into()
+	derive(&crate_path, name, &ast.generics, field_names, field_types, fields).into()
 }
 
-fn generate_fork_fields(crate_name: &Ident, names: &[&Ident], types: &[&syn::Type]) -> TokenStream {
-	let crate_name = std::iter::repeat(crate_name);
+fn generate_fork_fields(crate_path: &Path, names: &[&Ident], types: &[&syn::Type]) -> TokenStream {
+	let crate_path = std::iter::repeat(crate_path);
 	quote! {
-		#( pub #names: Option<<#types as #crate_name::Group>::Fork>, )*
+		#( pub #names: Option<<#types as #crate_path::Group>::Fork>, )*
 	}
 }
diff --git a/substrate/frame/election-provider-support/solution-type/src/lib.rs b/substrate/frame/election-provider-support/solution-type/src/lib.rs
index 80773f6fb47..1a88f0cf835 100644
--- a/substrate/frame/election-provider-support/solution-type/src/lib.rs
+++ b/substrate/frame/election-provider-support/solution-type/src/lib.rs
@@ -263,7 +263,16 @@ fn imports() -> Result {
 			use _feps::private as _fepsp;
 		))
 	},
-	Err(e) => Err(syn::Error::new(Span::call_site(), e)),
+	Err(e) => match crate_name("polkadot-sdk") {
+		Ok(FoundCrate::Name(polkadot_sdk)) => {
+			let ident = syn::Ident::new(&polkadot_sdk, Span::call_site());
+			Ok(quote!(
+				use #ident::frame_election_provider_support as _feps;
+				use _feps::private as _fepsp;
+			))
+		},
+		_ => Err(syn::Error::new(Span::call_site(), e)),
+	},
 	}
 }
diff --git a/substrate/frame/staking/reward-curve/src/lib.rs b/substrate/frame/staking/reward-curve/src/lib.rs
index 1986357edab..cfb8b896f93 100644
--- a/substrate/frame/staking/reward-curve/src/lib.rs
+++ b/substrate/frame/staking/reward-curve/src/lib.rs
@@ -88,7 +88,13 @@ pub fn build(input: TokenStream) -> TokenStream {
 			let ident = syn::Ident::new(&sp_runtime, Span::call_site());
 			quote!( #[doc(hidden)] pub use #ident as _sp_runtime; )
 		},
-		Err(e) => syn::Error::new(Span::call_site(), e).to_compile_error(),
+		Err(e) => match crate_name("polkadot-sdk") {
+			Ok(FoundCrate::Name(polkadot_sdk)) => {
+				let ident = syn::Ident::new(&polkadot_sdk, Span::call_site());
+				quote!( #[doc(hidden)] pub use #ident::sp_runtime as _sp_runtime; )
+			},
+			_ => syn::Error::new(Span::call_site(), e).to_compile_error(),
+		},
 	};
 
 	let const_name = input.ident;
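Each of these macros gains the same two-step lookup: resolve the target crate directly via `proc-macro-crate`, and when it is not a dependency of the calling crate, reach it through the `polkadot-sdk` umbrella instead. Condensed into one helper (a sketch; the diffs above inline this logic per macro):

```rust
use proc_macro2::Span;
use proc_macro_crate::{crate_name, FoundCrate};

// Resolve `target` to a path usable in generated code, falling back to
// `<polkadot-sdk rename>::<target>` when only the umbrella is a dependency.
fn resolve_crate_path(target: &str) -> Result<syn::Path, syn::Error> {
    let path = match crate_name(target) {
        // The macro is being used inside the target crate itself.
        Ok(FoundCrate::Itself) => target.replace('-', "_"),
        // The target is a direct (possibly renamed) dependency.
        Ok(FoundCrate::Name(name)) => name,
        // Not a direct dependency: go through the umbrella if present.
        Err(e) => match crate_name("polkadot-sdk") {
            Ok(FoundCrate::Name(sdk)) => format!("{sdk}::{target}").replace('-', "_"),
            _ => return Err(syn::Error::new(Span::call_site(), e)),
        },
    };
    syn::parse_str(&path)
}
```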
diff --git a/substrate/frame/support/procedural/tools/src/lib.rs b/substrate/frame/support/procedural/tools/src/lib.rs
index 8952cd6011f..ea53335a88f 100644
--- a/substrate/frame/support/procedural/tools/src/lib.rs
+++ b/substrate/frame/support/procedural/tools/src/lib.rs
@@ -70,6 +70,8 @@ pub fn is_using_frame_crate(path: &syn::Path) -> bool {
 pub fn generate_access_from_frame_or_crate(def_crate: &str) -> Result<syn::Path, syn::Error> {
 	if let Some(path) = get_frame_crate_path(def_crate) {
 		Ok(path)
+	} else if let Some(path) = get_sdk_crate_path(def_crate) {
+		Ok(path)
 	} else {
 		let ident = match crate_name(def_crate) {
 			Ok(FoundCrate::Itself) => {
@@ -95,6 +97,13 @@ pub fn generate_hidden_includes(unique_id: &str, def_crate: &str) -> TokenStream
 				pub use #path as hidden_include;
 			}
 		)
+	} else if let Some(path) = get_sdk_crate_path(def_crate) {
+		quote::quote!(
+			#[doc(hidden)]
+			mod #mod_name {
+				pub use #path as hidden_include;
+			}
+		)
 	} else {
 		match crate_name(def_crate) {
 			Ok(FoundCrate::Itself) => quote!(),
@@ -128,6 +137,15 @@ fn get_frame_crate_path(def_crate: &str) -> Option<syn::Path> {
 	}
 }
 
+fn get_sdk_crate_path(def_crate: &str) -> Option<syn::Path> {
+	if let Ok(FoundCrate::Name(name)) = crate_name(&"polkadot-sdk") {
+		let path = format!("{}::{}", name, def_crate.to_string()).replace("-", "_");
+		Some(syn::parse_str::<syn::Path>(&path).expect("is a valid path; qed"))
+	} else {
+		None
+	}
+}
+
 // fn to remove white spaces around string types
 // (basically whitespaces around tokens)
 pub fn clean_type_string(input: &str) -> String {
diff --git a/substrate/primitives/api/proc-macro/src/utils.rs b/substrate/primitives/api/proc-macro/src/utils.rs
index d90b5605864..36577670a40 100644
--- a/substrate/primitives/api/proc-macro/src/utils.rs
+++ b/substrate/primitives/api/proc-macro/src/utils.rs
@@ -40,6 +40,10 @@ pub fn generate_crate_access() -> TokenStream {
 		let path = format!("{}::deps::sp_api::__private", name);
 		let path = syn::parse_str::<syn::Path>(&path).expect("is a valid path; qed");
 		quote!( #path )
+	} else if let Ok(FoundCrate::Name(name)) = crate_name(&"polkadot-sdk") {
+		let path = format!("{}::sp_api::__private", name);
+		let path = syn::parse_str::<syn::Path>(&path).expect("is a valid path; qed");
+		quote!( #path )
 	} else {
 		let err = Error::new(Span::call_site(), e).to_compile_error();
 		quote!( #err )
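The new `umbrella/Cargo.toml` that follows is mechanical but worth decoding: every crate is an *optional* dependency, and the feature lists use Cargo's `dep?/feature` syntax, which forwards a feature to a dependency only if that dependency was already enabled by something else. A condensed sketch of the shape, using two crates instead of the several hundred in the real file:

```toml
[dependencies]
pallet-babe = { path = "../substrate/frame/babe", default-features = false, optional = true }
pallet-balances = { path = "../substrate/frame/balances", default-features = false, optional = true }

[features]
# `?` forwards `std` only to crates the user actually enabled, so
# turning on `std` alone does not pull every crate into the build.
std = ["pallet-babe?/std", "pallet-balances?/std"]
# Umbrella features like `runtime` then enable whole groups at once.
runtime = ["pallet-babe", "pallet-balances"]
```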
"pallet-balances?/std", + "pallet-beefy-mmr?/std", + "pallet-beefy?/std", + "pallet-bounties?/std", + "pallet-bridge-grandpa?/std", + "pallet-bridge-messages?/std", + "pallet-bridge-parachains?/std", + "pallet-bridge-relayers?/std", + "pallet-broker?/std", + "pallet-child-bounties?/std", + "pallet-collator-selection?/std", + "pallet-collective-content?/std", + "pallet-collective?/std", + "pallet-contracts-mock-network?/std", + "pallet-contracts?/std", + "pallet-conviction-voting?/std", + "pallet-core-fellowship?/std", + "pallet-delegated-staking?/std", + "pallet-democracy?/std", + "pallet-dev-mode?/std", + "pallet-election-provider-multi-phase?/std", + "pallet-election-provider-support-benchmarking?/std", + "pallet-elections-phragmen?/std", + "pallet-fast-unstake?/std", + "pallet-glutton?/std", + "pallet-grandpa?/std", + "pallet-identity?/std", + "pallet-im-online?/std", + "pallet-indices?/std", + "pallet-insecure-randomness-collective-flip?/std", + "pallet-lottery?/std", + "pallet-membership?/std", + "pallet-message-queue?/std", + "pallet-migrations?/std", + "pallet-mixnet?/std", + "pallet-mmr?/std", + "pallet-multisig?/std", + "pallet-nft-fractionalization?/std", + "pallet-nfts-runtime-api?/std", + "pallet-nfts?/std", + "pallet-nis?/std", + "pallet-node-authorization?/std", + "pallet-nomination-pools-benchmarking?/std", + "pallet-nomination-pools-runtime-api?/std", + "pallet-nomination-pools?/std", + "pallet-offences-benchmarking?/std", + "pallet-offences?/std", + "pallet-paged-list?/std", + "pallet-parameters?/std", + "pallet-preimage?/std", + "pallet-proxy?/std", + "pallet-ranked-collective?/std", + "pallet-recovery?/std", + "pallet-referenda?/std", + "pallet-remark?/std", + "pallet-root-offences?/std", + "pallet-root-testing?/std", + "pallet-safe-mode?/std", + "pallet-salary?/std", + "pallet-scheduler?/std", + "pallet-scored-pool?/std", + "pallet-session-benchmarking?/std", + "pallet-session?/std", + "pallet-skip-feeless-payment?/std", + "pallet-society?/std", + "pallet-staking-reward-fn?/std", + "pallet-staking-runtime-api?/std", + "pallet-staking?/std", + "pallet-state-trie-migration?/std", + "pallet-statement?/std", + "pallet-sudo?/std", + "pallet-timestamp?/std", + "pallet-tips?/std", + "pallet-transaction-payment-rpc-runtime-api?/std", + "pallet-transaction-payment?/std", + "pallet-transaction-storage?/std", + "pallet-treasury?/std", + "pallet-tx-pause?/std", + "pallet-uniques?/std", + "pallet-utility?/std", + "pallet-vesting?/std", + "pallet-whitelist?/std", + "pallet-xcm-benchmarks?/std", + "pallet-xcm-bridge-hub-router?/std", + "pallet-xcm-bridge-hub?/std", + "pallet-xcm?/std", + "parachains-common?/std", + "parachains-runtimes-test-utils?/std", + "polkadot-core-primitives?/std", + "polkadot-parachain-primitives?/std", + "polkadot-primitives?/std", + "polkadot-runtime-common?/std", + "polkadot-runtime-metrics?/std", + "polkadot-runtime-parachains?/std", + "polkadot-sdk-frame?/std", + "rococo-runtime-constants?/std", + "sc-executor?/std", + "slot-range-helper?/std", + "snowbridge-beacon-primitives?/std", + "snowbridge-core?/std", + "snowbridge-ethereum?/std", + "snowbridge-outbound-queue-merkle-tree?/std", + "snowbridge-outbound-queue-runtime-api?/std", + "snowbridge-pallet-ethereum-client-fixtures?/std", + "snowbridge-pallet-ethereum-client?/std", + "snowbridge-pallet-inbound-queue-fixtures?/std", + "snowbridge-pallet-inbound-queue?/std", + "snowbridge-pallet-outbound-queue?/std", + "snowbridge-pallet-system?/std", + "snowbridge-router-primitives?/std", + 
"snowbridge-runtime-common?/std", + "snowbridge-runtime-test-common?/std", + "snowbridge-system-runtime-api?/std", + "sp-api-proc-macro?/std", + "sp-api?/std", + "sp-application-crypto?/std", + "sp-arithmetic?/std", + "sp-authority-discovery?/std", + "sp-block-builder?/std", + "sp-consensus-aura?/std", + "sp-consensus-babe?/std", + "sp-consensus-beefy?/std", + "sp-consensus-grandpa?/std", + "sp-consensus-pow?/std", + "sp-consensus-slots?/std", + "sp-core-hashing?/std", + "sp-core?/std", + "sp-crypto-ec-utils?/std", + "sp-crypto-hashing?/std", + "sp-debug-derive?/std", + "sp-externalities?/std", + "sp-genesis-builder?/std", + "sp-inherents?/std", + "sp-io?/std", + "sp-keyring?/std", + "sp-keystore?/std", + "sp-metadata-ir?/std", + "sp-mixnet?/std", + "sp-mmr-primitives?/std", + "sp-npos-elections?/std", + "sp-offchain?/std", + "sp-runtime-interface?/std", + "sp-runtime?/std", + "sp-session?/std", + "sp-staking?/std", + "sp-state-machine?/std", + "sp-statement-store?/std", + "sp-std?/std", + "sp-storage?/std", + "sp-timestamp?/std", + "sp-tracing?/std", + "sp-transaction-pool?/std", + "sp-transaction-storage-proof?/std", + "sp-trie?/std", + "sp-version?/std", + "sp-wasm-interface?/std", + "sp-weights?/std", + "staging-parachain-info?/std", + "staging-xcm-builder?/std", + "staging-xcm-executor?/std", + "staging-xcm?/std", + "substrate-bip39?/std", + "testnet-parachains-constants?/std", + "westend-runtime-constants?/std", + "xcm-fee-payment-runtime-api?/std", +] +runtime-benchmarks = [ + "assets-common?/runtime-benchmarks", + "bridge-hub-common?/runtime-benchmarks", + "bridge-runtime-common?/runtime-benchmarks", + "cumulus-pallet-dmp-queue?/runtime-benchmarks", + "cumulus-pallet-parachain-system?/runtime-benchmarks", + "cumulus-pallet-session-benchmarking?/runtime-benchmarks", + "cumulus-pallet-xcmp-queue?/runtime-benchmarks", + "cumulus-primitives-core?/runtime-benchmarks", + "cumulus-primitives-utility?/runtime-benchmarks", + "frame-benchmarking-cli?/runtime-benchmarks", + "frame-benchmarking-pallet-pov?/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-election-provider-support?/runtime-benchmarks", + "frame-support?/runtime-benchmarks", + "frame-system-benchmarking?/runtime-benchmarks", + "frame-system?/runtime-benchmarks", + "pallet-alliance?/runtime-benchmarks", + "pallet-asset-conversion-ops?/runtime-benchmarks", + "pallet-asset-conversion?/runtime-benchmarks", + "pallet-asset-rate?/runtime-benchmarks", + "pallet-asset-tx-payment?/runtime-benchmarks", + "pallet-assets?/runtime-benchmarks", + "pallet-babe?/runtime-benchmarks", + "pallet-bags-list?/runtime-benchmarks", + "pallet-balances?/runtime-benchmarks", + "pallet-bounties?/runtime-benchmarks", + "pallet-bridge-grandpa?/runtime-benchmarks", + "pallet-bridge-messages?/runtime-benchmarks", + "pallet-bridge-parachains?/runtime-benchmarks", + "pallet-bridge-relayers?/runtime-benchmarks", + "pallet-broker?/runtime-benchmarks", + "pallet-child-bounties?/runtime-benchmarks", + "pallet-collator-selection?/runtime-benchmarks", + "pallet-collective-content?/runtime-benchmarks", + "pallet-collective?/runtime-benchmarks", + "pallet-contracts-mock-network?/runtime-benchmarks", + "pallet-contracts?/runtime-benchmarks", + "pallet-conviction-voting?/runtime-benchmarks", + "pallet-core-fellowship?/runtime-benchmarks", + "pallet-delegated-staking?/runtime-benchmarks", + "pallet-democracy?/runtime-benchmarks", + "pallet-election-provider-multi-phase?/runtime-benchmarks", + 
"pallet-election-provider-support-benchmarking?/runtime-benchmarks", + "pallet-elections-phragmen?/runtime-benchmarks", + "pallet-fast-unstake?/runtime-benchmarks", + "pallet-glutton?/runtime-benchmarks", + "pallet-grandpa?/runtime-benchmarks", + "pallet-identity?/runtime-benchmarks", + "pallet-im-online?/runtime-benchmarks", + "pallet-indices?/runtime-benchmarks", + "pallet-lottery?/runtime-benchmarks", + "pallet-membership?/runtime-benchmarks", + "pallet-message-queue?/runtime-benchmarks", + "pallet-migrations?/runtime-benchmarks", + "pallet-mixnet?/runtime-benchmarks", + "pallet-mmr?/runtime-benchmarks", + "pallet-multisig?/runtime-benchmarks", + "pallet-nft-fractionalization?/runtime-benchmarks", + "pallet-nfts?/runtime-benchmarks", + "pallet-nis?/runtime-benchmarks", + "pallet-nomination-pools-benchmarking?/runtime-benchmarks", + "pallet-nomination-pools?/runtime-benchmarks", + "pallet-offences-benchmarking?/runtime-benchmarks", + "pallet-offences?/runtime-benchmarks", + "pallet-paged-list?/runtime-benchmarks", + "pallet-parameters?/runtime-benchmarks", + "pallet-preimage?/runtime-benchmarks", + "pallet-proxy?/runtime-benchmarks", + "pallet-ranked-collective?/runtime-benchmarks", + "pallet-recovery?/runtime-benchmarks", + "pallet-referenda?/runtime-benchmarks", + "pallet-remark?/runtime-benchmarks", + "pallet-root-offences?/runtime-benchmarks", + "pallet-safe-mode?/runtime-benchmarks", + "pallet-salary?/runtime-benchmarks", + "pallet-scheduler?/runtime-benchmarks", + "pallet-session-benchmarking?/runtime-benchmarks", + "pallet-skip-feeless-payment?/runtime-benchmarks", + "pallet-society?/runtime-benchmarks", + "pallet-staking?/runtime-benchmarks", + "pallet-state-trie-migration?/runtime-benchmarks", + "pallet-sudo?/runtime-benchmarks", + "pallet-timestamp?/runtime-benchmarks", + "pallet-tips?/runtime-benchmarks", + "pallet-transaction-storage?/runtime-benchmarks", + "pallet-treasury?/runtime-benchmarks", + "pallet-tx-pause?/runtime-benchmarks", + "pallet-uniques?/runtime-benchmarks", + "pallet-utility?/runtime-benchmarks", + "pallet-vesting?/runtime-benchmarks", + "pallet-whitelist?/runtime-benchmarks", + "pallet-xcm-benchmarks?/runtime-benchmarks", + "pallet-xcm-bridge-hub-router?/runtime-benchmarks", + "pallet-xcm-bridge-hub?/runtime-benchmarks", + "pallet-xcm?/runtime-benchmarks", + "parachains-common?/runtime-benchmarks", + "polkadot-cli?/runtime-benchmarks", + "polkadot-node-metrics?/runtime-benchmarks", + "polkadot-parachain-primitives?/runtime-benchmarks", + "polkadot-primitives?/runtime-benchmarks", + "polkadot-runtime-common?/runtime-benchmarks", + "polkadot-runtime-parachains?/runtime-benchmarks", + "polkadot-sdk-frame?/runtime-benchmarks", + "polkadot-service?/runtime-benchmarks", + "sc-client-db?/runtime-benchmarks", + "sc-service?/runtime-benchmarks", + "snowbridge-core?/runtime-benchmarks", + "snowbridge-pallet-ethereum-client-fixtures?/runtime-benchmarks", + "snowbridge-pallet-ethereum-client?/runtime-benchmarks", + "snowbridge-pallet-inbound-queue-fixtures?/runtime-benchmarks", + "snowbridge-pallet-inbound-queue?/runtime-benchmarks", + "snowbridge-pallet-outbound-queue?/runtime-benchmarks", + "snowbridge-pallet-system?/runtime-benchmarks", + "snowbridge-router-primitives?/runtime-benchmarks", + "snowbridge-runtime-common?/runtime-benchmarks", + "snowbridge-runtime-test-common?/runtime-benchmarks", + "sp-runtime?/runtime-benchmarks", + "sp-staking?/runtime-benchmarks", + "staging-node-inspect?/runtime-benchmarks", + "staging-xcm-builder?/runtime-benchmarks", + 
"staging-xcm-executor?/runtime-benchmarks", + "xcm-fee-payment-runtime-api?/runtime-benchmarks", +] +try-runtime = [ + "cumulus-pallet-aura-ext?/try-runtime", + "cumulus-pallet-dmp-queue?/try-runtime", + "cumulus-pallet-parachain-system?/try-runtime", + "cumulus-pallet-solo-to-para?/try-runtime", + "cumulus-pallet-xcm?/try-runtime", + "cumulus-pallet-xcmp-queue?/try-runtime", + "cumulus-ping?/try-runtime", + "frame-benchmarking-pallet-pov?/try-runtime", + "frame-election-provider-support?/try-runtime", + "frame-executive?/try-runtime", + "frame-support?/try-runtime", + "frame-system?/try-runtime", + "frame-try-runtime/try-runtime", + "pallet-alliance?/try-runtime", + "pallet-asset-conversion-ops?/try-runtime", + "pallet-asset-conversion-tx-payment?/try-runtime", + "pallet-asset-conversion?/try-runtime", + "pallet-asset-rate?/try-runtime", + "pallet-asset-tx-payment?/try-runtime", + "pallet-assets?/try-runtime", + "pallet-atomic-swap?/try-runtime", + "pallet-aura?/try-runtime", + "pallet-authority-discovery?/try-runtime", + "pallet-authorship?/try-runtime", + "pallet-babe?/try-runtime", + "pallet-bags-list?/try-runtime", + "pallet-balances?/try-runtime", + "pallet-beefy-mmr?/try-runtime", + "pallet-beefy?/try-runtime", + "pallet-bounties?/try-runtime", + "pallet-bridge-grandpa?/try-runtime", + "pallet-bridge-messages?/try-runtime", + "pallet-bridge-parachains?/try-runtime", + "pallet-bridge-relayers?/try-runtime", + "pallet-broker?/try-runtime", + "pallet-child-bounties?/try-runtime", + "pallet-collator-selection?/try-runtime", + "pallet-collective-content?/try-runtime", + "pallet-collective?/try-runtime", + "pallet-contracts?/try-runtime", + "pallet-conviction-voting?/try-runtime", + "pallet-core-fellowship?/try-runtime", + "pallet-delegated-staking?/try-runtime", + "pallet-democracy?/try-runtime", + "pallet-dev-mode?/try-runtime", + "pallet-election-provider-multi-phase?/try-runtime", + "pallet-elections-phragmen?/try-runtime", + "pallet-fast-unstake?/try-runtime", + "pallet-glutton?/try-runtime", + "pallet-grandpa?/try-runtime", + "pallet-identity?/try-runtime", + "pallet-im-online?/try-runtime", + "pallet-indices?/try-runtime", + "pallet-insecure-randomness-collective-flip?/try-runtime", + "pallet-lottery?/try-runtime", + "pallet-membership?/try-runtime", + "pallet-message-queue?/try-runtime", + "pallet-migrations?/try-runtime", + "pallet-mixnet?/try-runtime", + "pallet-mmr?/try-runtime", + "pallet-multisig?/try-runtime", + "pallet-nft-fractionalization?/try-runtime", + "pallet-nfts?/try-runtime", + "pallet-nis?/try-runtime", + "pallet-node-authorization?/try-runtime", + "pallet-nomination-pools?/try-runtime", + "pallet-offences?/try-runtime", + "pallet-paged-list?/try-runtime", + "pallet-parameters?/try-runtime", + "pallet-preimage?/try-runtime", + "pallet-proxy?/try-runtime", + "pallet-ranked-collective?/try-runtime", + "pallet-recovery?/try-runtime", + "pallet-referenda?/try-runtime", + "pallet-remark?/try-runtime", + "pallet-root-offences?/try-runtime", + "pallet-root-testing?/try-runtime", + "pallet-safe-mode?/try-runtime", + "pallet-salary?/try-runtime", + "pallet-scheduler?/try-runtime", + "pallet-scored-pool?/try-runtime", + "pallet-session?/try-runtime", + "pallet-skip-feeless-payment?/try-runtime", + "pallet-society?/try-runtime", + "pallet-staking?/try-runtime", + "pallet-state-trie-migration?/try-runtime", + "pallet-statement?/try-runtime", + "pallet-sudo?/try-runtime", + "pallet-timestamp?/try-runtime", + "pallet-tips?/try-runtime", + 
"pallet-transaction-payment?/try-runtime", + "pallet-transaction-storage?/try-runtime", + "pallet-treasury?/try-runtime", + "pallet-tx-pause?/try-runtime", + "pallet-uniques?/try-runtime", + "pallet-utility?/try-runtime", + "pallet-vesting?/try-runtime", + "pallet-whitelist?/try-runtime", + "pallet-xcm-bridge-hub-router?/try-runtime", + "pallet-xcm-bridge-hub?/try-runtime", + "pallet-xcm?/try-runtime", + "polkadot-cli?/try-runtime", + "polkadot-runtime-common?/try-runtime", + "polkadot-runtime-parachains?/try-runtime", + "polkadot-sdk-frame?/try-runtime", + "polkadot-service?/try-runtime", + "snowbridge-pallet-ethereum-client?/try-runtime", + "snowbridge-pallet-inbound-queue?/try-runtime", + "snowbridge-pallet-outbound-queue?/try-runtime", + "snowbridge-pallet-system?/try-runtime", + "sp-runtime?/try-runtime", + "staging-parachain-info?/try-runtime", +] +serde = [ + "bp-polkadot-core?/serde", + "frame-benchmarking?/serde", + "pallet-asset-tx-payment?/serde", + "pallet-beefy-mmr?/serde", + "pallet-beefy?/serde", + "pallet-contracts?/serde", + "pallet-conviction-voting?/serde", + "pallet-democracy?/serde", + "pallet-message-queue?/serde", + "pallet-offences?/serde", + "pallet-parameters?/serde", + "pallet-referenda?/serde", + "pallet-remark?/serde", + "pallet-state-trie-migration?/serde", + "pallet-tips?/serde", + "pallet-transaction-payment?/serde", + "pallet-transaction-storage?/serde", + "pallet-treasury?/serde", + "pallet-xcm?/serde", + "snowbridge-beacon-primitives?/serde", + "snowbridge-core?/serde", + "snowbridge-ethereum?/serde", + "snowbridge-pallet-ethereum-client?/serde", + "snowbridge-pallet-inbound-queue?/serde", + "sp-application-crypto?/serde", + "sp-arithmetic?/serde", + "sp-authority-discovery?/serde", + "sp-consensus-aura?/serde", + "sp-consensus-babe?/serde", + "sp-consensus-beefy?/serde", + "sp-consensus-grandpa?/serde", + "sp-consensus-slots?/serde", + "sp-core?/serde", + "sp-mmr-primitives?/serde", + "sp-npos-elections?/serde", + "sp-runtime?/serde", + "sp-staking?/serde", + "sp-statement-store?/serde", + "sp-storage?/serde", + "sp-version?/serde", + "sp-weights?/serde", +] +experimental = [ + "frame-support-procedural?/experimental", + "frame-support?/experimental", + "frame-system?/experimental", + "polkadot-sdk-frame?/experimental", +] +with-tracing = [ + "frame-executive?/with-tracing", + "frame-executive?/with-tracing", + "sp-io?/with-tracing", + "sp-io?/with-tracing", + "sp-tracing?/with-tracing", + "sp-tracing?/with-tracing", +] +runtime = ["assets-common", "binary-merkle-tree", "bp-asset-hub-rococo", "bp-asset-hub-westend", "bp-bridge-hub-cumulus", "bp-bridge-hub-kusama", "bp-bridge-hub-polkadot", "bp-bridge-hub-rococo", "bp-bridge-hub-westend", "bp-header-chain", "bp-kusama", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-bulletin", "bp-polkadot-core", "bp-relayers", "bp-rococo", "bp-runtime", "bp-test-utils", "bp-westend", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", 
"frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "rococo-runtime-constants", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", 
"sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "westend-runtime-constants", "xcm-fee-payment-runtime-api", "xcm-procedural"] +node = ["asset-test-utils", "bridge-hub-test-utils", "cumulus-client-cli", "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", "cumulus-client-consensus-proposer", "cumulus-client-consensus-relay-chain", "cumulus-client-network", "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", "cumulus-client-service", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", "cumulus-relay-chain-rpc-interface", "cumulus-test-relay-sproof-builder", "emulated-integration-tests-common", "fork-tree", "frame-benchmarking-cli", "frame-remote-externalities", "frame-support-procedural-tools", "generate-bags", "mmr-gadget", "mmr-rpc", "pallet-contracts-mock-network", "pallet-transaction-payment-rpc", "parachains-runtimes-test-utils", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", "polkadot-availability-recovery", "polkadot-cli", "polkadot-collator-protocol", "polkadot-dispute-distribution", "polkadot-erasure-coding", "polkadot-gossip-support", "polkadot-network-bridge", "polkadot-node-collation-generation", "polkadot-node-core-approval-voting", "polkadot-node-core-av-store", "polkadot-node-core-backing", "polkadot-node-core-bitfield-signing", "polkadot-node-core-candidate-validation", "polkadot-node-core-chain-api", "polkadot-node-core-chain-selection", "polkadot-node-core-dispute-coordinator", "polkadot-node-core-parachains-inherent", "polkadot-node-core-prospective-parachains", "polkadot-node-core-provisioner", "polkadot-node-core-pvf", "polkadot-node-core-pvf-checker", "polkadot-node-core-pvf-common", "polkadot-node-core-pvf-execute-worker", "polkadot-node-core-pvf-prepare-worker", "polkadot-node-core-runtime-api", "polkadot-node-jaeger", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-overseer", "polkadot-rpc", "polkadot-service", "polkadot-statement-distribution", "polkadot-statement-table", "sc-allocator", "sc-authority-discovery", "sc-basic-authorship", "sc-block-builder", "sc-chain-spec", "sc-cli", "sc-client-api", "sc-client-db", "sc-consensus", "sc-consensus-aura", "sc-consensus-babe", "sc-consensus-babe-rpc", "sc-consensus-beefy", "sc-consensus-beefy-rpc", "sc-consensus-epochs", 
"sc-consensus-grandpa", "sc-consensus-grandpa-rpc", "sc-consensus-manual-seal", "sc-consensus-pow", "sc-consensus-slots", "sc-executor", "sc-executor-common", "sc-executor-polkavm", "sc-executor-wasmtime", "sc-informant", "sc-keystore", "sc-mixnet", "sc-network", "sc-network-common", "sc-network-gossip", "sc-network-light", "sc-network-statement", "sc-network-sync", "sc-network-transactions", "sc-network-types", "sc-offchain", "sc-proposer-metrics", "sc-rpc", "sc-rpc-api", "sc-rpc-server", "sc-rpc-spec-v2", "sc-service", "sc-state-db", "sc-statement-store", "sc-storage-monitor", "sc-sync-state-rpc", "sc-sysinfo", "sc-telemetry", "sc-tracing", "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", "snowbridge-runtime-test-common", "sp-blockchain", "sp-consensus", "sp-core-hashing", "sp-core-hashing-proc-macro", "sp-database", "sp-maybe-compressed-blob", "sp-panic-handler", "sp-rpc", "staging-node-inspect", "staging-tracking-allocator", "std", "subkey", "substrate-build-script-utils", "substrate-frame-rpc-support", "substrate-frame-rpc-system", "substrate-prometheus-endpoint", "substrate-rpc-client", "substrate-state-trie-migration-rpc", "substrate-wasm-builder", "tracing-gum", "xcm-emulator", "xcm-simulator"] +tuples-96 = [ + "frame-support-procedural?/tuples-96", + "frame-support?/tuples-96", +] + +[package.edition] +workspace = true + +[package.authors] +workspace = true + +[dependencies.assets-common] +path = "../cumulus/parachains/runtimes/assets/common" +default-features = false +optional = true + +[dependencies.binary-merkle-tree] +path = "../substrate/utils/binary-merkle-tree" +default-features = false +optional = true + +[dependencies.bp-asset-hub-rococo] +path = "../bridges/chains/chain-asset-hub-rococo" +default-features = false +optional = true + +[dependencies.bp-asset-hub-westend] +path = "../bridges/chains/chain-asset-hub-westend" +default-features = false +optional = true + +[dependencies.bp-bridge-hub-cumulus] +path = "../bridges/chains/chain-bridge-hub-cumulus" +default-features = false +optional = true + +[dependencies.bp-bridge-hub-kusama] +path = "../bridges/chains/chain-bridge-hub-kusama" +default-features = false +optional = true + +[dependencies.bp-bridge-hub-polkadot] +path = "../bridges/chains/chain-bridge-hub-polkadot" +default-features = false +optional = true + +[dependencies.bp-bridge-hub-rococo] +path = "../bridges/chains/chain-bridge-hub-rococo" +default-features = false +optional = true + +[dependencies.bp-bridge-hub-westend] +path = "../bridges/chains/chain-bridge-hub-westend" +default-features = false +optional = true + +[dependencies.bp-header-chain] +path = "../bridges/primitives/header-chain" +default-features = false +optional = true + +[dependencies.bp-kusama] +path = "../bridges/chains/chain-kusama" +default-features = false +optional = true + +[dependencies.bp-messages] +path = "../bridges/primitives/messages" +default-features = false +optional = true + +[dependencies.bp-parachains] +path = "../bridges/primitives/parachains" +default-features = false +optional = true + +[dependencies.bp-polkadot] +path = "../bridges/chains/chain-polkadot" +default-features = false +optional = true + +[dependencies.bp-polkadot-bulletin] +path = "../bridges/chains/chain-polkadot-bulletin" +default-features = false +optional = true + +[dependencies.bp-polkadot-core] +path = "../bridges/primitives/polkadot-core" +default-features = false +optional = true + +[dependencies.bp-relayers] +path = "../bridges/primitives/relayers" +default-features = false +optional = 
true + +[dependencies.bp-rococo] +path = "../bridges/chains/chain-rococo" +default-features = false +optional = true + +[dependencies.bp-runtime] +path = "../bridges/primitives/runtime" +default-features = false +optional = true + +[dependencies.bp-test-utils] +path = "../bridges/primitives/test-utils" +default-features = false +optional = true + +[dependencies.bp-westend] +path = "../bridges/chains/chain-westend" +default-features = false +optional = true + +[dependencies.bp-xcm-bridge-hub] +path = "../bridges/primitives/xcm-bridge-hub" +default-features = false +optional = true + +[dependencies.bp-xcm-bridge-hub-router] +path = "../bridges/primitives/xcm-bridge-hub-router" +default-features = false +optional = true + +[dependencies.bridge-hub-common] +path = "../cumulus/parachains/runtimes/bridge-hubs/common" +default-features = false +optional = true + +[dependencies.bridge-runtime-common] +path = "../bridges/bin/runtime-common" +default-features = false +optional = true + +[dependencies.cumulus-pallet-aura-ext] +path = "../cumulus/pallets/aura-ext" +default-features = false +optional = true + +[dependencies.cumulus-pallet-dmp-queue] +path = "../cumulus/pallets/dmp-queue" +default-features = false +optional = true + +[dependencies.cumulus-pallet-parachain-system] +path = "../cumulus/pallets/parachain-system" +default-features = false +optional = true + +[dependencies.cumulus-pallet-parachain-system-proc-macro] +path = "../cumulus/pallets/parachain-system/proc-macro" +default-features = false +optional = true + +[dependencies.cumulus-pallet-session-benchmarking] +path = "../cumulus/pallets/session-benchmarking" +default-features = false +optional = true + +[dependencies.cumulus-pallet-solo-to-para] +path = "../cumulus/pallets/solo-to-para" +default-features = false +optional = true + +[dependencies.cumulus-pallet-xcm] +path = "../cumulus/pallets/xcm" +default-features = false +optional = true + +[dependencies.cumulus-pallet-xcmp-queue] +path = "../cumulus/pallets/xcmp-queue" +default-features = false +optional = true + +[dependencies.cumulus-ping] +path = "../cumulus/parachains/pallets/ping" +default-features = false +optional = true + +[dependencies.cumulus-primitives-aura] +path = "../cumulus/primitives/aura" +default-features = false +optional = true + +[dependencies.cumulus-primitives-core] +path = "../cumulus/primitives/core" +default-features = false +optional = true + +[dependencies.cumulus-primitives-parachain-inherent] +path = "../cumulus/primitives/parachain-inherent" +default-features = false +optional = true + +[dependencies.cumulus-primitives-proof-size-hostfunction] +path = "../cumulus/primitives/proof-size-hostfunction" +default-features = false +optional = true + +[dependencies.cumulus-primitives-storage-weight-reclaim] +path = "../cumulus/primitives/storage-weight-reclaim" +default-features = false +optional = true + +[dependencies.cumulus-primitives-timestamp] +path = "../cumulus/primitives/timestamp" +default-features = false +optional = true + +[dependencies.cumulus-primitives-utility] +path = "../cumulus/primitives/utility" +default-features = false +optional = true + +[dependencies.frame-benchmarking] +path = "../substrate/frame/benchmarking" +default-features = false +optional = true + +[dependencies.frame-benchmarking-pallet-pov] +path = "../substrate/frame/benchmarking/pov" +default-features = false +optional = true + +[dependencies.frame-election-provider-solution-type] +path = "../substrate/frame/election-provider-support/solution-type" +default-features = 
false +optional = true + +[dependencies.frame-election-provider-support] +path = "../substrate/frame/election-provider-support" +default-features = false +optional = true + +[dependencies.frame-executive] +path = "../substrate/frame/executive" +default-features = false +optional = true + +[dependencies.frame-metadata-hash-extension] +path = "../substrate/frame/metadata-hash-extension" +default-features = false +optional = true + +[dependencies.frame-support] +path = "../substrate/frame/support" +default-features = false +optional = true + +[dependencies.frame-support-procedural] +path = "../substrate/frame/support/procedural" +default-features = false +optional = true + +[dependencies.frame-support-procedural-tools-derive] +path = "../substrate/frame/support/procedural/tools/derive" +default-features = false +optional = true + +[dependencies.frame-system] +path = "../substrate/frame/system" +default-features = false +optional = true + +[dependencies.frame-system-benchmarking] +path = "../substrate/frame/system/benchmarking" +default-features = false +optional = true + +[dependencies.frame-system-rpc-runtime-api] +path = "../substrate/frame/system/rpc/runtime-api" +default-features = false +optional = true + +[dependencies.frame-try-runtime] +path = "../substrate/frame/try-runtime" +default-features = false +optional = true + +[dependencies.pallet-alliance] +path = "../substrate/frame/alliance" +default-features = false +optional = true + +[dependencies.pallet-asset-conversion] +path = "../substrate/frame/asset-conversion" +default-features = false +optional = true + +[dependencies.pallet-asset-conversion-ops] +path = "../substrate/frame/asset-conversion/ops" +default-features = false +optional = true + +[dependencies.pallet-asset-conversion-tx-payment] +path = "../substrate/frame/transaction-payment/asset-conversion-tx-payment" +default-features = false +optional = true + +[dependencies.pallet-asset-rate] +path = "../substrate/frame/asset-rate" +default-features = false +optional = true + +[dependencies.pallet-asset-tx-payment] +path = "../substrate/frame/transaction-payment/asset-tx-payment" +default-features = false +optional = true + +[dependencies.pallet-assets] +path = "../substrate/frame/assets" +default-features = false +optional = true + +[dependencies.pallet-atomic-swap] +path = "../substrate/frame/atomic-swap" +default-features = false +optional = true + +[dependencies.pallet-aura] +path = "../substrate/frame/aura" +default-features = false +optional = true + +[dependencies.pallet-authority-discovery] +path = "../substrate/frame/authority-discovery" +default-features = false +optional = true + +[dependencies.pallet-authorship] +path = "../substrate/frame/authorship" +default-features = false +optional = true + +[dependencies.pallet-babe] +path = "../substrate/frame/babe" +default-features = false +optional = true + +[dependencies.pallet-bags-list] +path = "../substrate/frame/bags-list" +default-features = false +optional = true + +[dependencies.pallet-balances] +path = "../substrate/frame/balances" +default-features = false +optional = true + +[dependencies.pallet-beefy] +path = "../substrate/frame/beefy" +default-features = false +optional = true + +[dependencies.pallet-beefy-mmr] +path = "../substrate/frame/beefy-mmr" +default-features = false +optional = true + +[dependencies.pallet-bounties] +path = "../substrate/frame/bounties" +default-features = false +optional = true + +[dependencies.pallet-bridge-grandpa] +path = "../bridges/modules/grandpa" +default-features = false 
+optional = true + +[dependencies.pallet-bridge-messages] +path = "../bridges/modules/messages" +default-features = false +optional = true + +[dependencies.pallet-bridge-parachains] +path = "../bridges/modules/parachains" +default-features = false +optional = true + +[dependencies.pallet-bridge-relayers] +path = "../bridges/modules/relayers" +default-features = false +optional = true + +[dependencies.pallet-broker] +path = "../substrate/frame/broker" +default-features = false +optional = true + +[dependencies.pallet-child-bounties] +path = "../substrate/frame/child-bounties" +default-features = false +optional = true + +[dependencies.pallet-collator-selection] +path = "../cumulus/pallets/collator-selection" +default-features = false +optional = true + +[dependencies.pallet-collective] +path = "../substrate/frame/collective" +default-features = false +optional = true + +[dependencies.pallet-collective-content] +path = "../cumulus/parachains/pallets/collective-content" +default-features = false +optional = true + +[dependencies.pallet-contracts] +path = "../substrate/frame/contracts" +default-features = false +optional = true + +[dependencies.pallet-contracts-proc-macro] +path = "../substrate/frame/contracts/proc-macro" +default-features = false +optional = true + +[dependencies.pallet-contracts-uapi] +path = "../substrate/frame/contracts/uapi" +default-features = false +optional = true + +[dependencies.pallet-conviction-voting] +path = "../substrate/frame/conviction-voting" +default-features = false +optional = true + +[dependencies.pallet-core-fellowship] +path = "../substrate/frame/core-fellowship" +default-features = false +optional = true + +[dependencies.pallet-delegated-staking] +path = "../substrate/frame/delegated-staking" +default-features = false +optional = true + +[dependencies.pallet-democracy] +path = "../substrate/frame/democracy" +default-features = false +optional = true + +[dependencies.pallet-dev-mode] +path = "../substrate/frame/examples/dev-mode" +default-features = false +optional = true + +[dependencies.pallet-election-provider-multi-phase] +path = "../substrate/frame/election-provider-multi-phase" +default-features = false +optional = true + +[dependencies.pallet-election-provider-support-benchmarking] +path = "../substrate/frame/election-provider-support/benchmarking" +default-features = false +optional = true + +[dependencies.pallet-elections-phragmen] +path = "../substrate/frame/elections-phragmen" +default-features = false +optional = true + +[dependencies.pallet-fast-unstake] +path = "../substrate/frame/fast-unstake" +default-features = false +optional = true + +[dependencies.pallet-glutton] +path = "../substrate/frame/glutton" +default-features = false +optional = true + +[dependencies.pallet-grandpa] +path = "../substrate/frame/grandpa" +default-features = false +optional = true + +[dependencies.pallet-identity] +path = "../substrate/frame/identity" +default-features = false +optional = true + +[dependencies.pallet-im-online] +path = "../substrate/frame/im-online" +default-features = false +optional = true + +[dependencies.pallet-indices] +path = "../substrate/frame/indices" +default-features = false +optional = true + +[dependencies.pallet-insecure-randomness-collective-flip] +path = "../substrate/frame/insecure-randomness-collective-flip" +default-features = false +optional = true + +[dependencies.pallet-lottery] +path = "../substrate/frame/lottery" +default-features = false +optional = true + +[dependencies.pallet-membership] +path = 
"../substrate/frame/membership" +default-features = false +optional = true + +[dependencies.pallet-message-queue] +path = "../substrate/frame/message-queue" +default-features = false +optional = true + +[dependencies.pallet-migrations] +path = "../substrate/frame/migrations" +default-features = false +optional = true + +[dependencies.pallet-mixnet] +path = "../substrate/frame/mixnet" +default-features = false +optional = true + +[dependencies.pallet-mmr] +path = "../substrate/frame/merkle-mountain-range" +default-features = false +optional = true + +[dependencies.pallet-multisig] +path = "../substrate/frame/multisig" +default-features = false +optional = true + +[dependencies.pallet-nft-fractionalization] +path = "../substrate/frame/nft-fractionalization" +default-features = false +optional = true + +[dependencies.pallet-nfts] +path = "../substrate/frame/nfts" +default-features = false +optional = true + +[dependencies.pallet-nfts-runtime-api] +path = "../substrate/frame/nfts/runtime-api" +default-features = false +optional = true + +[dependencies.pallet-nis] +path = "../substrate/frame/nis" +default-features = false +optional = true + +[dependencies.pallet-node-authorization] +path = "../substrate/frame/node-authorization" +default-features = false +optional = true + +[dependencies.pallet-nomination-pools] +path = "../substrate/frame/nomination-pools" +default-features = false +optional = true + +[dependencies.pallet-nomination-pools-benchmarking] +path = "../substrate/frame/nomination-pools/benchmarking" +default-features = false +optional = true + +[dependencies.pallet-nomination-pools-runtime-api] +path = "../substrate/frame/nomination-pools/runtime-api" +default-features = false +optional = true + +[dependencies.pallet-offences] +path = "../substrate/frame/offences" +default-features = false +optional = true + +[dependencies.pallet-offences-benchmarking] +path = "../substrate/frame/offences/benchmarking" +default-features = false +optional = true + +[dependencies.pallet-paged-list] +path = "../substrate/frame/paged-list" +default-features = false +optional = true + +[dependencies.pallet-parameters] +path = "../substrate/frame/parameters" +default-features = false +optional = true + +[dependencies.pallet-preimage] +path = "../substrate/frame/preimage" +default-features = false +optional = true + +[dependencies.pallet-proxy] +path = "../substrate/frame/proxy" +default-features = false +optional = true + +[dependencies.pallet-ranked-collective] +path = "../substrate/frame/ranked-collective" +default-features = false +optional = true + +[dependencies.pallet-recovery] +path = "../substrate/frame/recovery" +default-features = false +optional = true + +[dependencies.pallet-referenda] +path = "../substrate/frame/referenda" +default-features = false +optional = true + +[dependencies.pallet-remark] +path = "../substrate/frame/remark" +default-features = false +optional = true + +[dependencies.pallet-root-offences] +path = "../substrate/frame/root-offences" +default-features = false +optional = true + +[dependencies.pallet-root-testing] +path = "../substrate/frame/root-testing" +default-features = false +optional = true + +[dependencies.pallet-safe-mode] +path = "../substrate/frame/safe-mode" +default-features = false +optional = true + +[dependencies.pallet-salary] +path = "../substrate/frame/salary" +default-features = false +optional = true + +[dependencies.pallet-scheduler] +path = "../substrate/frame/scheduler" +default-features = false +optional = true + +[dependencies.pallet-scored-pool] 
+path = "../substrate/frame/scored-pool" +default-features = false +optional = true + +[dependencies.pallet-session] +path = "../substrate/frame/session" +default-features = false +optional = true + +[dependencies.pallet-session-benchmarking] +path = "../substrate/frame/session/benchmarking" +default-features = false +optional = true + +[dependencies.pallet-skip-feeless-payment] +path = "../substrate/frame/transaction-payment/skip-feeless-payment" +default-features = false +optional = true + +[dependencies.pallet-society] +path = "../substrate/frame/society" +default-features = false +optional = true + +[dependencies.pallet-staking] +path = "../substrate/frame/staking" +default-features = false +optional = true + +[dependencies.pallet-staking-reward-curve] +path = "../substrate/frame/staking/reward-curve" +default-features = false +optional = true + +[dependencies.pallet-staking-reward-fn] +path = "../substrate/frame/staking/reward-fn" +default-features = false +optional = true + +[dependencies.pallet-staking-runtime-api] +path = "../substrate/frame/staking/runtime-api" +default-features = false +optional = true + +[dependencies.pallet-state-trie-migration] +path = "../substrate/frame/state-trie-migration" +default-features = false +optional = true + +[dependencies.pallet-statement] +path = "../substrate/frame/statement" +default-features = false +optional = true + +[dependencies.pallet-sudo] +path = "../substrate/frame/sudo" +default-features = false +optional = true + +[dependencies.pallet-timestamp] +path = "../substrate/frame/timestamp" +default-features = false +optional = true + +[dependencies.pallet-tips] +path = "../substrate/frame/tips" +default-features = false +optional = true + +[dependencies.pallet-transaction-payment] +path = "../substrate/frame/transaction-payment" +default-features = false +optional = true + +[dependencies.pallet-transaction-payment-rpc-runtime-api] +path = "../substrate/frame/transaction-payment/rpc/runtime-api" +default-features = false +optional = true + +[dependencies.pallet-transaction-storage] +path = "../substrate/frame/transaction-storage" +default-features = false +optional = true + +[dependencies.pallet-treasury] +path = "../substrate/frame/treasury" +default-features = false +optional = true + +[dependencies.pallet-tx-pause] +path = "../substrate/frame/tx-pause" +default-features = false +optional = true + +[dependencies.pallet-uniques] +path = "../substrate/frame/uniques" +default-features = false +optional = true + +[dependencies.pallet-utility] +path = "../substrate/frame/utility" +default-features = false +optional = true + +[dependencies.pallet-vesting] +path = "../substrate/frame/vesting" +default-features = false +optional = true + +[dependencies.pallet-whitelist] +path = "../substrate/frame/whitelist" +default-features = false +optional = true + +[dependencies.pallet-xcm] +path = "../polkadot/xcm/pallet-xcm" +default-features = false +optional = true + +[dependencies.pallet-xcm-benchmarks] +path = "../polkadot/xcm/pallet-xcm-benchmarks" +default-features = false +optional = true + +[dependencies.pallet-xcm-bridge-hub] +path = "../bridges/modules/xcm-bridge-hub" +default-features = false +optional = true + +[dependencies.pallet-xcm-bridge-hub-router] +path = "../bridges/modules/xcm-bridge-hub-router" +default-features = false +optional = true + +[dependencies.parachains-common] +path = "../cumulus/parachains/common" +default-features = false +optional = true + +[dependencies.polkadot-core-primitives] +path = "../polkadot/core-primitives" 
+default-features = false +optional = true + +[dependencies.polkadot-parachain-primitives] +path = "../polkadot/parachain" +default-features = false +optional = true + +[dependencies.polkadot-primitives] +path = "../polkadot/primitives" +default-features = false +optional = true + +[dependencies.polkadot-runtime-common] +path = "../polkadot/runtime/common" +default-features = false +optional = true + +[dependencies.polkadot-runtime-metrics] +path = "../polkadot/runtime/metrics" +default-features = false +optional = true + +[dependencies.polkadot-runtime-parachains] +path = "../polkadot/runtime/parachains" +default-features = false +optional = true + +[dependencies.polkadot-sdk-frame] +path = "../substrate/frame" +default-features = false +optional = true + +[dependencies.rococo-runtime-constants] +path = "../polkadot/runtime/rococo/constants" +default-features = false +optional = true + +[dependencies.sc-chain-spec-derive] +path = "../substrate/client/chain-spec/derive" +default-features = false +optional = true + +[dependencies.sc-tracing-proc-macro] +path = "../substrate/client/tracing/proc-macro" +default-features = false +optional = true + +[dependencies.slot-range-helper] +path = "../polkadot/runtime/common/slot_range_helper" +default-features = false +optional = true + +[dependencies.snowbridge-beacon-primitives] +path = "../bridges/snowbridge/primitives/beacon" +default-features = false +optional = true + +[dependencies.snowbridge-core] +path = "../bridges/snowbridge/primitives/core" +default-features = false +optional = true + +[dependencies.snowbridge-ethereum] +path = "../bridges/snowbridge/primitives/ethereum" +default-features = false +optional = true + +[dependencies.snowbridge-outbound-queue-merkle-tree] +path = "../bridges/snowbridge/pallets/outbound-queue/merkle-tree" +default-features = false +optional = true + +[dependencies.snowbridge-outbound-queue-runtime-api] +path = "../bridges/snowbridge/pallets/outbound-queue/runtime-api" +default-features = false +optional = true + +[dependencies.snowbridge-pallet-ethereum-client] +path = "../bridges/snowbridge/pallets/ethereum-client" +default-features = false +optional = true + +[dependencies.snowbridge-pallet-ethereum-client-fixtures] +path = "../bridges/snowbridge/pallets/ethereum-client/fixtures" +default-features = false +optional = true + +[dependencies.snowbridge-pallet-inbound-queue] +path = "../bridges/snowbridge/pallets/inbound-queue" +default-features = false +optional = true + +[dependencies.snowbridge-pallet-inbound-queue-fixtures] +path = "../bridges/snowbridge/pallets/inbound-queue/fixtures" +default-features = false +optional = true + +[dependencies.snowbridge-pallet-outbound-queue] +path = "../bridges/snowbridge/pallets/outbound-queue" +default-features = false +optional = true + +[dependencies.snowbridge-pallet-system] +path = "../bridges/snowbridge/pallets/system" +default-features = false +optional = true + +[dependencies.snowbridge-router-primitives] +path = "../bridges/snowbridge/primitives/router" +default-features = false +optional = true + +[dependencies.snowbridge-runtime-common] +path = "../bridges/snowbridge/runtime/runtime-common" +default-features = false +optional = true + +[dependencies.snowbridge-system-runtime-api] +path = "../bridges/snowbridge/pallets/system/runtime-api" +default-features = false +optional = true + +[dependencies.sp-api] +path = "../substrate/primitives/api" +default-features = false +optional = true + +[dependencies.sp-api-proc-macro] +path = 
"../substrate/primitives/api/proc-macro" +default-features = false +optional = true + +[dependencies.sp-application-crypto] +path = "../substrate/primitives/application-crypto" +default-features = false +optional = true + +[dependencies.sp-arithmetic] +path = "../substrate/primitives/arithmetic" +default-features = false +optional = true + +[dependencies.sp-authority-discovery] +path = "../substrate/primitives/authority-discovery" +default-features = false +optional = true + +[dependencies.sp-block-builder] +path = "../substrate/primitives/block-builder" +default-features = false +optional = true + +[dependencies.sp-consensus-aura] +path = "../substrate/primitives/consensus/aura" +default-features = false +optional = true + +[dependencies.sp-consensus-babe] +path = "../substrate/primitives/consensus/babe" +default-features = false +optional = true + +[dependencies.sp-consensus-beefy] +path = "../substrate/primitives/consensus/beefy" +default-features = false +optional = true + +[dependencies.sp-consensus-grandpa] +path = "../substrate/primitives/consensus/grandpa" +default-features = false +optional = true + +[dependencies.sp-consensus-pow] +path = "../substrate/primitives/consensus/pow" +default-features = false +optional = true + +[dependencies.sp-consensus-slots] +path = "../substrate/primitives/consensus/slots" +default-features = false +optional = true + +[dependencies.sp-core] +path = "../substrate/primitives/core" +default-features = false +optional = true + +[dependencies.sp-crypto-ec-utils] +path = "../substrate/primitives/crypto/ec-utils" +default-features = false +optional = true + +[dependencies.sp-crypto-hashing] +path = "../substrate/primitives/crypto/hashing" +default-features = false +optional = true + +[dependencies.sp-crypto-hashing-proc-macro] +path = "../substrate/primitives/crypto/hashing/proc-macro" +default-features = false +optional = true + +[dependencies.sp-debug-derive] +path = "../substrate/primitives/debug-derive" +default-features = false +optional = true + +[dependencies.sp-externalities] +path = "../substrate/primitives/externalities" +default-features = false +optional = true + +[dependencies.sp-genesis-builder] +path = "../substrate/primitives/genesis-builder" +default-features = false +optional = true + +[dependencies.sp-inherents] +path = "../substrate/primitives/inherents" +default-features = false +optional = true + +[dependencies.sp-io] +path = "../substrate/primitives/io" +default-features = false +optional = true + +[dependencies.sp-keyring] +path = "../substrate/primitives/keyring" +default-features = false +optional = true + +[dependencies.sp-keystore] +path = "../substrate/primitives/keystore" +default-features = false +optional = true + +[dependencies.sp-metadata-ir] +path = "../substrate/primitives/metadata-ir" +default-features = false +optional = true + +[dependencies.sp-mixnet] +path = "../substrate/primitives/mixnet" +default-features = false +optional = true + +[dependencies.sp-mmr-primitives] +path = "../substrate/primitives/merkle-mountain-range" +default-features = false +optional = true + +[dependencies.sp-npos-elections] +path = "../substrate/primitives/npos-elections" +default-features = false +optional = true + +[dependencies.sp-offchain] +path = "../substrate/primitives/offchain" +default-features = false +optional = true + +[dependencies.sp-runtime] +path = "../substrate/primitives/runtime" +default-features = false +optional = true + +[dependencies.sp-runtime-interface] +path = "../substrate/primitives/runtime-interface" 
+default-features = false +optional = true + +[dependencies.sp-runtime-interface-proc-macro] +path = "../substrate/primitives/runtime-interface/proc-macro" +default-features = false +optional = true + +[dependencies.sp-session] +path = "../substrate/primitives/session" +default-features = false +optional = true + +[dependencies.sp-staking] +path = "../substrate/primitives/staking" +default-features = false +optional = true + +[dependencies.sp-state-machine] +path = "../substrate/primitives/state-machine" +default-features = false +optional = true + +[dependencies.sp-statement-store] +path = "../substrate/primitives/statement-store" +default-features = false +optional = true + +[dependencies.sp-std] +path = "../substrate/primitives/std" +default-features = false +optional = true + +[dependencies.sp-storage] +path = "../substrate/primitives/storage" +default-features = false +optional = true + +[dependencies.sp-timestamp] +path = "../substrate/primitives/timestamp" +default-features = false +optional = true + +[dependencies.sp-tracing] +path = "../substrate/primitives/tracing" +default-features = false +optional = true + +[dependencies.sp-transaction-pool] +path = "../substrate/primitives/transaction-pool" +default-features = false +optional = true + +[dependencies.sp-transaction-storage-proof] +path = "../substrate/primitives/transaction-storage-proof" +default-features = false +optional = true + +[dependencies.sp-trie] +path = "../substrate/primitives/trie" +default-features = false +optional = true + +[dependencies.sp-version] +path = "../substrate/primitives/version" +default-features = false +optional = true + +[dependencies.sp-version-proc-macro] +path = "../substrate/primitives/version/proc-macro" +default-features = false +optional = true + +[dependencies.sp-wasm-interface] +path = "../substrate/primitives/wasm-interface" +default-features = false +optional = true + +[dependencies.sp-weights] +path = "../substrate/primitives/weights" +default-features = false +optional = true + +[dependencies.staging-parachain-info] +path = "../cumulus/parachains/pallets/parachain-info" +default-features = false +optional = true + +[dependencies.staging-xcm] +path = "../polkadot/xcm" +default-features = false +optional = true + +[dependencies.staging-xcm-builder] +path = "../polkadot/xcm/xcm-builder" +default-features = false +optional = true + +[dependencies.staging-xcm-executor] +path = "../polkadot/xcm/xcm-executor" +default-features = false +optional = true + +[dependencies.substrate-bip39] +path = "../substrate/utils/substrate-bip39" +default-features = false +optional = true + +[dependencies.testnet-parachains-constants] +path = "../cumulus/parachains/runtimes/constants" +default-features = false +optional = true + +[dependencies.tracing-gum-proc-macro] +path = "../polkadot/node/gum/proc-macro" +default-features = false +optional = true + +[dependencies.westend-runtime-constants] +path = "../polkadot/runtime/westend/constants" +default-features = false +optional = true + +[dependencies.xcm-fee-payment-runtime-api] +path = "../polkadot/xcm/xcm-fee-payment-runtime-api" +default-features = false +optional = true + +[dependencies.xcm-procedural] +path = "../polkadot/xcm/procedural" +default-features = false +optional = true + +[dependencies.asset-test-utils] +path = "../cumulus/parachains/runtimes/assets/test-utils" +default-features = false +optional = true + +[dependencies.bridge-hub-test-utils] +path = "../cumulus/parachains/runtimes/bridge-hubs/test-utils" +default-features = false +optional = 
true + +[dependencies.cumulus-client-cli] +path = "../cumulus/client/cli" +default-features = false +optional = true + +[dependencies.cumulus-client-collator] +path = "../cumulus/client/collator" +default-features = false +optional = true + +[dependencies.cumulus-client-consensus-aura] +path = "../cumulus/client/consensus/aura" +default-features = false +optional = true + +[dependencies.cumulus-client-consensus-common] +path = "../cumulus/client/consensus/common" +default-features = false +optional = true + +[dependencies.cumulus-client-consensus-proposer] +path = "../cumulus/client/consensus/proposer" +default-features = false +optional = true + +[dependencies.cumulus-client-consensus-relay-chain] +path = "../cumulus/client/consensus/relay-chain" +default-features = false +optional = true + +[dependencies.cumulus-client-network] +path = "../cumulus/client/network" +default-features = false +optional = true + +[dependencies.cumulus-client-parachain-inherent] +path = "../cumulus/client/parachain-inherent" +default-features = false +optional = true + +[dependencies.cumulus-client-pov-recovery] +path = "../cumulus/client/pov-recovery" +default-features = false +optional = true + +[dependencies.cumulus-client-service] +path = "../cumulus/client/service" +default-features = false +optional = true + +[dependencies.cumulus-relay-chain-inprocess-interface] +path = "../cumulus/client/relay-chain-inprocess-interface" +default-features = false +optional = true + +[dependencies.cumulus-relay-chain-interface] +path = "../cumulus/client/relay-chain-interface" +default-features = false +optional = true + +[dependencies.cumulus-relay-chain-minimal-node] +path = "../cumulus/client/relay-chain-minimal-node" +default-features = false +optional = true + +[dependencies.cumulus-relay-chain-rpc-interface] +path = "../cumulus/client/relay-chain-rpc-interface" +default-features = false +optional = true + +[dependencies.cumulus-test-relay-sproof-builder] +path = "../cumulus/test/relay-sproof-builder" +default-features = false +optional = true + +[dependencies.emulated-integration-tests-common] +path = "../cumulus/parachains/integration-tests/emulated/common" +default-features = false +optional = true + +[dependencies.fork-tree] +path = "../substrate/utils/fork-tree" +default-features = false +optional = true + +[dependencies.frame-benchmarking-cli] +path = "../substrate/utils/frame/benchmarking-cli" +default-features = false +optional = true + +[dependencies.frame-remote-externalities] +path = "../substrate/utils/frame/remote-externalities" +default-features = false +optional = true + +[dependencies.frame-support-procedural-tools] +path = "../substrate/frame/support/procedural/tools" +default-features = false +optional = true + +[dependencies.generate-bags] +path = "../substrate/utils/frame/generate-bags" +default-features = false +optional = true + +[dependencies.mmr-gadget] +path = "../substrate/client/merkle-mountain-range" +default-features = false +optional = true + +[dependencies.mmr-rpc] +path = "../substrate/client/merkle-mountain-range/rpc" +default-features = false +optional = true + +[dependencies.pallet-contracts-mock-network] +path = "../substrate/frame/contracts/mock-network" +default-features = false +optional = true + +[dependencies.pallet-transaction-payment-rpc] +path = "../substrate/frame/transaction-payment/rpc" +default-features = false +optional = true + +[dependencies.parachains-runtimes-test-utils] +path = "../cumulus/parachains/runtimes/test-utils" +default-features = false +optional = true 
+ +[dependencies.polkadot-approval-distribution] +path = "../polkadot/node/network/approval-distribution" +default-features = false +optional = true + +[dependencies.polkadot-availability-bitfield-distribution] +path = "../polkadot/node/network/bitfield-distribution" +default-features = false +optional = true + +[dependencies.polkadot-availability-distribution] +path = "../polkadot/node/network/availability-distribution" +default-features = false +optional = true + +[dependencies.polkadot-availability-recovery] +path = "../polkadot/node/network/availability-recovery" +default-features = false +optional = true + +[dependencies.polkadot-cli] +path = "../polkadot/cli" +default-features = false +optional = true + +[dependencies.polkadot-collator-protocol] +path = "../polkadot/node/network/collator-protocol" +default-features = false +optional = true + +[dependencies.polkadot-dispute-distribution] +path = "../polkadot/node/network/dispute-distribution" +default-features = false +optional = true + +[dependencies.polkadot-erasure-coding] +path = "../polkadot/erasure-coding" +default-features = false +optional = true + +[dependencies.polkadot-gossip-support] +path = "../polkadot/node/network/gossip-support" +default-features = false +optional = true + +[dependencies.polkadot-network-bridge] +path = "../polkadot/node/network/bridge" +default-features = false +optional = true + +[dependencies.polkadot-node-collation-generation] +path = "../polkadot/node/collation-generation" +default-features = false +optional = true + +[dependencies.polkadot-node-core-approval-voting] +path = "../polkadot/node/core/approval-voting" +default-features = false +optional = true + +[dependencies.polkadot-node-core-av-store] +path = "../polkadot/node/core/av-store" +default-features = false +optional = true + +[dependencies.polkadot-node-core-backing] +path = "../polkadot/node/core/backing" +default-features = false +optional = true + +[dependencies.polkadot-node-core-bitfield-signing] +path = "../polkadot/node/core/bitfield-signing" +default-features = false +optional = true + +[dependencies.polkadot-node-core-candidate-validation] +path = "../polkadot/node/core/candidate-validation" +default-features = false +optional = true + +[dependencies.polkadot-node-core-chain-api] +path = "../polkadot/node/core/chain-api" +default-features = false +optional = true + +[dependencies.polkadot-node-core-chain-selection] +path = "../polkadot/node/core/chain-selection" +default-features = false +optional = true + +[dependencies.polkadot-node-core-dispute-coordinator] +path = "../polkadot/node/core/dispute-coordinator" +default-features = false +optional = true + +[dependencies.polkadot-node-core-parachains-inherent] +path = "../polkadot/node/core/parachains-inherent" +default-features = false +optional = true + +[dependencies.polkadot-node-core-prospective-parachains] +path = "../polkadot/node/core/prospective-parachains" +default-features = false +optional = true + +[dependencies.polkadot-node-core-provisioner] +path = "../polkadot/node/core/provisioner" +default-features = false +optional = true + +[dependencies.polkadot-node-core-pvf] +path = "../polkadot/node/core/pvf" +default-features = false +optional = true + +[dependencies.polkadot-node-core-pvf-checker] +path = "../polkadot/node/core/pvf-checker" +default-features = false +optional = true + +[dependencies.polkadot-node-core-pvf-common] +path = "../polkadot/node/core/pvf/common" +default-features = false +optional = true + +[dependencies.polkadot-node-core-pvf-execute-worker] 
+path = "../polkadot/node/core/pvf/execute-worker" +default-features = false +optional = true + +[dependencies.polkadot-node-core-pvf-prepare-worker] +path = "../polkadot/node/core/pvf/prepare-worker" +default-features = false +optional = true + +[dependencies.polkadot-node-core-runtime-api] +path = "../polkadot/node/core/runtime-api" +default-features = false +optional = true + +[dependencies.polkadot-node-jaeger] +path = "../polkadot/node/jaeger" +default-features = false +optional = true + +[dependencies.polkadot-node-metrics] +path = "../polkadot/node/metrics" +default-features = false +optional = true + +[dependencies.polkadot-node-network-protocol] +path = "../polkadot/node/network/protocol" +default-features = false +optional = true + +[dependencies.polkadot-node-primitives] +path = "../polkadot/node/primitives" +default-features = false +optional = true + +[dependencies.polkadot-node-subsystem] +path = "../polkadot/node/subsystem" +default-features = false +optional = true + +[dependencies.polkadot-node-subsystem-types] +path = "../polkadot/node/subsystem-types" +default-features = false +optional = true + +[dependencies.polkadot-node-subsystem-util] +path = "../polkadot/node/subsystem-util" +default-features = false +optional = true + +[dependencies.polkadot-overseer] +path = "../polkadot/node/overseer" +default-features = false +optional = true + +[dependencies.polkadot-rpc] +path = "../polkadot/rpc" +default-features = false +optional = true + +[dependencies.polkadot-service] +path = "../polkadot/node/service" +default-features = false +optional = true + +[dependencies.polkadot-statement-distribution] +path = "../polkadot/node/network/statement-distribution" +default-features = false +optional = true + +[dependencies.polkadot-statement-table] +path = "../polkadot/statement-table" +default-features = false +optional = true + +[dependencies.sc-allocator] +path = "../substrate/client/allocator" +default-features = false +optional = true + +[dependencies.sc-authority-discovery] +path = "../substrate/client/authority-discovery" +default-features = false +optional = true + +[dependencies.sc-basic-authorship] +path = "../substrate/client/basic-authorship" +default-features = false +optional = true + +[dependencies.sc-block-builder] +path = "../substrate/client/block-builder" +default-features = false +optional = true + +[dependencies.sc-chain-spec] +path = "../substrate/client/chain-spec" +default-features = false +optional = true + +[dependencies.sc-cli] +path = "../substrate/client/cli" +default-features = false +optional = true + +[dependencies.sc-client-api] +path = "../substrate/client/api" +default-features = false +optional = true + +[dependencies.sc-client-db] +path = "../substrate/client/db" +default-features = false +optional = true + +[dependencies.sc-consensus] +path = "../substrate/client/consensus/common" +default-features = false +optional = true + +[dependencies.sc-consensus-aura] +path = "../substrate/client/consensus/aura" +default-features = false +optional = true + +[dependencies.sc-consensus-babe] +path = "../substrate/client/consensus/babe" +default-features = false +optional = true + +[dependencies.sc-consensus-babe-rpc] +path = "../substrate/client/consensus/babe/rpc" +default-features = false +optional = true + +[dependencies.sc-consensus-beefy] +path = "../substrate/client/consensus/beefy" +default-features = false +optional = true + +[dependencies.sc-consensus-beefy-rpc] +path = "../substrate/client/consensus/beefy/rpc" +default-features = false +optional = 
true + +[dependencies.sc-consensus-epochs] +path = "../substrate/client/consensus/epochs" +default-features = false +optional = true + +[dependencies.sc-consensus-grandpa] +path = "../substrate/client/consensus/grandpa" +default-features = false +optional = true + +[dependencies.sc-consensus-grandpa-rpc] +path = "../substrate/client/consensus/grandpa/rpc" +default-features = false +optional = true + +[dependencies.sc-consensus-manual-seal] +path = "../substrate/client/consensus/manual-seal" +default-features = false +optional = true + +[dependencies.sc-consensus-pow] +path = "../substrate/client/consensus/pow" +default-features = false +optional = true + +[dependencies.sc-consensus-slots] +path = "../substrate/client/consensus/slots" +default-features = false +optional = true + +[dependencies.sc-executor] +path = "../substrate/client/executor" +default-features = false +optional = true + +[dependencies.sc-executor-common] +path = "../substrate/client/executor/common" +default-features = false +optional = true + +[dependencies.sc-executor-polkavm] +path = "../substrate/client/executor/polkavm" +default-features = false +optional = true + +[dependencies.sc-executor-wasmtime] +path = "../substrate/client/executor/wasmtime" +default-features = false +optional = true + +[dependencies.sc-informant] +path = "../substrate/client/informant" +default-features = false +optional = true + +[dependencies.sc-keystore] +path = "../substrate/client/keystore" +default-features = false +optional = true + +[dependencies.sc-mixnet] +path = "../substrate/client/mixnet" +default-features = false +optional = true + +[dependencies.sc-network] +path = "../substrate/client/network" +default-features = false +optional = true + +[dependencies.sc-network-common] +path = "../substrate/client/network/common" +default-features = false +optional = true + +[dependencies.sc-network-gossip] +path = "../substrate/client/network-gossip" +default-features = false +optional = true + +[dependencies.sc-network-light] +path = "../substrate/client/network/light" +default-features = false +optional = true + +[dependencies.sc-network-statement] +path = "../substrate/client/network/statement" +default-features = false +optional = true + +[dependencies.sc-network-sync] +path = "../substrate/client/network/sync" +default-features = false +optional = true + +[dependencies.sc-network-transactions] +path = "../substrate/client/network/transactions" +default-features = false +optional = true + +[dependencies.sc-network-types] +path = "../substrate/client/network/types" +default-features = false +optional = true + +[dependencies.sc-offchain] +path = "../substrate/client/offchain" +default-features = false +optional = true + +[dependencies.sc-proposer-metrics] +path = "../substrate/client/proposer-metrics" +default-features = false +optional = true + +[dependencies.sc-rpc] +path = "../substrate/client/rpc" +default-features = false +optional = true + +[dependencies.sc-rpc-api] +path = "../substrate/client/rpc-api" +default-features = false +optional = true + +[dependencies.sc-rpc-server] +path = "../substrate/client/rpc-servers" +default-features = false +optional = true + +[dependencies.sc-rpc-spec-v2] +path = "../substrate/client/rpc-spec-v2" +default-features = false +optional = true + +[dependencies.sc-service] +path = "../substrate/client/service" +default-features = false +optional = true + +[dependencies.sc-state-db] +path = "../substrate/client/state-db" +default-features = false +optional = true + +[dependencies.sc-statement-store] 
+path = "../substrate/client/statement-store" +default-features = false +optional = true + +[dependencies.sc-storage-monitor] +path = "../substrate/client/storage-monitor" +default-features = false +optional = true + +[dependencies.sc-sync-state-rpc] +path = "../substrate/client/sync-state-rpc" +default-features = false +optional = true + +[dependencies.sc-sysinfo] +path = "../substrate/client/sysinfo" +default-features = false +optional = true + +[dependencies.sc-telemetry] +path = "../substrate/client/telemetry" +default-features = false +optional = true + +[dependencies.sc-tracing] +path = "../substrate/client/tracing" +default-features = false +optional = true + +[dependencies.sc-transaction-pool] +path = "../substrate/client/transaction-pool" +default-features = false +optional = true + +[dependencies.sc-transaction-pool-api] +path = "../substrate/client/transaction-pool/api" +default-features = false +optional = true + +[dependencies.sc-utils] +path = "../substrate/client/utils" +default-features = false +optional = true + +[dependencies.snowbridge-runtime-test-common] +path = "../bridges/snowbridge/runtime/test-common" +default-features = false +optional = true + +[dependencies.sp-blockchain] +path = "../substrate/primitives/blockchain" +default-features = false +optional = true + +[dependencies.sp-consensus] +path = "../substrate/primitives/consensus/common" +default-features = false +optional = true + +[dependencies.sp-core-hashing] +path = "../substrate/deprecated/hashing" +default-features = false +optional = true + +[dependencies.sp-core-hashing-proc-macro] +path = "../substrate/deprecated/hashing/proc-macro" +default-features = false +optional = true + +[dependencies.sp-database] +path = "../substrate/primitives/database" +default-features = false +optional = true + +[dependencies.sp-maybe-compressed-blob] +path = "../substrate/primitives/maybe-compressed-blob" +default-features = false +optional = true + +[dependencies.sp-panic-handler] +path = "../substrate/primitives/panic-handler" +default-features = false +optional = true + +[dependencies.sp-rpc] +path = "../substrate/primitives/rpc" +default-features = false +optional = true + +[dependencies.staging-node-inspect] +path = "../substrate/bin/node/inspect" +default-features = false +optional = true + +[dependencies.staging-tracking-allocator] +path = "../polkadot/node/tracking-allocator" +default-features = false +optional = true + +[dependencies.subkey] +path = "../substrate/bin/utils/subkey" +default-features = false +optional = true + +[dependencies.substrate-build-script-utils] +path = "../substrate/utils/build-script-utils" +default-features = false +optional = true + +[dependencies.substrate-frame-rpc-support] +path = "../substrate/utils/frame/rpc/support" +default-features = false +optional = true + +[dependencies.substrate-frame-rpc-system] +path = "../substrate/utils/frame/rpc/system" +default-features = false +optional = true + +[dependencies.substrate-prometheus-endpoint] +path = "../substrate/utils/prometheus" +default-features = false +optional = true + +[dependencies.substrate-rpc-client] +path = "../substrate/utils/frame/rpc/client" +default-features = false +optional = true + +[dependencies.substrate-state-trie-migration-rpc] +path = "../substrate/utils/frame/rpc/state-trie-migration-rpc" +default-features = false +optional = true + +[dependencies.substrate-wasm-builder] +path = "../substrate/utils/wasm-builder" +default-features = false +optional = true + +[dependencies.tracing-gum] +path = 
"../polkadot/node/gum" +default-features = false +optional = true + +[dependencies.xcm-emulator] +path = "../cumulus/xcm/xcm-emulator" +default-features = false +optional = true + +[dependencies.xcm-simulator] +path = "../polkadot/xcm/xcm-simulator" +default-features = false +optional = true + +[package.metadata.docs.rs] +features = ["node", "runtime"] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs new file mode 100644 index 00000000000..2e87c186eda --- /dev/null +++ b/umbrella/src/lib.rs @@ -0,0 +1,1564 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +#![cfg_attr(not(feature = "std"), no_std)] + +//! Polkadot SDK umbrella crate re-exporting all other published crates. +//! +//! This helps to set a single version number for all your dependencies. Docs are in the +//! `polkadot-sdk-docs` crate. + +// This file is auto-generated and checked by the CI. You can edit it manually, but it must be +// exactly the way that the CI expects it. + +/// Test utils for Asset Hub runtimes. +#[cfg(feature = "asset-test-utils")] +pub use asset_test_utils; + +/// Assets common utilities. +#[cfg(feature = "assets-common")] +pub use assets_common; + +/// A no-std/Substrate compatible library to construct binary merkle tree. +#[cfg(feature = "binary-merkle-tree")] +pub use binary_merkle_tree; + +/// Primitives of AssetHubRococo parachain runtime. +#[cfg(feature = "bp-asset-hub-rococo")] +pub use bp_asset_hub_rococo; + +/// Primitives of AssetHubWestend parachain runtime. +#[cfg(feature = "bp-asset-hub-westend")] +pub use bp_asset_hub_westend; + +/// Primitives for BridgeHub parachain runtimes. +#[cfg(feature = "bp-bridge-hub-cumulus")] +pub use bp_bridge_hub_cumulus; + +/// Primitives of BridgeHubKusama parachain runtime. +#[cfg(feature = "bp-bridge-hub-kusama")] +pub use bp_bridge_hub_kusama; + +/// Primitives of BridgeHubPolkadot parachain runtime. +#[cfg(feature = "bp-bridge-hub-polkadot")] +pub use bp_bridge_hub_polkadot; + +/// Primitives of BridgeHubRococo parachain runtime. +#[cfg(feature = "bp-bridge-hub-rococo")] +pub use bp_bridge_hub_rococo; + +/// Primitives of BridgeHubWestend parachain runtime. +#[cfg(feature = "bp-bridge-hub-westend")] +pub use bp_bridge_hub_westend; + +/// A common interface for describing what a bridge pallet should be able to do. +#[cfg(feature = "bp-header-chain")] +pub use bp_header_chain; + +/// Primitives of Kusama runtime. +#[cfg(feature = "bp-kusama")] +pub use bp_kusama; + +/// Primitives of messages module. +#[cfg(feature = "bp-messages")] +pub use bp_messages; + +/// Primitives of parachains module. +#[cfg(feature = "bp-parachains")] +pub use bp_parachains; + +/// Primitives of Polkadot runtime. +#[cfg(feature = "bp-polkadot")] +pub use bp_polkadot; + +/// Primitives of Polkadot Bulletin chain runtime. +#[cfg(feature = "bp-polkadot-bulletin")] +pub use bp_polkadot_bulletin; + +/// Primitives of Polkadot-like runtime. +#[cfg(feature = "bp-polkadot-core")] +pub use bp_polkadot_core; + +/// Primitives of relayers module. +#[cfg(feature = "bp-relayers")] +pub use bp_relayers; + +/// Primitives of Rococo runtime. +#[cfg(feature = "bp-rococo")] +pub use bp_rococo; + +/// Primitives that may be used at (bridges) runtime level. +#[cfg(feature = "bp-runtime")] +pub use bp_runtime; + +/// Utilities for testing substrate-based runtime bridge code. +#[cfg(feature = "bp-test-utils")] +pub use bp_test_utils; + +/// Primitives of Westend runtime. 
+#[cfg(feature = "bp-westend")] +pub use bp_westend; + +/// Primitives of the xcm-bridge-hub pallet. +#[cfg(feature = "bp-xcm-bridge-hub")] +pub use bp_xcm_bridge_hub; + +/// Primitives of the xcm-bridge-hub fee pallet. +#[cfg(feature = "bp-xcm-bridge-hub-router")] +pub use bp_xcm_bridge_hub_router; + +/// Bridge hub common utilities. +#[cfg(feature = "bridge-hub-common")] +pub use bridge_hub_common; + +/// Utils for BridgeHub testing. +#[cfg(feature = "bridge-hub-test-utils")] +pub use bridge_hub_test_utils; + +/// Common types and functions that may be used by substrate-based runtimes of all bridged +/// chains. +#[cfg(feature = "bridge-runtime-common")] +pub use bridge_runtime_common; + +/// Parachain node CLI utilities. +#[cfg(feature = "cumulus-client-cli")] +pub use cumulus_client_cli; + +/// Common node-side functionality and glue code to collate parachain blocks. +#[cfg(feature = "cumulus-client-collator")] +pub use cumulus_client_collator; + +/// AURA consensus algorithm for parachains. +#[cfg(feature = "cumulus-client-consensus-aura")] +pub use cumulus_client_consensus_aura; + +/// Cumulus specific common consensus implementations. +#[cfg(feature = "cumulus-client-consensus-common")] +pub use cumulus_client_consensus_common; + +/// A Substrate `Proposer` for building parachain blocks. +#[cfg(feature = "cumulus-client-consensus-proposer")] +pub use cumulus_client_consensus_proposer; + +/// The relay-chain provided consensus algorithm. +#[cfg(feature = "cumulus-client-consensus-relay-chain")] +pub use cumulus_client_consensus_relay_chain; + +/// Cumulus-specific networking protocol. +#[cfg(feature = "cumulus-client-network")] +pub use cumulus_client_network; + +/// Inherent that needs to be present in every parachain block. Contains messages and a relay +/// chain storage-proof. +#[cfg(feature = "cumulus-client-parachain-inherent")] +pub use cumulus_client_parachain_inherent; + +/// Cumulus-specific networking protocol. +#[cfg(feature = "cumulus-client-pov-recovery")] +pub use cumulus_client_pov_recovery; + +/// Common functions used to assemble the components of a parachain node. +#[cfg(feature = "cumulus-client-service")] +pub use cumulus_client_service; + +/// AURA consensus extension pallet for parachains. +#[cfg(feature = "cumulus-pallet-aura-ext")] +pub use cumulus_pallet_aura_ext; + +/// Migrates messages from the old DMP queue pallet. +#[cfg(feature = "cumulus-pallet-dmp-queue")] +pub use cumulus_pallet_dmp_queue; + +/// Base pallet for cumulus-based parachains. +#[cfg(feature = "cumulus-pallet-parachain-system")] +pub use cumulus_pallet_parachain_system; + +/// Proc macros provided by the parachain-system pallet. +#[cfg(feature = "cumulus-pallet-parachain-system-proc-macro")] +pub use cumulus_pallet_parachain_system_proc_macro; + +/// FRAME sessions pallet benchmarking. +#[cfg(feature = "cumulus-pallet-session-benchmarking")] +pub use cumulus_pallet_session_benchmarking; + +/// Adds functionality to migrate from a Solo to a Parachain. +#[cfg(feature = "cumulus-pallet-solo-to-para")] +pub use cumulus_pallet_solo_to_para; + +/// Pallet for stuff specific to parachains' usage of XCM. +#[cfg(feature = "cumulus-pallet-xcm")] +pub use cumulus_pallet_xcm; + +/// Pallet to queue outbound and inbound XCMP messages. +#[cfg(feature = "cumulus-pallet-xcmp-queue")] +pub use cumulus_pallet_xcmp_queue; + +/// Ping Pallet for Cumulus XCM/UMP testing. +#[cfg(feature = "cumulus-ping")] +pub use cumulus_ping; + +/// Core primitives for Aura in Cumulus. 
+#[cfg(feature = "cumulus-primitives-aura")] +pub use cumulus_primitives_aura; + +/// Cumulus related core primitive types and traits. +#[cfg(feature = "cumulus-primitives-core")] +pub use cumulus_primitives_core; + +/// Inherent that needs to be present in every parachain block. Contains messages and a relay +/// chain storage-proof. +#[cfg(feature = "cumulus-primitives-parachain-inherent")] +pub use cumulus_primitives_parachain_inherent; + +/// Hostfunction exposing storage proof size to the runtime. +#[cfg(feature = "cumulus-primitives-proof-size-hostfunction")] +pub use cumulus_primitives_proof_size_hostfunction; + +/// Utilities to reclaim storage weight. +#[cfg(feature = "cumulus-primitives-storage-weight-reclaim")] +pub use cumulus_primitives_storage_weight_reclaim; + +/// Provides timestamp related functionality for parachains. +#[cfg(feature = "cumulus-primitives-timestamp")] +pub use cumulus_primitives_timestamp; + +/// Helper datatypes for Cumulus. +#[cfg(feature = "cumulus-primitives-utility")] +pub use cumulus_primitives_utility; + +/// Implementation of the RelayChainInterface trait for Polkadot full-nodes. +#[cfg(feature = "cumulus-relay-chain-inprocess-interface")] +pub use cumulus_relay_chain_inprocess_interface; + +/// Common interface for different relay chain datasources. +#[cfg(feature = "cumulus-relay-chain-interface")] +pub use cumulus_relay_chain_interface; + +/// Minimal node implementation to be used in tandem with RPC or light-client mode. +#[cfg(feature = "cumulus-relay-chain-minimal-node")] +pub use cumulus_relay_chain_minimal_node; + +/// Implementation of the RelayChainInterface trait that connects to a remote RPC-node. +#[cfg(feature = "cumulus-relay-chain-rpc-interface")] +pub use cumulus_relay_chain_rpc_interface; + +/// Mocked relay state proof builder for testing Cumulus. +#[cfg(feature = "cumulus-test-relay-sproof-builder")] +pub use cumulus_test_relay_sproof_builder; + +/// Common resources for integration testing with xcm-emulator. +#[cfg(feature = "emulated-integration-tests-common")] +pub use emulated_integration_tests_common; + +/// Utility library for managing tree-like ordered data with logic for pruning the tree while +/// finalizing nodes. +#[cfg(feature = "fork-tree")] +pub use fork_tree; + +/// Macro for benchmarking a FRAME runtime. +#[cfg(feature = "frame-benchmarking")] +pub use frame_benchmarking; + +/// CLI for benchmarking FRAME. +#[cfg(feature = "frame-benchmarking-cli")] +pub use frame_benchmarking_cli; + +/// Pallet for testing FRAME PoV benchmarking. +#[cfg(feature = "frame-benchmarking-pallet-pov")] +pub use frame_benchmarking_pallet_pov; + +/// NPoS Solution Type. +#[cfg(feature = "frame-election-provider-solution-type")] +pub use frame_election_provider_solution_type; + +/// election provider supporting traits. +#[cfg(feature = "frame-election-provider-support")] +pub use frame_election_provider_support; + +/// FRAME executives engine. +#[cfg(feature = "frame-executive")] +pub use frame_executive; + +/// FRAME signed extension for verifying the metadata hash. +#[cfg(feature = "frame-metadata-hash-extension")] +pub use frame_metadata_hash_extension; + +/// An externalities provided environment that can load itself from remote nodes or cached +/// files. +#[cfg(feature = "frame-remote-externalities")] +pub use frame_remote_externalities; + +/// Support code for the runtime. +#[cfg(feature = "frame-support")] +pub use frame_support; + +/// Proc macro of Support code for the runtime. 
+#[cfg(feature = "frame-support-procedural")] +pub use frame_support_procedural; + +/// Proc macro helpers for procedural macros. +#[cfg(feature = "frame-support-procedural-tools")] +pub use frame_support_procedural_tools; + +/// Use to derive parsing for parsing struct. +#[cfg(feature = "frame-support-procedural-tools-derive")] +pub use frame_support_procedural_tools_derive; + +/// FRAME system module. +#[cfg(feature = "frame-system")] +pub use frame_system; + +/// FRAME System benchmarking. +#[cfg(feature = "frame-system-benchmarking")] +pub use frame_system_benchmarking; + +/// Runtime API definition required by System RPC extensions. +#[cfg(feature = "frame-system-rpc-runtime-api")] +pub use frame_system_rpc_runtime_api; + +/// FRAME pallet for democracy. +#[cfg(feature = "frame-try-runtime")] +pub use frame_try_runtime; + +/// Bag threshold generation script for pallet-bag-list. +#[cfg(feature = "generate-bags")] +pub use generate_bags; + +/// MMR Client gadget for substrate. +#[cfg(feature = "mmr-gadget")] +pub use mmr_gadget; + +/// Node-specific RPC methods for interaction with Merkle Mountain Range pallet. +#[cfg(feature = "mmr-rpc")] +pub use mmr_rpc; + +/// The Alliance pallet provides a collective for standard-setting industry collaboration. +#[cfg(feature = "pallet-alliance")] +pub use pallet_alliance; + +/// FRAME asset conversion pallet. +#[cfg(feature = "pallet-asset-conversion")] +pub use pallet_asset_conversion; + +/// FRAME asset conversion pallet's operations suite. +#[cfg(feature = "pallet-asset-conversion-ops")] +pub use pallet_asset_conversion_ops; + +/// Pallet to manage transaction payments in assets by converting them to native assets. +#[cfg(feature = "pallet-asset-conversion-tx-payment")] +pub use pallet_asset_conversion_tx_payment; + +/// Whitelist non-native assets for treasury spending and provide conversion to native balance. +#[cfg(feature = "pallet-asset-rate")] +pub use pallet_asset_rate; + +/// pallet to manage transaction payments in assets. +#[cfg(feature = "pallet-asset-tx-payment")] +pub use pallet_asset_tx_payment; + +/// FRAME asset management pallet. +#[cfg(feature = "pallet-assets")] +pub use pallet_assets; + +/// FRAME atomic swap pallet. +#[cfg(feature = "pallet-atomic-swap")] +pub use pallet_atomic_swap; + +/// FRAME AURA consensus pallet. +#[cfg(feature = "pallet-aura")] +pub use pallet_aura; + +/// FRAME pallet for authority discovery. +#[cfg(feature = "pallet-authority-discovery")] +pub use pallet_authority_discovery; + +/// Block and Uncle Author tracking for the FRAME. +#[cfg(feature = "pallet-authorship")] +pub use pallet_authorship; + +/// Consensus extension module for BABE consensus. Collects on-chain randomness from VRF +/// outputs and manages epoch transitions. +#[cfg(feature = "pallet-babe")] +pub use pallet_babe; + +/// FRAME pallet bags list. +#[cfg(feature = "pallet-bags-list")] +pub use pallet_bags_list; + +/// FRAME pallet to manage balances. +#[cfg(feature = "pallet-balances")] +pub use pallet_balances; + +/// BEEFY FRAME pallet. +#[cfg(feature = "pallet-beefy")] +pub use pallet_beefy; + +/// BEEFY + MMR runtime utilities. +#[cfg(feature = "pallet-beefy-mmr")] +pub use pallet_beefy_mmr; + +/// FRAME pallet to manage bounties. +#[cfg(feature = "pallet-bounties")] +pub use pallet_bounties; + +/// Module implementing GRANDPA on-chain light client used for bridging consensus of +/// substrate-based chains. 
+#[cfg(feature = "pallet-bridge-grandpa")] +pub use pallet_bridge_grandpa; + +/// Module that allows bridged chains to exchange messages using lane concept. +#[cfg(feature = "pallet-bridge-messages")] +pub use pallet_bridge_messages; + +/// Module that allows bridged relay chains to exchange information on their parachains' heads. +#[cfg(feature = "pallet-bridge-parachains")] +pub use pallet_bridge_parachains; + +/// Module used to store relayer rewards and coordinate relayers set. +#[cfg(feature = "pallet-bridge-relayers")] +pub use pallet_bridge_relayers; + +/// Brokerage tool for managing Polkadot Core scheduling. +#[cfg(feature = "pallet-broker")] +pub use pallet_broker; + +/// FRAME pallet to manage child bounties. +#[cfg(feature = "pallet-child-bounties")] +pub use pallet_child_bounties; + +/// Simple pallet to select collators for a parachain. +#[cfg(feature = "pallet-collator-selection")] +pub use pallet_collator_selection; + +/// Collective system: Members of a set of account IDs can make their collective feelings known +/// through dispatched calls from one of two specialized origins. +#[cfg(feature = "pallet-collective")] +pub use pallet_collective; + +/// Managed content. +#[cfg(feature = "pallet-collective-content")] +pub use pallet_collective_content; + +/// FRAME pallet for WASM contracts. +#[cfg(feature = "pallet-contracts")] +pub use pallet_contracts; + +/// A mock network for testing pallet-contracts. +#[cfg(feature = "pallet-contracts-mock-network")] +pub use pallet_contracts_mock_network; + +/// Procedural macros used in pallet_contracts. +#[cfg(feature = "pallet-contracts-proc-macro")] +pub use pallet_contracts_proc_macro; + +/// Exposes all the host functions that a contract can import. +#[cfg(feature = "pallet-contracts-uapi")] +pub use pallet_contracts_uapi; + +/// FRAME pallet for conviction voting in referenda. +#[cfg(feature = "pallet-conviction-voting")] +pub use pallet_conviction_voting; + +/// Logic as per the description of The Fellowship for core Polkadot technology. +#[cfg(feature = "pallet-core-fellowship")] +pub use pallet_core_fellowship; + +/// FRAME delegated staking pallet. +#[cfg(feature = "pallet-delegated-staking")] +pub use pallet_delegated_staking; + +/// FRAME pallet for democracy. +#[cfg(feature = "pallet-democracy")] +pub use pallet_democracy; + +/// FRAME example pallet. +#[cfg(feature = "pallet-dev-mode")] +pub use pallet_dev_mode; + +/// PALLET two phase election providers. +#[cfg(feature = "pallet-election-provider-multi-phase")] +pub use pallet_election_provider_multi_phase; + +/// Benchmarking for election provider support onchain config trait. +#[cfg(feature = "pallet-election-provider-support-benchmarking")] +pub use pallet_election_provider_support_benchmarking; + +/// FRAME pallet based on seq-Phragmรฉn election method. +#[cfg(feature = "pallet-elections-phragmen")] +pub use pallet_elections_phragmen; + +/// FRAME fast unstake pallet. +#[cfg(feature = "pallet-fast-unstake")] +pub use pallet_fast_unstake; + +/// FRAME pallet for pushing a chain to its weight limits. +#[cfg(feature = "pallet-glutton")] +pub use pallet_glutton; + +/// FRAME pallet for GRANDPA finality gadget. +#[cfg(feature = "pallet-grandpa")] +pub use pallet_grandpa; + +/// FRAME identity management pallet. +#[cfg(feature = "pallet-identity")] +pub use pallet_identity; + +/// FRAME's I'm online pallet. +#[cfg(feature = "pallet-im-online")] +pub use pallet_im_online; + +/// FRAME indices management pallet. 
+#[cfg(feature = "pallet-indices")] +pub use pallet_indices; + +/// Insecure do not use in production: FRAME randomness collective flip pallet. +#[cfg(feature = "pallet-insecure-randomness-collective-flip")] +pub use pallet_insecure_randomness_collective_flip; + +/// FRAME Participation Lottery Pallet. +#[cfg(feature = "pallet-lottery")] +pub use pallet_lottery; + +/// FRAME membership management pallet. +#[cfg(feature = "pallet-membership")] +pub use pallet_membership; + +/// FRAME pallet to queue and process messages. +#[cfg(feature = "pallet-message-queue")] +pub use pallet_message_queue; + +/// FRAME pallet to execute multi-block migrations. +#[cfg(feature = "pallet-migrations")] +pub use pallet_migrations; + +/// FRAME's mixnet pallet. +#[cfg(feature = "pallet-mixnet")] +pub use pallet_mixnet; + +/// FRAME Merkle Mountain Range pallet. +#[cfg(feature = "pallet-mmr")] +pub use pallet_mmr; + +/// FRAME multi-signature dispatch pallet. +#[cfg(feature = "pallet-multisig")] +pub use pallet_multisig; + +/// FRAME pallet to convert non-fungible to fungible tokens. +#[cfg(feature = "pallet-nft-fractionalization")] +pub use pallet_nft_fractionalization; + +/// FRAME NFTs pallet. +#[cfg(feature = "pallet-nfts")] +pub use pallet_nfts; + +/// Runtime API for the FRAME NFTs pallet. +#[cfg(feature = "pallet-nfts-runtime-api")] +pub use pallet_nfts_runtime_api; + +/// FRAME pallet for rewarding account freezing. +#[cfg(feature = "pallet-nis")] +pub use pallet_nis; + +/// FRAME pallet for node authorization. +#[cfg(feature = "pallet-node-authorization")] +pub use pallet_node_authorization; + +/// FRAME nomination pools pallet. +#[cfg(feature = "pallet-nomination-pools")] +pub use pallet_nomination_pools; + +/// FRAME nomination pools pallet benchmarking. +#[cfg(feature = "pallet-nomination-pools-benchmarking")] +pub use pallet_nomination_pools_benchmarking; + +/// Runtime API for nomination-pools FRAME pallet. +#[cfg(feature = "pallet-nomination-pools-runtime-api")] +pub use pallet_nomination_pools_runtime_api; + +/// FRAME offences pallet. +#[cfg(feature = "pallet-offences")] +pub use pallet_offences; + +/// FRAME offences pallet benchmarking. +#[cfg(feature = "pallet-offences-benchmarking")] +pub use pallet_offences_benchmarking; + +/// FRAME pallet that provides a paged list data structure. +#[cfg(feature = "pallet-paged-list")] +pub use pallet_paged_list; + +/// Pallet to store and configure parameters. +#[cfg(feature = "pallet-parameters")] +pub use pallet_parameters; + +/// FRAME pallet for storing preimages of hashes. +#[cfg(feature = "pallet-preimage")] +pub use pallet_preimage; + +/// FRAME proxying pallet. +#[cfg(feature = "pallet-proxy")] +pub use pallet_proxy; + +/// Ranked collective system: Members of a set of account IDs can make their collective +/// feelings known through dispatched calls from one of two specialized origins. +#[cfg(feature = "pallet-ranked-collective")] +pub use pallet_ranked_collective; + +/// FRAME account recovery pallet. +#[cfg(feature = "pallet-recovery")] +pub use pallet_recovery; + +/// FRAME pallet for inclusive on-chain decisions. +#[cfg(feature = "pallet-referenda")] +pub use pallet_referenda; + +/// Remark storage pallet. +#[cfg(feature = "pallet-remark")] +pub use pallet_remark; + +/// FRAME root offences pallet. +#[cfg(feature = "pallet-root-offences")] +pub use pallet_root_offences; + +/// FRAME root testing pallet. +#[cfg(feature = "pallet-root-testing")] +pub use pallet_root_testing; + +/// FRAME safe-mode pallet. 
+#[cfg(feature = "pallet-safe-mode")] +pub use pallet_safe_mode; + +/// Paymaster. +#[cfg(feature = "pallet-salary")] +pub use pallet_salary; + +/// FRAME Scheduler pallet. +#[cfg(feature = "pallet-scheduler")] +pub use pallet_scheduler; + +/// FRAME pallet for scored pools. +#[cfg(feature = "pallet-scored-pool")] +pub use pallet_scored_pool; + +/// FRAME sessions pallet. +#[cfg(feature = "pallet-session")] +pub use pallet_session; + +/// FRAME sessions pallet benchmarking. +#[cfg(feature = "pallet-session-benchmarking")] +pub use pallet_session_benchmarking; + +/// Pallet to skip payments for calls annotated with `feeless_if` if the respective conditions +/// are satisfied. +#[cfg(feature = "pallet-skip-feeless-payment")] +pub use pallet_skip_feeless_payment; + +/// FRAME society pallet. +#[cfg(feature = "pallet-society")] +pub use pallet_society; + +/// FRAME pallet staking. +#[cfg(feature = "pallet-staking")] +pub use pallet_staking; + +/// Reward Curve for FRAME staking pallet. +#[cfg(feature = "pallet-staking-reward-curve")] +pub use pallet_staking_reward_curve; + +/// Reward function for FRAME staking pallet. +#[cfg(feature = "pallet-staking-reward-fn")] +pub use pallet_staking_reward_fn; + +/// RPC runtime API for transaction payment FRAME pallet. +#[cfg(feature = "pallet-staking-runtime-api")] +pub use pallet_staking_runtime_api; + +/// FRAME pallet migration of trie. +#[cfg(feature = "pallet-state-trie-migration")] +pub use pallet_state_trie_migration; + +/// FRAME pallet for statement store. +#[cfg(feature = "pallet-statement")] +pub use pallet_statement; + +/// FRAME pallet for sudo. +#[cfg(feature = "pallet-sudo")] +pub use pallet_sudo; + +/// FRAME Timestamp Module. +#[cfg(feature = "pallet-timestamp")] +pub use pallet_timestamp; + +/// FRAME pallet to manage tips. +#[cfg(feature = "pallet-tips")] +pub use pallet_tips; + +/// FRAME pallet to manage transaction payments. +#[cfg(feature = "pallet-transaction-payment")] +pub use pallet_transaction_payment; + +/// RPC interface for the transaction payment pallet. +#[cfg(feature = "pallet-transaction-payment-rpc")] +pub use pallet_transaction_payment_rpc; + +/// RPC runtime API for transaction payment FRAME pallet. +#[cfg(feature = "pallet-transaction-payment-rpc-runtime-api")] +pub use pallet_transaction_payment_rpc_runtime_api; + +/// Storage chain pallet. +#[cfg(feature = "pallet-transaction-storage")] +pub use pallet_transaction_storage; + +/// FRAME pallet to manage treasury. +#[cfg(feature = "pallet-treasury")] +pub use pallet_treasury; + +/// FRAME transaction pause pallet. +#[cfg(feature = "pallet-tx-pause")] +pub use pallet_tx_pause; + +/// FRAME NFT asset management pallet. +#[cfg(feature = "pallet-uniques")] +pub use pallet_uniques; + +/// FRAME utilities pallet. +#[cfg(feature = "pallet-utility")] +pub use pallet_utility; + +/// FRAME pallet for manage vesting. +#[cfg(feature = "pallet-vesting")] +pub use pallet_vesting; + +/// FRAME pallet for whitelisting call, and dispatch from specific origin. +#[cfg(feature = "pallet-whitelist")] +pub use pallet_whitelist; + +/// A pallet for handling XCM programs. +#[cfg(feature = "pallet-xcm")] +pub use pallet_xcm; + +/// Benchmarks for the XCM pallet. +#[cfg(feature = "pallet-xcm-benchmarks")] +pub use pallet_xcm_benchmarks; + +/// Module that adds dynamic bridges/lanes support to XCM infrastructure at the bridge hub. +#[cfg(feature = "pallet-xcm-bridge-hub")] +pub use pallet_xcm_bridge_hub; + +/// Bridge hub interface for sibling/parent chains with dynamic fees support. 
+#[cfg(feature = "pallet-xcm-bridge-hub-router")] +pub use pallet_xcm_bridge_hub_router; + +/// Logic which is common to all parachain runtimes. +#[cfg(feature = "parachains-common")] +pub use parachains_common; + +/// Utils for Runtimes testing. +#[cfg(feature = "parachains-runtimes-test-utils")] +pub use parachains_runtimes_test_utils; + +/// Polkadot Approval Distribution subsystem for the distribution of assignments and approvals +/// for approval checks on candidates over the network. +#[cfg(feature = "polkadot-approval-distribution")] +pub use polkadot_approval_distribution; + +/// Polkadot Bitfiled Distribution subsystem, which gossips signed availability bitfields used +/// to compactly determine which backed candidates are available or not based on a 2/3+ quorum. +#[cfg(feature = "polkadot-availability-bitfield-distribution")] +pub use polkadot_availability_bitfield_distribution; + +/// The Availability Distribution subsystem. Requests the required availability data. Also +/// distributes availability data and chunks to requesters. +#[cfg(feature = "polkadot-availability-distribution")] +pub use polkadot_availability_distribution; + +/// The Availability Recovery subsystem. Handles requests for recovering the availability data +/// of included candidates. +#[cfg(feature = "polkadot-availability-recovery")] +pub use polkadot_availability_recovery; + +/// Polkadot Relay-chain Client Node. +#[cfg(feature = "polkadot-cli")] +pub use polkadot_cli; + +/// Polkadot Collator Protocol subsystem. Allows collators and validators to talk to each +/// other. +#[cfg(feature = "polkadot-collator-protocol")] +pub use polkadot_collator_protocol; + +/// Core Polkadot types used by Relay Chains and parachains. +#[cfg(feature = "polkadot-core-primitives")] +pub use polkadot_core_primitives; + +/// Polkadot Dispute Distribution subsystem, which ensures all concerned validators are aware +/// of a dispute and have the relevant votes. +#[cfg(feature = "polkadot-dispute-distribution")] +pub use polkadot_dispute_distribution; + +/// Erasure coding used for Polkadot's availability system. +#[cfg(feature = "polkadot-erasure-coding")] +pub use polkadot_erasure_coding; + +/// Polkadot Gossip Support subsystem. Responsible for keeping track of session changes and +/// issuing a connection request to the relevant validators on every new session. +#[cfg(feature = "polkadot-gossip-support")] +pub use polkadot_gossip_support; + +/// The Network Bridge Subsystem โ€” protocol multiplexer for Polkadot. +#[cfg(feature = "polkadot-network-bridge")] +pub use polkadot_network_bridge; + +/// Collator-side subsystem that handles incoming candidate submissions from the parachain. +#[cfg(feature = "polkadot-node-collation-generation")] +pub use polkadot_node_collation_generation; + +/// Approval Voting Subsystem of the Polkadot node. +#[cfg(feature = "polkadot-node-core-approval-voting")] +pub use polkadot_node_core_approval_voting; + +/// The Availability Store subsystem. Wrapper over the DB that stores availability data and +/// chunks. +#[cfg(feature = "polkadot-node-core-av-store")] +pub use polkadot_node_core_av_store; + +/// The Candidate Backing Subsystem. Tracks parachain candidates that can be backed, as well as +/// the issuance of statements about candidates. +#[cfg(feature = "polkadot-node-core-backing")] +pub use polkadot_node_core_backing; + +/// Bitfield signing subsystem for the Polkadot node. 
+#[cfg(feature = "polkadot-node-core-bitfield-signing")] +pub use polkadot_node_core_bitfield_signing; + +/// Polkadot crate that implements the Candidate Validation subsystem. Handles requests to +/// validate candidates according to a PVF. +#[cfg(feature = "polkadot-node-core-candidate-validation")] +pub use polkadot_node_core_candidate_validation; + +/// The Chain API subsystem provides access to chain related utility functions like block +/// number to hash conversions. +#[cfg(feature = "polkadot-node-core-chain-api")] +pub use polkadot_node_core_chain_api; + +/// Chain Selection Subsystem. +#[cfg(feature = "polkadot-node-core-chain-selection")] +pub use polkadot_node_core_chain_selection; + +/// The node-side components that participate in disputes. +#[cfg(feature = "polkadot-node-core-dispute-coordinator")] +pub use polkadot_node_core_dispute_coordinator; + +/// Parachains inherent data provider for Polkadot node. +#[cfg(feature = "polkadot-node-core-parachains-inherent")] +pub use polkadot_node_core_parachains_inherent; + +/// The Prospective Parachains subsystem. Tracks and handles prospective parachain fragments. +#[cfg(feature = "polkadot-node-core-prospective-parachains")] +pub use polkadot_node_core_prospective_parachains; + +/// Responsible for assembling a relay chain block from a set of available parachain +/// candidates. +#[cfg(feature = "polkadot-node-core-provisioner")] +pub use polkadot_node_core_provisioner; + +/// Polkadot crate that implements the PVF validation host. Responsible for coordinating +/// preparation and execution of PVFs. +#[cfg(feature = "polkadot-node-core-pvf")] +pub use polkadot_node_core_pvf; + +/// Polkadot crate that implements the PVF pre-checking subsystem. Responsible for checking and +/// voting for PVFs that are pending approval. +#[cfg(feature = "polkadot-node-core-pvf-checker")] +pub use polkadot_node_core_pvf_checker; + +/// Polkadot crate that contains functionality related to PVFs that is shared by the PVF host +/// and the PVF workers. +#[cfg(feature = "polkadot-node-core-pvf-common")] +pub use polkadot_node_core_pvf_common; + +/// Polkadot crate that contains the logic for executing PVFs. Used by the +/// polkadot-execute-worker binary. +#[cfg(feature = "polkadot-node-core-pvf-execute-worker")] +pub use polkadot_node_core_pvf_execute_worker; + +/// Polkadot crate that contains the logic for preparing PVFs. Used by the +/// polkadot-prepare-worker binary. +#[cfg(feature = "polkadot-node-core-pvf-prepare-worker")] +pub use polkadot_node_core_pvf_prepare_worker; + +/// Wrapper around the parachain-related runtime APIs. +#[cfg(feature = "polkadot-node-core-runtime-api")] +pub use polkadot_node_core_runtime_api; + +/// Polkadot Jaeger primitives, but equally useful for Grafana/Tempo. +#[cfg(feature = "polkadot-node-jaeger")] +pub use polkadot_node_jaeger; + +/// Subsystem metric helpers. +#[cfg(feature = "polkadot-node-metrics")] +pub use polkadot_node_metrics; + +/// Primitives types for the Node-side. +#[cfg(feature = "polkadot-node-network-protocol")] +pub use polkadot_node_network_protocol; + +/// Primitives types for the Node-side. +#[cfg(feature = "polkadot-node-primitives")] +pub use polkadot_node_primitives; + +/// Subsystem traits and message definitions and the generated overseer. +#[cfg(feature = "polkadot-node-subsystem")] +pub use polkadot_node_subsystem; + +/// Subsystem traits and message definitions. 
+#[cfg(feature = "polkadot-node-subsystem-types")] +pub use polkadot_node_subsystem_types; + +/// Subsystem traits and message definitions. +#[cfg(feature = "polkadot-node-subsystem-util")] +pub use polkadot_node_subsystem_util; + +/// System overseer of the Polkadot node. +#[cfg(feature = "polkadot-overseer")] +pub use polkadot_overseer; + +/// Types and utilities for creating and working with parachains. +#[cfg(feature = "polkadot-parachain-primitives")] +pub use polkadot_parachain_primitives; + +/// Shared primitives used by Polkadot runtime. +#[cfg(feature = "polkadot-primitives")] +pub use polkadot_primitives; + +/// Polkadot specific RPC functionality. +#[cfg(feature = "polkadot-rpc")] +pub use polkadot_rpc; + +/// Pallets and constants used in Relay Chain networks. +#[cfg(feature = "polkadot-runtime-common")] +pub use polkadot_runtime_common; + +/// Runtime metric interface for the Polkadot node. +#[cfg(feature = "polkadot-runtime-metrics")] +pub use polkadot_runtime_metrics; + +/// Relay Chain runtime code responsible for Parachains. +#[cfg(feature = "polkadot-runtime-parachains")] +pub use polkadot_runtime_parachains; + +/// Experimental: The single package to get you started with building frame pallets and +/// runtimes. +#[cfg(feature = "polkadot-sdk-frame")] +pub use polkadot_sdk_frame; + +/// Utils to tie different Polkadot components together and allow instantiation of a node. +#[cfg(feature = "polkadot-service")] +pub use polkadot_service; + +/// Statement Distribution Subsystem. +#[cfg(feature = "polkadot-statement-distribution")] +pub use polkadot_statement_distribution; + +/// Stores messages other authorities issue about candidates in Polkadot. +#[cfg(feature = "polkadot-statement-table")] +pub use polkadot_statement_table; + +/// Constants used throughout the Rococo network. +#[cfg(feature = "rococo-runtime-constants")] +pub use rococo_runtime_constants; + +/// Collection of allocator implementations. +#[cfg(feature = "sc-allocator")] +pub use sc_allocator; + +/// Substrate authority discovery. +#[cfg(feature = "sc-authority-discovery")] +pub use sc_authority_discovery; + +/// Basic implementation of block-authoring logic. +#[cfg(feature = "sc-basic-authorship")] +pub use sc_basic_authorship; + +/// Substrate block builder. +#[cfg(feature = "sc-block-builder")] +pub use sc_block_builder; + +/// Substrate chain configurations. +#[cfg(feature = "sc-chain-spec")] +pub use sc_chain_spec; + +/// Macros to derive chain spec extension traits implementation. +#[cfg(feature = "sc-chain-spec-derive")] +pub use sc_chain_spec_derive; + +/// Substrate CLI interface. +#[cfg(feature = "sc-cli")] +pub use sc_cli; + +/// Substrate client interfaces. +#[cfg(feature = "sc-client-api")] +pub use sc_client_api; + +/// Client backend that uses RocksDB database as storage. +#[cfg(feature = "sc-client-db")] +pub use sc_client_db; + +/// Collection of common consensus specific implementations for Substrate (client). +#[cfg(feature = "sc-consensus")] +pub use sc_consensus; + +/// Aura consensus algorithm for substrate. +#[cfg(feature = "sc-consensus-aura")] +pub use sc_consensus_aura; + +/// BABE consensus algorithm for substrate. +#[cfg(feature = "sc-consensus-babe")] +pub use sc_consensus_babe; + +/// RPC extensions for the BABE consensus algorithm. +#[cfg(feature = "sc-consensus-babe-rpc")] +pub use sc_consensus_babe_rpc; + +/// BEEFY Client gadget for substrate. +#[cfg(feature = "sc-consensus-beefy")] +pub use sc_consensus_beefy; + +/// RPC for the BEEFY Client gadget for substrate. 
+#[cfg(feature = "sc-consensus-beefy-rpc")] +pub use sc_consensus_beefy_rpc; + +/// Generic epochs-based utilities for consensus. +#[cfg(feature = "sc-consensus-epochs")] +pub use sc_consensus_epochs; + +/// Integration of the GRANDPA finality gadget into substrate. +#[cfg(feature = "sc-consensus-grandpa")] +pub use sc_consensus_grandpa; + +/// RPC extensions for the GRANDPA finality gadget. +#[cfg(feature = "sc-consensus-grandpa-rpc")] +pub use sc_consensus_grandpa_rpc; + +/// Manual sealing engine for Substrate. +#[cfg(feature = "sc-consensus-manual-seal")] +pub use sc_consensus_manual_seal; + +/// PoW consensus algorithm for substrate. +#[cfg(feature = "sc-consensus-pow")] +pub use sc_consensus_pow; + +/// Generic slots-based utilities for consensus. +#[cfg(feature = "sc-consensus-slots")] +pub use sc_consensus_slots; + +/// A crate that provides means of executing/dispatching calls into the runtime. +#[cfg(feature = "sc-executor")] +pub use sc_executor; + +/// A set of common definitions that are needed for defining execution engines. +#[cfg(feature = "sc-executor-common")] +pub use sc_executor_common; + +/// PolkaVM executor for Substrate. +#[cfg(feature = "sc-executor-polkavm")] +pub use sc_executor_polkavm; + +/// Defines a `WasmRuntime` that uses the Wasmtime JIT to execute. +#[cfg(feature = "sc-executor-wasmtime")] +pub use sc_executor_wasmtime; + +/// Substrate informant. +#[cfg(feature = "sc-informant")] +pub use sc_informant; + +/// Keystore (and session key management) for ed25519 based chains like Polkadot. +#[cfg(feature = "sc-keystore")] +pub use sc_keystore; + +/// Substrate mixnet service. +#[cfg(feature = "sc-mixnet")] +pub use sc_mixnet; + +/// Substrate network protocol. +#[cfg(feature = "sc-network")] +pub use sc_network; + +/// Substrate network common. +#[cfg(feature = "sc-network-common")] +pub use sc_network_common; + +/// Gossiping for the Substrate network protocol. +#[cfg(feature = "sc-network-gossip")] +pub use sc_network_gossip; + +/// Substrate light network protocol. +#[cfg(feature = "sc-network-light")] +pub use sc_network_light; + +/// Substrate statement protocol. +#[cfg(feature = "sc-network-statement")] +pub use sc_network_statement; + +/// Substrate sync network protocol. +#[cfg(feature = "sc-network-sync")] +pub use sc_network_sync; + +/// Substrate transaction protocol. +#[cfg(feature = "sc-network-transactions")] +pub use sc_network_transactions; + +/// Substrate network types. +#[cfg(feature = "sc-network-types")] +pub use sc_network_types; + +/// Substrate offchain workers. +#[cfg(feature = "sc-offchain")] +pub use sc_offchain; + +/// Basic metrics for block production. +#[cfg(feature = "sc-proposer-metrics")] +pub use sc_proposer_metrics; + +/// Substrate Client RPC. +#[cfg(feature = "sc-rpc")] +pub use sc_rpc; + +/// Substrate RPC interfaces. +#[cfg(feature = "sc-rpc-api")] +pub use sc_rpc_api; + +/// Substrate RPC servers. +#[cfg(feature = "sc-rpc-server")] +pub use sc_rpc_server; + +/// Substrate RPC interface v2. +#[cfg(feature = "sc-rpc-spec-v2")] +pub use sc_rpc_spec_v2; + +/// Substrate service. Starts a thread that spins up the network, client, and extrinsic pool. +/// Manages communication between them. +#[cfg(feature = "sc-service")] +pub use sc_service; + +/// State database maintenance. Handles canonicalization and pruning in the database. +#[cfg(feature = "sc-state-db")] +pub use sc_state_db; + +/// Substrate statement store. 
+#[cfg(feature = "sc-statement-store")] +pub use sc_statement_store; + +/// Storage monitor service for substrate. +#[cfg(feature = "sc-storage-monitor")] +pub use sc_storage_monitor; + +/// A RPC handler to create sync states for light clients. +#[cfg(feature = "sc-sync-state-rpc")] +pub use sc_sync_state_rpc; + +/// A crate that provides basic hardware and software telemetry information. +#[cfg(feature = "sc-sysinfo")] +pub use sc_sysinfo; + +/// Telemetry utils. +#[cfg(feature = "sc-telemetry")] +pub use sc_telemetry; + +/// Instrumentation implementation for substrate. +#[cfg(feature = "sc-tracing")] +pub use sc_tracing; + +/// Helper macros for Substrate's client CLI. +#[cfg(feature = "sc-tracing-proc-macro")] +pub use sc_tracing_proc_macro; + +/// Substrate transaction pool implementation. +#[cfg(feature = "sc-transaction-pool")] +pub use sc_transaction_pool; + +/// Transaction pool client facing API. +#[cfg(feature = "sc-transaction-pool-api")] +pub use sc_transaction_pool_api; + +/// I/O for Substrate runtimes. +#[cfg(feature = "sc-utils")] +pub use sc_utils; + +/// Helper crate for generating slot ranges for the Polkadot runtime. +#[cfg(feature = "slot-range-helper")] +pub use slot_range_helper; + +/// Snowbridge Beacon Primitives. +#[cfg(feature = "snowbridge-beacon-primitives")] +pub use snowbridge_beacon_primitives; + +/// Snowbridge Core. +#[cfg(feature = "snowbridge-core")] +pub use snowbridge_core; + +/// Snowbridge Ethereum. +#[cfg(feature = "snowbridge-ethereum")] +pub use snowbridge_ethereum; + +/// Snowbridge Outbound Queue Merkle Tree. +#[cfg(feature = "snowbridge-outbound-queue-merkle-tree")] +pub use snowbridge_outbound_queue_merkle_tree; + +/// Snowbridge Outbound Queue Runtime API. +#[cfg(feature = "snowbridge-outbound-queue-runtime-api")] +pub use snowbridge_outbound_queue_runtime_api; + +/// Snowbridge Ethereum Client Pallet. +#[cfg(feature = "snowbridge-pallet-ethereum-client")] +pub use snowbridge_pallet_ethereum_client; + +/// Snowbridge Ethereum Client Test Fixtures. +#[cfg(feature = "snowbridge-pallet-ethereum-client-fixtures")] +pub use snowbridge_pallet_ethereum_client_fixtures; + +/// Snowbridge Inbound Queue Pallet. +#[cfg(feature = "snowbridge-pallet-inbound-queue")] +pub use snowbridge_pallet_inbound_queue; + +/// Snowbridge Inbound Queue Test Fixtures. +#[cfg(feature = "snowbridge-pallet-inbound-queue-fixtures")] +pub use snowbridge_pallet_inbound_queue_fixtures; + +/// Snowbridge Outbound Queue Pallet. +#[cfg(feature = "snowbridge-pallet-outbound-queue")] +pub use snowbridge_pallet_outbound_queue; + +/// Snowbridge System Pallet. +#[cfg(feature = "snowbridge-pallet-system")] +pub use snowbridge_pallet_system; + +/// Snowbridge Router Primitives. +#[cfg(feature = "snowbridge-router-primitives")] +pub use snowbridge_router_primitives; + +/// Snowbridge Runtime Common. +#[cfg(feature = "snowbridge-runtime-common")] +pub use snowbridge_runtime_common; + +/// Snowbridge Runtime Tests. +#[cfg(feature = "snowbridge-runtime-test-common")] +pub use snowbridge_runtime_test_common; + +/// Snowbridge System Runtime API. +#[cfg(feature = "snowbridge-system-runtime-api")] +pub use snowbridge_system_runtime_api; + +/// Substrate runtime api primitives. +#[cfg(feature = "sp-api")] +pub use sp_api; + +/// Macros for declaring and implementing runtime apis. +#[cfg(feature = "sp-api-proc-macro")] +pub use sp_api_proc_macro; + +/// Provides facilities for generating application specific crypto wrapper types. 
+#[cfg(feature = "sp-application-crypto")] +pub use sp_application_crypto; + +/// Minimal fixed point arithmetic primitives and types for runtime. +#[cfg(feature = "sp-arithmetic")] +pub use sp_arithmetic; + +/// Authority discovery primitives. +#[cfg(feature = "sp-authority-discovery")] +pub use sp_authority_discovery; + +/// The block builder runtime api. +#[cfg(feature = "sp-block-builder")] +pub use sp_block_builder; + +/// Substrate blockchain traits and primitives. +#[cfg(feature = "sp-blockchain")] +pub use sp_blockchain; + +/// Common utilities for building and using consensus engines in substrate. +#[cfg(feature = "sp-consensus")] +pub use sp_consensus; + +/// Primitives for Aura consensus. +#[cfg(feature = "sp-consensus-aura")] +pub use sp_consensus_aura; + +/// Primitives for BABE consensus. +#[cfg(feature = "sp-consensus-babe")] +pub use sp_consensus_babe; + +/// Primitives for BEEFY protocol. +#[cfg(feature = "sp-consensus-beefy")] +pub use sp_consensus_beefy; + +/// Primitives for GRANDPA integration, suitable for WASM compilation. +#[cfg(feature = "sp-consensus-grandpa")] +pub use sp_consensus_grandpa; + +/// Primitives for Aura consensus. +#[cfg(feature = "sp-consensus-pow")] +pub use sp_consensus_pow; + +/// Primitives for slots-based consensus. +#[cfg(feature = "sp-consensus-slots")] +pub use sp_consensus_slots; + +/// Shareable Substrate types. +#[cfg(feature = "sp-core")] +pub use sp_core; + +/// Hashing primitives (deprecated: use sp-crypto-hashing for new applications). +#[cfg(feature = "sp-core-hashing")] +pub use sp_core_hashing; + +/// Procedural macros for calculating static hashes (deprecated in favor of +/// `sp-crypto-hashing-proc-macro`). +#[cfg(feature = "sp-core-hashing-proc-macro")] +pub use sp_core_hashing_proc_macro; + +/// Host functions for common Arkworks elliptic curve operations. +#[cfg(feature = "sp-crypto-ec-utils")] +pub use sp_crypto_ec_utils; + +/// Hashing primitives. +#[cfg(feature = "sp-crypto-hashing")] +pub use sp_crypto_hashing; + +/// Procedural macros for calculating static hashes. +#[cfg(feature = "sp-crypto-hashing-proc-macro")] +pub use sp_crypto_hashing_proc_macro; + +/// Substrate database trait. +#[cfg(feature = "sp-database")] +pub use sp_database; + +/// Macros to derive runtime debug implementation. +#[cfg(feature = "sp-debug-derive")] +pub use sp_debug_derive; + +/// Substrate externalities abstraction. +#[cfg(feature = "sp-externalities")] +pub use sp_externalities; + +/// Substrate RuntimeGenesisConfig builder API. +#[cfg(feature = "sp-genesis-builder")] +pub use sp_genesis_builder; + +/// Provides types and traits for creating and checking inherents. +#[cfg(feature = "sp-inherents")] +pub use sp_inherents; + +/// I/O for Substrate runtimes. +#[cfg(feature = "sp-io")] +pub use sp_io; + +/// Keyring support code for the runtime. A set of test accounts. +#[cfg(feature = "sp-keyring")] +pub use sp_keyring; + +/// Keystore primitives. +#[cfg(feature = "sp-keystore")] +pub use sp_keystore; + +/// Handling of blobs, usually Wasm code, which may be compressed. +#[cfg(feature = "sp-maybe-compressed-blob")] +pub use sp_maybe_compressed_blob; + +/// Intermediate representation of the runtime metadata. +#[cfg(feature = "sp-metadata-ir")] +pub use sp_metadata_ir; + +/// Substrate mixnet types and runtime interface. +#[cfg(feature = "sp-mixnet")] +pub use sp_mixnet; + +/// Merkle Mountain Range primitives. +#[cfg(feature = "sp-mmr-primitives")] +pub use sp_mmr_primitives; + +/// NPoS election algorithm primitives. 
+#[cfg(feature = "sp-npos-elections")] +pub use sp_npos_elections; + +/// Substrate offchain workers primitives. +#[cfg(feature = "sp-offchain")] +pub use sp_offchain; + +/// Custom panic hook with bug report link. +#[cfg(feature = "sp-panic-handler")] +pub use sp_panic_handler; + +/// Substrate RPC primitives and utilities. +#[cfg(feature = "sp-rpc")] +pub use sp_rpc; + +/// Runtime Modules shared primitive types. +#[cfg(feature = "sp-runtime")] +pub use sp_runtime; + +/// Substrate runtime interface. +#[cfg(feature = "sp-runtime-interface")] +pub use sp_runtime_interface; + +/// This crate provides procedural macros for usage within the context of the Substrate runtime +/// interface. +#[cfg(feature = "sp-runtime-interface-proc-macro")] +pub use sp_runtime_interface_proc_macro; + +/// Primitives for sessions. +#[cfg(feature = "sp-session")] +pub use sp_session; + +/// A crate which contains primitives that are useful for implementation that uses staking +/// approaches in general. Definitions related to sessions, slashing, etc go here. +#[cfg(feature = "sp-staking")] +pub use sp_staking; + +/// Substrate State Machine. +#[cfg(feature = "sp-state-machine")] +pub use sp_state_machine; + +/// A crate which contains primitives related to the statement store. +#[cfg(feature = "sp-statement-store")] +pub use sp_statement_store; + +/// Lowest-abstraction level for the Substrate runtime: just exports useful primitives from std +/// or client/alloc to be used with any code that depends on the runtime. +#[cfg(feature = "sp-std")] +pub use sp_std; + +/// Storage related primitives. +#[cfg(feature = "sp-storage")] +pub use sp_storage; + +/// Substrate core types and inherents for timestamps. +#[cfg(feature = "sp-timestamp")] +pub use sp_timestamp; + +/// Instrumentation primitives and macros for Substrate. +#[cfg(feature = "sp-tracing")] +pub use sp_tracing; + +/// Transaction pool runtime facing API. +#[cfg(feature = "sp-transaction-pool")] +pub use sp_transaction_pool; + +/// Transaction storage proof primitives. +#[cfg(feature = "sp-transaction-storage-proof")] +pub use sp_transaction_storage_proof; + +/// Patricia trie stuff using a parity-scale-codec node format. +#[cfg(feature = "sp-trie")] +pub use sp_trie; + +/// Version module for the Substrate runtime; Provides a function that returns the runtime +/// version. +#[cfg(feature = "sp-version")] +pub use sp_version; + +/// Macro for defining a runtime version. +#[cfg(feature = "sp-version-proc-macro")] +pub use sp_version_proc_macro; + +/// Types and traits for interfacing between the host and the wasm runtime. +#[cfg(feature = "sp-wasm-interface")] +pub use sp_wasm_interface; + +/// Types and traits for interfacing between the host and the wasm runtime. +#[cfg(feature = "sp-weights")] +pub use sp_weights; + +/// Substrate node block inspection tool. +#[cfg(feature = "staging-node-inspect")] +pub use staging_node_inspect; + +/// Pallet to store the parachain ID. +#[cfg(feature = "staging-parachain-info")] +pub use staging_parachain_info; + +/// Tracking allocator to control the amount of memory consumed by the process. +#[cfg(feature = "staging-tracking-allocator")] +pub use staging_tracking_allocator; + +/// The basic XCM datastructures. +#[cfg(feature = "staging-xcm")] +pub use staging_xcm; + +/// Tools & types for building with XCM and its executor. +#[cfg(feature = "staging-xcm-builder")] +pub use staging_xcm_builder; + +/// An abstract and configurable XCM message executor. 
+#[cfg(feature = "staging-xcm-executor")] +pub use staging_xcm_executor; + +/// Generate and restore keys for Substrate based chains such as Polkadot, Kusama and a growing +/// number of parachains and Substrate based projects. +#[cfg(feature = "subkey")] +pub use subkey; + +/// Converting BIP39 entropy to valid Substrate (sr25519) SecretKeys. +#[cfg(feature = "substrate-bip39")] +pub use substrate_bip39; + +/// Crate with utility functions for `build.rs` scripts. +#[cfg(feature = "substrate-build-script-utils")] +pub use substrate_build_script_utils; + +/// Substrate RPC for FRAME's support. +#[cfg(feature = "substrate-frame-rpc-support")] +pub use substrate_frame_rpc_support; + +/// FRAME's system exposed over Substrate RPC. +#[cfg(feature = "substrate-frame-rpc-system")] +pub use substrate_frame_rpc_system; + +/// Endpoint to expose Prometheus metrics. +#[cfg(feature = "substrate-prometheus-endpoint")] +pub use substrate_prometheus_endpoint; + +/// Shared JSON-RPC client. +#[cfg(feature = "substrate-rpc-client")] +pub use substrate_rpc_client; + +/// Node-specific RPC methods for interaction with state trie migration. +#[cfg(feature = "substrate-state-trie-migration-rpc")] +pub use substrate_state_trie_migration_rpc; + +/// Utility for building WASM binaries. +#[cfg(feature = "substrate-wasm-builder")] +pub use substrate_wasm_builder; + +/// Common constants for Testnet Parachains runtimes. +#[cfg(feature = "testnet-parachains-constants")] +pub use testnet_parachains_constants; + +/// Stick logs together with the TraceID as provided by tempo. +#[cfg(feature = "tracing-gum")] +pub use tracing_gum; + +/// Generate an overseer including builder pattern and message wrapper from a single annotated +/// struct definition. +#[cfg(feature = "tracing-gum-proc-macro")] +pub use tracing_gum_proc_macro; + +/// Constants used throughout the Westend network. +#[cfg(feature = "westend-runtime-constants")] +pub use westend_runtime_constants; + +/// Test kit to emulate XCM program execution. +#[cfg(feature = "xcm-emulator")] +pub use xcm_emulator; + +/// XCM fee payment runtime API. +#[cfg(feature = "xcm-fee-payment-runtime-api")] +pub use xcm_fee_payment_runtime_api; + +/// Procedural macros for XCM. +#[cfg(feature = "xcm-procedural")] +pub use xcm_procedural; + +/// Test kit to simulate cross-chain message passing and XCM execution. +#[cfg(feature = "xcm-simulator")] +pub use xcm_simulator; -- GitLab From f469fbfb0a44c4e223488b07ec641ca02b2fb8f1 Mon Sep 17 00:00:00 2001 From: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Date: Fri, 24 May 2024 17:14:44 +0300 Subject: [PATCH 061/106] availability-recovery: bump chunk fetch threshold to 1MB for Polkadot and 4MB for Kusama + testnets (#4399) Doing this change ensures that we minimize the CPU usage we spend in reed-solomon by only doing the re-encoding into chunks if PoV size is less than 4MB (which means all PoVs right now) Based on susbystem benchmark results we concluded that it is safe to bump this number higher. At worst case scenario the network pressure for a backing group of 5 is around 25% of the network bandwidth in hw specs. Assuming 6s block times (max_candidate_depth 3) and needed_approvals 30 the amount of bandwidth usage of a backing group used would hover above `30 * 4 * 3 = 360MB` per relay chain block. Given a backing group of 5 that gives 72MB per block per validator -> 12 MB/s.
[Chart: reality check on Kusama PoV sizes (screenshot dated 2024-05-07)]
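
For readers skimming the diff below: the core decision it implements is a
size-based fast path. Here is a minimal sketch of that check under simplified
names (the real logic lives in `handle_recover` and uses the subsystem's own
types):

```rust
/// Sketch of the backers-first decision from the diff below (simplified).
/// `our_chunk_size` is the size of this validator's own stored chunk.
fn prefer_fetch_from_backers(
    our_chunk_size: usize,
    n_validators: usize,
    fetch_chunks_threshold: usize,
) -> bool {
    // Any third of the erasure-coded chunks reconstructs the PoV, so
    // chunk_size * n_validators / 3 approximates the full PoV size.
    let pov_size_estimate = our_chunk_size.saturating_mul(n_validators) / 3;
    // Take the backers-first path only for PoVs under the threshold.
    pov_size_estimate < fetch_chunks_threshold
}
```

With the 4MB threshold this fast path covers effectively all current PoVs,
falling back to chunk recovery only for unusually large ones.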
---------

Signed-off-by: Andrei Sandu
---
 .../network/availability-recovery/src/lib.rs  | 22 +++++++++++++------
 .../availability-recovery/src/tests.rs        |  6 +++--
 polkadot/node/service/src/lib.rs              |  7 ++++++
 polkadot/node/service/src/overseer.rs         |  7 ++++++
 4 files changed, 33 insertions(+), 9 deletions(-)

diff --git a/polkadot/node/network/availability-recovery/src/lib.rs b/polkadot/node/network/availability-recovery/src/lib.rs
index 94b9d9546cd..b836870cd8a 100644
--- a/polkadot/node/network/availability-recovery/src/lib.rs
+++ b/polkadot/node/network/availability-recovery/src/lib.rs
@@ -77,8 +77,10 @@ const LRU_SIZE: u32 = 16;
 
 const COST_INVALID_REQUEST: Rep = Rep::CostMajor("Peer sent unparsable request");
 
-/// PoV size limit in bytes for which prefer fetching from backers.
-const SMALL_POV_LIMIT: usize = 128 * 1024;
+/// PoV size limit in bytes below which we prefer fetching from backers. (conservative, Polkadot for now)
+pub(crate) const CONSERVATIVE_FETCH_CHUNKS_THRESHOLD: usize = 1 * 1024 * 1024;
+/// PoV size limit in bytes below which we prefer fetching from backers. (Kusama and all testnets)
+pub const FETCH_CHUNKS_THRESHOLD: usize = 4 * 1024 * 1024;
 
 #[derive(Clone, PartialEq)]
 /// The strategy we use to recover the PoV.
@@ -448,7 +450,7 @@ async fn handle_recover(
 	if let Some(backing_validators) = session_info.validator_groups.get(backing_group) {
 		let mut small_pov_size = true;
 
-		if let RecoveryStrategyKind::BackersFirstIfSizeLower(small_pov_limit) =
+		if let RecoveryStrategyKind::BackersFirstIfSizeLower(fetch_chunks_threshold) =
 			recovery_strategy_kind
 		{
 			// Get our own chunk size to get an estimate of the PoV size.
@@ -457,13 +459,13 @@ async fn handle_recover(
 			if let Ok(Some(chunk_size)) = chunk_size {
 				let pov_size_estimate =
 					chunk_size.saturating_mul(session_info.validators.len()) / 3;
-				small_pov_size = pov_size_estimate < small_pov_limit;
+				small_pov_size = pov_size_estimate < fetch_chunks_threshold;
 
 				gum::trace!(
 					target: LOG_TARGET,
 					?candidate_hash,
 					pov_size_estimate,
-					small_pov_limit,
+					fetch_chunks_threshold,
 					enabled = small_pov_size,
 					"Prefer fetch from backing group",
 				);
@@ -547,11 +549,14 @@ impl AvailabilityRecoverySubsystem {
 	/// which never requests the `AvailabilityStoreSubsystem` subsystem and only checks the POV hash
 	/// instead of reencoding the available data.
 	pub fn for_collator(
+		fetch_chunks_threshold: Option<usize>,
 		req_receiver: IncomingRequestReceiver,
 		metrics: Metrics,
	) -> Self {
		Self {
-			recovery_strategy_kind: RecoveryStrategyKind::BackersFirstIfSizeLower(SMALL_POV_LIMIT),
+			recovery_strategy_kind: RecoveryStrategyKind::BackersFirstIfSizeLower(
+				fetch_chunks_threshold.unwrap_or(CONSERVATIVE_FETCH_CHUNKS_THRESHOLD),
+			),
 			bypass_availability_store: true,
 			post_recovery_check: PostRecoveryCheck::PovHash,
 			req_receiver,
@@ -591,11 +596,14 @@ impl AvailabilityRecoverySubsystem {
 
 	/// Create a new instance of `AvailabilityRecoverySubsystem` which requests chunks if PoV is
 	/// above a threshold. 
pub fn with_chunks_if_pov_large(
+		fetch_chunks_threshold: Option<usize>,
 		req_receiver: IncomingRequestReceiver,
 		metrics: Metrics,
	) -> Self {
		Self {
-			recovery_strategy_kind: RecoveryStrategyKind::BackersFirstIfSizeLower(SMALL_POV_LIMIT),
+			recovery_strategy_kind: RecoveryStrategyKind::BackersFirstIfSizeLower(
+				fetch_chunks_threshold.unwrap_or(CONSERVATIVE_FETCH_CHUNKS_THRESHOLD),
+			),
 			bypass_availability_store: false,
 			post_recovery_check: PostRecoveryCheck::Reencode,
 			req_receiver,
diff --git a/polkadot/node/network/availability-recovery/src/tests.rs b/polkadot/node/network/availability-recovery/src/tests.rs
index 909f6a25f46..6049a5a5c3a 100644
--- a/polkadot/node/network/availability-recovery/src/tests.rs
+++ b/polkadot/node/network/availability-recovery/src/tests.rs
@@ -906,6 +906,7 @@ fn recovers_from_only_chunks_if_pov_large() {
 	let test_state = TestState::default();
 	let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
 	let subsystem = AvailabilityRecoverySubsystem::with_chunks_if_pov_large(
+		Some(FETCH_CHUNKS_THRESHOLD),
 		request_receiver(&req_protocol_names),
 		Metrics::new_dummy(),
 	);
@@ -942,7 +943,7 @@ fn recovers_from_only_chunks_if_pov_large() {
 				AllMessages::AvailabilityStore(
 					AvailabilityStoreMessage::QueryChunkSize(_, tx)
 				) => {
-					let _ = tx.send(Some(1000000));
+					let _ = tx.send(Some(crate::FETCH_CHUNKS_THRESHOLD + 1));
 				}
 			);
 
@@ -987,7 +988,7 @@ fn recovers_from_only_chunks_if_pov_large() {
 				AllMessages::AvailabilityStore(
 					AvailabilityStoreMessage::QueryChunkSize(_, tx)
 				) => {
-					let _ = tx.send(Some(1000000));
+					let _ = tx.send(Some(crate::FETCH_CHUNKS_THRESHOLD + 1));
 				}
 			);
 
@@ -1015,6 +1016,7 @@ fn fast_path_backing_group_recovers_if_pov_small() {
 	let test_state = TestState::default();
 	let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
 	let subsystem = AvailabilityRecoverySubsystem::with_chunks_if_pov_large(
+		Some(FETCH_CHUNKS_THRESHOLD),
 		request_receiver(&req_protocol_names),
 		Metrics::new_dummy(),
 	);
diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs
index 665533e9bc7..6d365b93ac7 100644
--- a/polkadot/node/service/src/lib.rs
+++ b/polkadot/node/service/src/lib.rs
@@ -750,6 +750,7 @@ pub fn new_full<
 		prepare_workers_hard_max_num,
 	}: NewFullParams,
 ) -> Result {
+	use polkadot_availability_recovery::FETCH_CHUNKS_THRESHOLD;
 	use polkadot_node_network_protocol::request_response::IncomingRequest;
 	use sc_network_sync::WarpSyncParams;
 
@@ -988,6 +989,11 @@ pub fn new_full<
 		stagnant_check_interval: Default::default(),
 		stagnant_check_mode: chain_selection_subsystem::StagnantCheckMode::PruneOnly,
 	};
+
+	// Kusama + testnets get a higher threshold; we are conservative on Polkadot for now.
+	let fetch_chunks_threshold =
+		if config.chain_spec.is_polkadot() { None } else { Some(FETCH_CHUNKS_THRESHOLD) };
+
 	Some(ExtendedOverseerGenArgs {
 		keystore,
 		parachains_db,
@@ -1001,6 +1007,7 @@ pub fn new_full<
 		dispute_req_receiver,
 		dispute_coordinator_config,
 		chain_selection_config,
+		fetch_chunks_threshold,
 	})
 };

diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs
index 4b7777a0967..175a77e1c5f 100644
--- a/polkadot/node/service/src/overseer.rs
+++ b/polkadot/node/service/src/overseer.rs
@@ -133,6 +133,10 @@ pub struct ExtendedOverseerGenArgs {
 	pub dispute_coordinator_config: DisputeCoordinatorConfig,
 	/// Configuration for the chain selection subsystem.
 	pub chain_selection_config: ChainSelectionConfig,
+	/// Optional availability recovery fetch chunks threshold. 
If PoV size is lower
+	/// than the value put in here we always try to recover availability from backers.
+	/// The presence of this parameter here is needed to have different values per chain.
+	pub fetch_chunks_threshold: Option<usize>,
 }
 
 /// Obtain a prepared validator `Overseer`, that is initialized with all default values.
@@ -166,6 +170,7 @@ pub fn validator_overseer_builder(
 		dispute_req_receiver,
 		dispute_coordinator_config,
 		chain_selection_config,
+		fetch_chunks_threshold,
 	}: ExtendedOverseerGenArgs,
 ) -> Result<
 	InitializedOverseerBuilder<
@@ -240,6 +245,7 @@ where
 			Metrics::register(registry)?,
 		))
 		.availability_recovery(AvailabilityRecoverySubsystem::with_chunks_if_pov_large(
+			fetch_chunks_threshold,
 			available_data_req_receiver,
 			Metrics::register(registry)?,
 		))
@@ -421,6 +427,7 @@ where
 		))
 		.availability_distribution(DummySubsystem)
 		.availability_recovery(AvailabilityRecoverySubsystem::for_collator(
+			None,
 			available_data_req_receiver,
 			Metrics::register(registry)?,
 		))
-- 
GitLab


From e192b764971f99975e876380f9ebbf2c08f0c17d Mon Sep 17 00:00:00 2001
From: Branislav Kontur
Date: Fri, 24 May 2024 22:59:12 +0200
Subject: [PATCH 062/106] Avoid using `xcm::v4` and use latest instead for
 AssetHub benchmarks (#4567)

---
 .../runtimes/assets/asset-hub-rococo/src/lib.rs  | 16 ++++++++--------
 .../runtimes/assets/asset-hub-westend/src/lib.rs | 16 ++++++++--------
 2 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
index 25c66afc8a5..4705d12e60c 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
@@ -93,8 +93,8 @@ use pallet_xcm::{EnsureXcm, IsVoiceOfBody};
 use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate};
 #[cfg(feature = "runtime-benchmarks")]
 use xcm::latest::prelude::{
-	Asset, Fungible, Here, InteriorLocation, Junction, Junction::*, Location, NetworkId,
-	NonFungible, Parent, ParentThen, Response, XCM_VERSION,
+	Asset, Assets as XcmAssets, Fungible, Here, InteriorLocation, Junction, Junction::*, Location,
+	NetworkId, NonFungible, Parent, ParentThen, Response, XCM_VERSION,
 };
 use xcm::{
 	latest::prelude::{AssetId, BodyId},
@@ -1535,7 +1535,7 @@ impl_runtime_apis! {
 		}
 
 		fn set_up_complex_asset_transfer(
-		) -> Option<(xcm::v4::Assets, u32, Location, Box)> {
+		) -> Option<(XcmAssets, u32, Location, Box)> {
 			// Transfer to Relay some local AH asset (local-reserve-transfer) while paying
 			// fees using teleported native token.
 			// (We don't care that Relay doesn't accept incoming unknown AH local asset)
@@ -1566,7 +1566,7 @@ impl_runtime_apis! {
 			);
 
 			let transfer_asset: Asset = (asset_location, asset_amount).into();
-			let assets: xcm::v4::Assets = vec![fee_asset.clone(), transfer_asset].into();
+			let assets: XcmAssets = vec![fee_asset.clone(), transfer_asset].into();
 			let fee_index = if assets.get(0).unwrap().eq(&fee_asset) { 0 } else { 1 };
 
 			// verify transferred successfully
@@ -1634,7 +1634,7 @@ impl_runtime_apis! {
 		fn valid_destination() -> Result {
 			Ok(TokenLocation::get())
 		}
-		fn worst_case_holding(depositable_count: u32) -> xcm::v4::Assets {
+		fn worst_case_holding(depositable_count: u32) -> XcmAssets {
 			// A mix of fungible, non-fungible, and concrete assets. 
let holding_non_fungibles = MaxAssetsIntoHolding::get() / 2 - depositable_count; let holding_fungibles = holding_non_fungibles.saturating_sub(2); // -2 for two `iter::once` bellow @@ -1695,7 +1695,7 @@ impl_runtime_apis! { (0u64, Response::Version(Default::default())) } - fn worst_case_asset_exchange() -> Result<(xcm::v4::Assets, xcm::v4::Assets), BenchmarkError> { + fn worst_case_asset_exchange() -> Result<(XcmAssets, XcmAssets), BenchmarkError> { Err(BenchmarkError::Skip) } @@ -1714,9 +1714,9 @@ impl_runtime_apis! { Ok(TokenLocation::get()) } - fn claimable_asset() -> Result<(Location, Location, xcm::v4::Assets), BenchmarkError> { + fn claimable_asset() -> Result<(Location, Location, XcmAssets), BenchmarkError> { let origin = TokenLocation::get(); - let assets: xcm::v4::Assets = (TokenLocation::get(), 1_000 * UNITS).into(); + let assets: XcmAssets = (TokenLocation::get(), 1_000 * UNITS).into(); let ticket = Location { parents: 0, interior: Here }; Ok((origin, ticket, assets)) } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index c8d388df16c..a82094d6f8a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -95,8 +95,8 @@ use xcm::latest::prelude::AssetId; #[cfg(feature = "runtime-benchmarks")] use xcm::latest::prelude::{ - Asset, Fungible, Here, InteriorLocation, Junction, Junction::*, Location, NetworkId, - NonFungible, Parent, ParentThen, Response, XCM_VERSION, + Asset, Assets as XcmAssets, Fungible, Here, InteriorLocation, Junction, Junction::*, Location, + NetworkId, NonFungible, Parent, ParentThen, Response, XCM_VERSION, }; use xcm_fee_payment_runtime_api::{ @@ -1629,7 +1629,7 @@ impl_runtime_apis! { } fn set_up_complex_asset_transfer( - ) -> Option<(xcm::v4::Assets, u32, Location, Box)> { + ) -> Option<(XcmAssets, u32, Location, Box)> { // Transfer to Relay some local AH asset (local-reserve-transfer) while paying // fees using teleported native token. // (We don't care that Relay doesn't accept incoming unknown AH local asset) @@ -1660,7 +1660,7 @@ impl_runtime_apis! { ); let transfer_asset: Asset = (asset_location, asset_amount).into(); - let assets: xcm::v4::Assets = vec![fee_asset.clone(), transfer_asset].into(); + let assets: XcmAssets = vec![fee_asset.clone(), transfer_asset].into(); let fee_index = if assets.get(0).unwrap().eq(&fee_asset) { 0 } else { 1 }; // verify transferred successfully @@ -1733,7 +1733,7 @@ impl_runtime_apis! { fn valid_destination() -> Result { Ok(WestendLocation::get()) } - fn worst_case_holding(depositable_count: u32) -> xcm::v4::Assets { + fn worst_case_holding(depositable_count: u32) -> XcmAssets { // A mix of fungible, non-fungible, and concrete assets. let holding_non_fungibles = MaxAssetsIntoHolding::get() / 2 - depositable_count; let holding_fungibles = holding_non_fungibles - 2; // -2 for two `iter::once` bellow @@ -1794,7 +1794,7 @@ impl_runtime_apis! { (0u64, Response::Version(Default::default())) } - fn worst_case_asset_exchange() -> Result<(xcm::v4::Assets, xcm::v4::Assets), BenchmarkError> { + fn worst_case_asset_exchange() -> Result<(XcmAssets, XcmAssets), BenchmarkError> { Err(BenchmarkError::Skip) } @@ -1813,9 +1813,9 @@ impl_runtime_apis! 
{ Ok(WestendLocation::get()) } - fn claimable_asset() -> Result<(Location, Location, xcm::v4::Assets), BenchmarkError> { + fn claimable_asset() -> Result<(Location, Location, XcmAssets), BenchmarkError> { let origin = WestendLocation::get(); - let assets: xcm::v4::Assets = (AssetId(WestendLocation::get()), 1_000 * UNITS).into(); + let assets: XcmAssets = (AssetId(WestendLocation::get()), 1_000 * UNITS).into(); let ticket = Location { parents: 0, interior: Here }; Ok((origin, ticket, assets)) } -- GitLab From 9201f9abbe0b63abbeabc1f6e6799cca030c8c46 Mon Sep 17 00:00:00 2001 From: Francisco Aguirre Date: Mon, 27 May 2024 07:12:34 +0100 Subject: [PATCH 063/106] Deprecate XCMv2 (#4131) Marked XCMv2 as deprecated now that we have XCMv4. It will be removed sometime around June 2024. --------- Co-authored-by: Branislav Kontur --- cumulus/pallets/xcmp-queue/src/mock.rs | 20 +- cumulus/pallets/xcmp-queue/src/tests.rs | 18 +- .../bridge-hub-rococo/src/tests/send_xcm.rs | 68 ++----- .../bridge-hub-westend/src/tests/send_xcm.rs | 68 ++----- polkadot/xcm/pallet-xcm/src/benchmarking.rs | 21 ++- .../pallet-xcm/src/tests/assets_transfer.rs | 2 +- polkadot/xcm/pallet-xcm/src/tests/mod.rs | 142 +++++++------- .../procedural/tests/conversion_functions.rs | 4 +- polkadot/xcm/src/lib.rs | 6 + polkadot/xcm/src/tests.rs | 12 -- polkadot/xcm/src/v2/mod.rs | 22 ++- polkadot/xcm/src/v3/mod.rs | 175 +++++------------- polkadot/xcm/src/v3/traits.rs | 5 + polkadot/xcm/src/v4/mod.rs | 2 +- .../xcm-builder/src/process_xcm_message.rs | 16 +- prdoc/pr_4131.prdoc | 26 +++ 16 files changed, 249 insertions(+), 358 deletions(-) create mode 100644 prdoc/pr_4131.prdoc diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index dd87e07c33f..e166a78ee82 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -321,10 +321,13 @@ impl GetChannelInfo for MockedChannelInfo { pub(crate) fn mk_page() -> Vec { let mut page = Vec::::new(); + let newer_xcm_version = xcm::prelude::XCM_VERSION; + let older_xcm_version = newer_xcm_version - 1; + for i in 0..100 { page.extend(match i % 2 { - 0 => v2_xcm().encode(), - 1 => v3_xcm().encode(), + 0 => versioned_xcm(older_xcm_version).encode(), + 1 => versioned_xcm(newer_xcm_version).encode(), // We cannot push an undecodable XCM here since it would break the decode stream. // This is expected and the whole reason to introduce `MaybeDoubleEncodedVersionedXcm` // instead. @@ -335,12 +338,9 @@ pub(crate) fn mk_page() -> Vec { page } -pub(crate) fn v2_xcm() -> VersionedXcm<()> { - let instr = xcm::v2::Instruction::<()>::ClearOrigin; - VersionedXcm::V2(xcm::v2::Xcm::<()>(vec![instr; 3])) -} - -pub(crate) fn v3_xcm() -> VersionedXcm<()> { - let instr = xcm::v3::Instruction::<()>::Trap(1); - VersionedXcm::V3(xcm::v3::Xcm::<()>(vec![instr; 3])) +pub(crate) fn versioned_xcm(version: XcmVersion) -> VersionedXcm<()> { + let instr = Instruction::<()>::Trap(1); + VersionedXcm::from(Xcm::<()>(vec![instr; 3])) + .into_version(version) + .expect("Version conversion should work") } diff --git a/cumulus/pallets/xcmp-queue/src/tests.rs b/cumulus/pallets/xcmp-queue/src/tests.rs index 7c02059e5a9..cdf41e27f0b 100644 --- a/cumulus/pallets/xcmp-queue/src/tests.rs +++ b/cumulus/pallets/xcmp-queue/src/tests.rs @@ -14,7 +14,7 @@ // limitations under the License. 
use super::{ - mock::{mk_page, v2_xcm, v3_xcm, EnqueuedMessages, HRMP_PARA_ID}, + mock::{mk_page, versioned_xcm, EnqueuedMessages, HRMP_PARA_ID}, *, }; use XcmpMessageFormat::*; @@ -536,8 +536,8 @@ fn hrmp_signals_are_prioritized() { #[test] fn maybe_double_encoded_versioned_xcm_works() { // pre conditions - assert_eq!(VersionedXcm::<()>::V2(Default::default()).encode(), &[2, 0]); assert_eq!(VersionedXcm::<()>::V3(Default::default()).encode(), &[3, 0]); + assert_eq!(VersionedXcm::<()>::V4(Default::default()).encode(), &[4, 0]); } // Now also testing a page instead of just concat messages. @@ -545,15 +545,18 @@ fn maybe_double_encoded_versioned_xcm_works() { fn maybe_double_encoded_versioned_xcm_decode_page_works() { let page = mk_page(); + let newer_xcm_version = xcm::prelude::XCM_VERSION; + let older_xcm_version = newer_xcm_version - 1; + // Now try to decode the page. let input = &mut &page[..]; for i in 0..100 { match (i % 2, VersionedXcm::<()>::decode(input)) { (0, Ok(xcm)) => { - assert_eq!(xcm, v2_xcm()); + assert_eq!(xcm, versioned_xcm(older_xcm_version)); }, (1, Ok(xcm)) => { - assert_eq!(xcm, v3_xcm()); + assert_eq!(xcm, versioned_xcm(newer_xcm_version)); }, unexpected => unreachable!("{:?}", unexpected), } @@ -568,14 +571,17 @@ fn take_first_concatenated_xcm_works() { let page = mk_page(); let input = &mut &page[..]; + let newer_xcm_version = xcm::prelude::XCM_VERSION; + let older_xcm_version = newer_xcm_version - 1; + for i in 0..100 { let xcm = XcmpQueue::take_first_concatenated_xcm(input, &mut WeightMeter::new()).unwrap(); match (i % 2, xcm) { (0, data) | (2, data) => { - assert_eq!(data, v2_xcm().encode()); + assert_eq!(data, versioned_xcm(older_xcm_version).encode()); }, (1, data) | (3, data) => { - assert_eq!(data, v3_xcm().encode()); + assert_eq!(data, versioned_xcm(newer_xcm_version).encode()); }, unexpected => unreachable!("{:?}", unexpected), } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs index a1d871cdb61..78788634e6f 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs @@ -61,10 +61,13 @@ fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable #[test] fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { // Initially set only default version on all runtimes - AssetHubRococo::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); - BridgeHubRococo::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); - BridgeHubWestend::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); - AssetHubWestend::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); + let newer_xcm_version = xcm::prelude::XCM_VERSION; + let older_xcm_version = newer_xcm_version - 1; + + AssetHubRococo::force_default_xcm_version(Some(older_xcm_version)); + BridgeHubRococo::force_default_xcm_version(Some(older_xcm_version)); + BridgeHubWestend::force_default_xcm_version(Some(older_xcm_version)); + AssetHubWestend::force_default_xcm_version(Some(older_xcm_version)); // prepare data let destination = asset_hub_westend_location(); @@ -87,42 +90,12 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { ); // set destination version - 
AssetHubRococo::force_xcm_version(destination.clone(), xcm::v3::prelude::XCM_VERSION); - - // TODO: remove this block, when removing `xcm:v2` - { - // send XCM from AssetHubRococo - fails - AssetHubRococo is set to the default/safe `2` - // version, which does not have the `ExportMessage` instruction. If the default `2` is - // changed to `3`, then this assert can go away" - assert_err!( - send_asset_from_asset_hub_rococo(destination.clone(), (native_token.clone(), amount)), - DispatchError::Module(sp_runtime::ModuleError { - index: 31, - error: [1, 0, 0, 0], - message: Some("SendFailure") - }) - ); - - // set exact version for BridgeHubWestend to `2` without `ExportMessage` instruction - AssetHubRococo::force_xcm_version( - ParentThen(Parachain(BridgeHubRococo::para_id().into()).into()).into(), - xcm::v2::prelude::XCM_VERSION, - ); - // send XCM from AssetHubRococo - fails - `ExportMessage` is not in `2` - assert_err!( - send_asset_from_asset_hub_rococo(destination.clone(), (native_token.clone(), amount)), - DispatchError::Module(sp_runtime::ModuleError { - index: 31, - error: [1, 0, 0, 0], - message: Some("SendFailure") - }) - ); - } + AssetHubRococo::force_xcm_version(destination.clone(), newer_xcm_version); // set version with `ExportMessage` for BridgeHubRococo AssetHubRococo::force_xcm_version( ParentThen(Parachain(BridgeHubRococo::para_id().into()).into()).into(), - xcm::v3::prelude::XCM_VERSION, + newer_xcm_version, ); // send XCM from AssetHubRococo - ok assert_ok!(send_asset_from_asset_hub_rococo( @@ -134,14 +107,11 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { assert_bridge_hub_rococo_message_accepted(false); // set version for remote BridgeHub on BridgeHubRococo - BridgeHubRococo::force_xcm_version( - bridge_hub_westend_location(), - xcm::v3::prelude::XCM_VERSION, - ); + BridgeHubRococo::force_xcm_version(bridge_hub_westend_location(), newer_xcm_version); // set version for AssetHubWestend on BridgeHubWestend BridgeHubWestend::force_xcm_version( ParentThen(Parachain(AssetHubWestend::para_id().into()).into()).into(), - xcm::v3::prelude::XCM_VERSION, + newer_xcm_version, ); // send XCM from AssetHubRococo - ok @@ -164,20 +134,4 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { ] ); }); - - // TODO: remove this block, when removing `xcm:v2` - { - // set `2` version for remote BridgeHub on BridgeHubRococo, which does not have - // `UniversalOrigin` and `DescendOrigin` - BridgeHubRococo::force_xcm_version( - bridge_hub_westend_location(), - xcm::v2::prelude::XCM_VERSION, - ); - - // send XCM from AssetHubRococo - ok - assert_ok!(send_asset_from_asset_hub_rococo(destination, (native_token, amount))); - // message is not accepted on the local BridgeHub (`DestinationUnsupported`) because we - // cannot add `UniversalOrigin` and `DescendOrigin` - assert_bridge_hub_rococo_message_accepted(false); - } } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs index b01be5e8dc8..8539df97be9 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs @@ -61,10 +61,13 @@ fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable #[test] fn 
send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { // Initially set only default version on all runtimes - AssetHubRococo::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); - BridgeHubRococo::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); - BridgeHubWestend::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); - AssetHubWestend::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); + let newer_xcm_version = xcm::prelude::XCM_VERSION; + let older_xcm_version = newer_xcm_version - 1; + + AssetHubRococo::force_default_xcm_version(Some(older_xcm_version)); + BridgeHubRococo::force_default_xcm_version(Some(older_xcm_version)); + BridgeHubWestend::force_default_xcm_version(Some(older_xcm_version)); + AssetHubWestend::force_default_xcm_version(Some(older_xcm_version)); // prepare data let destination = asset_hub_rococo_location(); @@ -87,42 +90,12 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { ); // set destination version - AssetHubWestend::force_xcm_version(destination.clone(), xcm::v3::prelude::XCM_VERSION); - - // TODO: remove this block, when removing `xcm:v2` - { - // send XCM from AssetHubRococo - fails - AssetHubRococo is set to the default/safe `2` - // version, which does not have the `ExportMessage` instruction. If the default `2` is - // changed to `3`, then this assert can go away" - assert_err!( - send_asset_from_asset_hub_westend(destination.clone(), (native_token.clone(), amount)), - DispatchError::Module(sp_runtime::ModuleError { - index: 31, - error: [1, 0, 0, 0], - message: Some("SendFailure") - }) - ); - - // set exact version for BridgeHubWestend to `2` without `ExportMessage` instruction - AssetHubWestend::force_xcm_version( - ParentThen(Parachain(BridgeHubWestend::para_id().into()).into()).into(), - xcm::v2::prelude::XCM_VERSION, - ); - // send XCM from AssetHubWestend - fails - `ExportMessage` is not in `2` - assert_err!( - send_asset_from_asset_hub_westend(destination.clone(), (native_token.clone(), amount)), - DispatchError::Module(sp_runtime::ModuleError { - index: 31, - error: [1, 0, 0, 0], - message: Some("SendFailure") - }) - ); - } + AssetHubWestend::force_xcm_version(destination.clone(), newer_xcm_version); // set version with `ExportMessage` for BridgeHubWestend AssetHubWestend::force_xcm_version( ParentThen(Parachain(BridgeHubWestend::para_id().into()).into()).into(), - xcm::v3::prelude::XCM_VERSION, + newer_xcm_version, ); // send XCM from AssetHubWestend - ok assert_ok!(send_asset_from_asset_hub_westend( @@ -134,14 +107,11 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { assert_bridge_hub_westend_message_accepted(false); // set version for remote BridgeHub on BridgeHubWestend - BridgeHubWestend::force_xcm_version( - bridge_hub_rococo_location(), - xcm::v3::prelude::XCM_VERSION, - ); + BridgeHubWestend::force_xcm_version(bridge_hub_rococo_location(), newer_xcm_version); // set version for AssetHubRococo on BridgeHubRococo BridgeHubRococo::force_xcm_version( ParentThen(Parachain(AssetHubRococo::para_id().into()).into()).into(), - xcm::v3::prelude::XCM_VERSION, + newer_xcm_version, ); // send XCM from AssetHubWestend - ok @@ -164,20 +134,4 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { ] ); }); - - // TODO: remove this block, when removing `xcm:v2` - { - // set `2` version for remote BridgeHub on BridgeHubRococo, which does not have - // `UniversalOrigin` and `DescendOrigin` - 
BridgeHubWestend::force_xcm_version(
- bridge_hub_rococo_location(),
- xcm::v2::prelude::XCM_VERSION,
- );
-
- // send XCM from AssetHubWestend - ok
- assert_ok!(send_asset_from_asset_hub_westend(destination, (native_token, amount)));
- // message is not accepted on the local BridgeHub (`DestinationUnsupported`) because we
- // cannot add `UniversalOrigin` and `DescendOrigin`
- assert_bridge_hub_westend_message_accepted(false);
- }
 }
diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs
index 081a4235b77..da46a6a37c0 100644
--- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs
+++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs
@@ -15,12 +15,11 @@
 // along with Polkadot. If not, see .
 use super::*;
-use bounded_collections::{ConstU32, WeakBoundedVec};
 use frame_benchmarking::{benchmarks, whitelisted_caller, BenchmarkError, BenchmarkResult};
 use frame_support::{assert_ok, weights::Weight};
 use frame_system::RawOrigin;
 use sp_std::prelude::*;
-use xcm::{latest::prelude::*, v2};
+use xcm::latest::prelude::*;
 use xcm_builder::EnsureDelivery;
 use xcm_executor::traits::FeeReason;
@@ -313,15 +312,17 @@ benchmarks! {
 }
 notify_target_migration_fail {
- let bad_loc: v2::MultiLocation = v2::Junction::Plurality {
- id: v2::BodyId::Named(WeakBoundedVec::<u8, ConstU32<32>>::try_from(vec![0; 32])
- .expect("vec has a length of 32 bits; qed")),
- part: v2::BodyPart::Voice,
- }
- .into();
- let bad_loc = VersionedLocation::from(bad_loc);
+ let newer_xcm_version = xcm::prelude::XCM_VERSION;
+ let older_xcm_version = newer_xcm_version - 1;
+ let bad_location: Location = Plurality {
+ id: BodyId::Unit,
+ part: BodyPart::Voice,
+ }.into();
+ let bad_location = VersionedLocation::from(bad_location)
+ .into_version(older_xcm_version)
+ .expect("Version conversion should work");
 let current_version = T::AdvertisedXcmVersion::get();
- VersionNotifyTargets::<T>::insert(current_version, bad_loc, (0, Weight::zero(), current_version));
+ VersionNotifyTargets::<T>::insert(current_version, bad_location, (0, Weight::zero(), current_version));
 }: {
 crate::Pallet::<T>::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero());
 }
diff --git a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs
index f42e220d693..af81ac9cf43 100644
--- a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs
+++ b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs
@@ -76,7 +76,7 @@ fn limited_teleport_assets_works() {
 )]
 );
 let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1);
- let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap();
+ let _check_v3_ok: xcm::v3::Xcm<()> = versioned_sent.try_into().unwrap();
 let mut last_events = last_events(3).into_iter();
 assert_eq!(
diff --git a/polkadot/xcm/pallet-xcm/src/tests/mod.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs
index 02aeafd68e8..c16c1a1ba98 100644
--- a/polkadot/xcm/pallet-xcm/src/tests/mod.rs
+++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs
@@ -602,11 +602,11 @@ fn basic_subscription_works() {
 let weight = BaseXcmWeight::get();
 let mut message = Xcm::<()>(vec![
- // Remote supports XCM v2
+ // Remote supports XCM v3
 QueryResponse {
 query_id: 0,
 max_weight: Weight::zero(),
- response: Response::Version(1),
+ response: Response::Version(3),
 querier: None,
 },
 ]);
@@ -764,14 +764,14 @@ fn subscription_side_upgrades_work_with_notify() {
 new_test_ext_with_balances(vec![]).execute_with(|| {
 AdvertisedXcmVersion::set(1);
- // An entry from a
previous runtime with v2 XCM. - let v2_location = VersionedLocation::V2(xcm::v2::Junction::Parachain(1001).into()); - VersionNotifyTargets::::insert(1, v2_location, (70, Weight::zero(), 2)); - let v3_location = Parachain(1003).into_versioned(); - VersionNotifyTargets::::insert(3, v3_location, (72, Weight::zero(), 2)); + // An entry from a previous runtime with v3 XCM. + let v3_location = VersionedLocation::V3(xcm::v3::Junction::Parachain(1001).into()); + VersionNotifyTargets::::insert(3, v3_location, (70, Weight::zero(), 3)); + let v4_location = Parachain(1003).into_versioned(); + VersionNotifyTargets::::insert(4, v4_location, (72, Weight::zero(), 3)); // New version. - AdvertisedXcmVersion::set(3); + AdvertisedXcmVersion::set(4); // A runtime upgrade which alters the version does send notifications. CurrentMigration::::put(VersionMigrationStage::default()); @@ -780,13 +780,13 @@ fn subscription_side_upgrades_work_with_notify() { let instr1 = QueryResponse { query_id: 70, max_weight: Weight::zero(), - response: Response::Version(3), + response: Response::Version(4), querier: None, }; let instr3 = QueryResponse { query_id: 72, max_weight: Weight::zero(), - response: Response::Version(3), + response: Response::Version(4), querier: None, }; let mut sent = take_sent_xcm(); @@ -807,8 +807,8 @@ fn subscription_side_upgrades_work_with_notify() { assert_eq!( contents, vec![ - (XCM_VERSION, Parachain(1001).into_versioned(), (70, Weight::zero(), 3)), - (XCM_VERSION, Parachain(1003).into_versioned(), (72, Weight::zero(), 3)), + (XCM_VERSION, Parachain(1001).into_versioned(), (70, Weight::zero(), 4)), + (XCM_VERSION, Parachain(1003).into_versioned(), (72, Weight::zero(), 4)), ] ); }); @@ -817,11 +817,11 @@ fn subscription_side_upgrades_work_with_notify() { #[test] fn subscription_side_upgrades_work_without_notify() { new_test_ext_with_balances(vec![]).execute_with(|| { - // An entry from a previous runtime with v2 XCM. - let v2_location = VersionedLocation::V2(xcm::v2::Junction::Parachain(1001).into()); - VersionNotifyTargets::::insert(1, v2_location, (70, Weight::zero(), 2)); - let v3_location = Parachain(1003).into_versioned(); - VersionNotifyTargets::::insert(3, v3_location, (72, Weight::zero(), 2)); + // An entry from a previous runtime with v3 XCM. + let v3_location = VersionedLocation::V3(xcm::v3::Junction::Parachain(1001).into()); + VersionNotifyTargets::::insert(3, v3_location, (70, Weight::zero(), 3)); + let v4_location = Parachain(1003).into_versioned(); + VersionNotifyTargets::::insert(4, v4_location, (72, Weight::zero(), 3)); // A runtime upgrade which alters the version does send notifications. CurrentMigration::::put(VersionMigrationStage::default()); @@ -854,11 +854,11 @@ fn subscriber_side_subscription_works() { let weight = BaseXcmWeight::get(); let message = Xcm(vec![ - // Remote supports XCM v2 + // Remote supports XCM v3 QueryResponse { query_id: 0, max_weight: Weight::zero(), - response: Response::Version(1), + response: Response::Version(3), querier: None, }, ]); @@ -872,18 +872,21 @@ fn subscriber_side_subscription_works() { ); assert_eq!(r, Outcome::Complete { used: weight }); assert_eq!(take_sent_xcm(), vec![]); - assert_eq!(XcmPallet::get_version_for(&remote), Some(1)); + assert_eq!(XcmPallet::get_version_for(&remote), Some(3)); - // This message cannot be sent to a v2 remote. - let v2_msg = xcm::v2::Xcm::<()>(vec![xcm::v2::Instruction::Trap(0)]); - assert_eq!(XcmPallet::wrap_version(&remote, v2_msg.clone()), Err(())); + // This message will be sent as v3. 
+ let v4_msg = xcm::v4::Xcm::<()>(vec![xcm::v4::Instruction::Trap(0)]); + assert_eq!( + XcmPallet::wrap_version(&remote, v4_msg.clone()), + Ok(VersionedXcm::V3(xcm::v3::Xcm(vec![xcm::v3::Instruction::Trap(0)]))) + ); let message = Xcm(vec![ - // Remote upgraded to XCM v2 + // Remote upgraded to XCM v4 QueryResponse { query_id: 0, max_weight: Weight::zero(), - response: Response::Version(2), + response: Response::Version(4), querier: None, }, ]); @@ -897,12 +900,12 @@ fn subscriber_side_subscription_works() { ); assert_eq!(r, Outcome::Complete { used: weight }); assert_eq!(take_sent_xcm(), vec![]); - assert_eq!(XcmPallet::get_version_for(&remote), Some(2)); + assert_eq!(XcmPallet::get_version_for(&remote), Some(4)); - // This message can now be sent to remote as it's v2. + // This message is now sent as v4. assert_eq!( - XcmPallet::wrap_version(&remote, v2_msg.clone()), - Ok(VersionedXcm::from(v2_msg)) + XcmPallet::wrap_version(&remote, v4_msg.clone()), + Ok(VersionedXcm::from(v4_msg)) ); }); } @@ -911,30 +914,36 @@ fn subscriber_side_subscription_works() { #[test] fn auto_subscription_works() { new_test_ext_with_balances_and_xcm_version(vec![], None).execute_with(|| { - let remote_v2: Location = Parachain(1000).into(); + let remote_v3: Location = Parachain(1000).into(); let remote_v4: Location = Parachain(1001).into(); - assert_ok!(XcmPallet::force_default_xcm_version(RuntimeOrigin::root(), Some(2))); + assert_ok!(XcmPallet::force_default_xcm_version(RuntimeOrigin::root(), Some(3))); // Wrapping a version for a destination we don't know elicits a subscription. - let msg_v2 = xcm::v2::Xcm::<()>(vec![xcm::v2::Instruction::Trap(0)]); + let msg_v3 = xcm::v3::Xcm::<()>(vec![xcm::v3::Instruction::Trap(0)]); let msg_v4 = xcm::v4::Xcm::<()>(vec![xcm::v4::Instruction::ClearTopic]); assert_eq!( - XcmPallet::wrap_version(&remote_v2, msg_v2.clone()), - Ok(VersionedXcm::from(msg_v2.clone())), + XcmPallet::wrap_version(&remote_v3, msg_v3.clone()), + Ok(VersionedXcm::from(msg_v3.clone())), + ); + assert_eq!( + XcmPallet::wrap_version(&remote_v3, msg_v4.clone()), + Ok(VersionedXcm::V3(xcm::v3::Xcm(vec![xcm::v3::Instruction::ClearTopic]))) ); - assert_eq!(XcmPallet::wrap_version(&remote_v2, msg_v4.clone()), Err(())); - let expected = vec![(remote_v2.clone().into(), 2)]; + let expected = vec![(remote_v3.clone().into(), 2)]; assert_eq!(VersionDiscoveryQueue::::get().into_inner(), expected); assert_eq!( - XcmPallet::wrap_version(&remote_v4, msg_v2.clone()), - Ok(VersionedXcm::from(msg_v2.clone())), + XcmPallet::wrap_version(&remote_v4, msg_v3.clone()), + Ok(VersionedXcm::from(msg_v3.clone())), + ); + assert_eq!( + XcmPallet::wrap_version(&remote_v4, msg_v4.clone()), + Ok(VersionedXcm::V3(xcm::v3::Xcm(vec![xcm::v3::Instruction::ClearTopic]))) ); - assert_eq!(XcmPallet::wrap_version(&remote_v4, msg_v4.clone()), Err(())); - let expected = vec![(remote_v2.clone().into(), 2), (remote_v4.clone().into(), 2)]; + let expected = vec![(remote_v3.clone().into(), 2), (remote_v4.clone().into(), 2)]; assert_eq!(VersionDiscoveryQueue::::get().into_inner(), expected); XcmPallet::on_initialize(1); @@ -968,10 +977,10 @@ fn auto_subscription_works() { ); assert_eq!(r, Outcome::Complete { used: weight }); - // V2 messages can be sent to remote_v4 under XCM v4. + // V3 messages can be sent to remote_v4 under XCM v4. 
assert_eq!( - XcmPallet::wrap_version(&remote_v4, msg_v2.clone()), - Ok(VersionedXcm::from(msg_v2.clone()).into_version(4).unwrap()), + XcmPallet::wrap_version(&remote_v4, msg_v3.clone()), + Ok(VersionedXcm::from(msg_v3.clone()).into_version(4).unwrap()), ); // This message can now be sent to remote_v4 as it's v4. assert_eq!( @@ -983,26 +992,26 @@ fn auto_subscription_works() { assert_eq!( take_sent_xcm(), vec![( - remote_v2.clone(), + remote_v3.clone(), Xcm(vec![SubscribeVersion { query_id: 1, max_response_weight: Weight::zero() }]), )] ); - // Assume remote_v2 is working ok and XCM version 2. + // Assume remote_v3 is working ok and XCM version 3. let weight = BaseXcmWeight::get(); let message = Xcm(vec![ - // Remote supports XCM v2 + // Remote supports XCM v3 QueryResponse { query_id: 1, max_weight: Weight::zero(), - response: Response::Version(2), + response: Response::Version(3), querier: None, }, ]); let mut hash = fake_message_hash(&message); let r = XcmExecutor::::prepare_and_execute( - remote_v2.clone(), + remote_v3.clone(), message, &mut hash, weight, @@ -1010,12 +1019,15 @@ fn auto_subscription_works() { ); assert_eq!(r, Outcome::Complete { used: weight }); - // v4 messages cannot be sent to remote_v2... + // v4 messages cannot be sent to remote_v3... + assert_eq!( + XcmPallet::wrap_version(&remote_v3, msg_v3.clone()), + Ok(VersionedXcm::V3(msg_v3)) + ); assert_eq!( - XcmPallet::wrap_version(&remote_v2, msg_v2.clone()), - Ok(VersionedXcm::V2(msg_v2)) + XcmPallet::wrap_version(&remote_v3, msg_v4.clone()), + Ok(VersionedXcm::V3(xcm::v3::Xcm(vec![xcm::v3::Instruction::ClearTopic]))) ); - assert_eq!(XcmPallet::wrap_version(&remote_v2, msg_v4.clone()), Err(())); }) } @@ -1025,15 +1037,15 @@ fn subscription_side_upgrades_work_with_multistage_notify() { AdvertisedXcmVersion::set(1); // An entry from a previous runtime with v0 XCM. - let v2_location = VersionedLocation::V2(xcm::v2::Junction::Parachain(1001).into()); - VersionNotifyTargets::::insert(1, v2_location, (70, Weight::zero(), 1)); - let v2_location = VersionedLocation::V2(xcm::v2::Junction::Parachain(1002).into()); - VersionNotifyTargets::::insert(2, v2_location, (71, Weight::zero(), 1)); - let v3_location = Parachain(1003).into_versioned(); - VersionNotifyTargets::::insert(3, v3_location, (72, Weight::zero(), 1)); + let v3_location = VersionedLocation::V3(xcm::v3::Junction::Parachain(1001).into()); + VersionNotifyTargets::::insert(3, v3_location, (70, Weight::zero(), 3)); + let v3_location = VersionedLocation::V3(xcm::v3::Junction::Parachain(1002).into()); + VersionNotifyTargets::::insert(3, v3_location, (71, Weight::zero(), 3)); + let v4_location = Parachain(1003).into_versioned(); + VersionNotifyTargets::::insert(4, v4_location, (72, Weight::zero(), 3)); // New version. - AdvertisedXcmVersion::set(3); + AdvertisedXcmVersion::set(4); // A runtime upgrade which alters the version does send notifications. 
CurrentMigration::::put(VersionMigrationStage::default()); @@ -1049,19 +1061,19 @@ fn subscription_side_upgrades_work_with_multistage_notify() { let instr1 = QueryResponse { query_id: 70, max_weight: Weight::zero(), - response: Response::Version(3), + response: Response::Version(4), querier: None, }; let instr2 = QueryResponse { query_id: 71, max_weight: Weight::zero(), - response: Response::Version(3), + response: Response::Version(4), querier: None, }; let instr3 = QueryResponse { query_id: 72, max_weight: Weight::zero(), - response: Response::Version(3), + response: Response::Version(4), querier: None, }; let mut sent = take_sent_xcm(); @@ -1083,9 +1095,9 @@ fn subscription_side_upgrades_work_with_multistage_notify() { assert_eq!( contents, vec![ - (XCM_VERSION, Parachain(1001).into_versioned(), (70, Weight::zero(), 3)), - (XCM_VERSION, Parachain(1002).into_versioned(), (71, Weight::zero(), 3)), - (XCM_VERSION, Parachain(1003).into_versioned(), (72, Weight::zero(), 3)), + (XCM_VERSION, Parachain(1001).into_versioned(), (70, Weight::zero(), 4)), + (XCM_VERSION, Parachain(1002).into_versioned(), (71, Weight::zero(), 4)), + (XCM_VERSION, Parachain(1003).into_versioned(), (72, Weight::zero(), 4)), ] ); }); diff --git a/polkadot/xcm/procedural/tests/conversion_functions.rs b/polkadot/xcm/procedural/tests/conversion_functions.rs index 5b6965167fc..7d2698d2cd7 100644 --- a/polkadot/xcm/procedural/tests/conversion_functions.rs +++ b/polkadot/xcm/procedural/tests/conversion_functions.rs @@ -14,10 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use xcm::v2::prelude::*; +use xcm::v3::prelude::*; #[test] -fn slice_syntax_in_v2_works() { +fn slice_syntax_in_v3_works() { let old_junctions = Junctions::X2(Parachain(1), PalletInstance(1)); let new_junctions = Junctions::from([Parachain(1), PalletInstance(1)]); assert_eq!(old_junctions, new_junctions); diff --git a/polkadot/xcm/src/lib.rs b/polkadot/xcm/src/lib.rs index 513dfe5501b..8b0030e59b5 100644 --- a/polkadot/xcm/src/lib.rs +++ b/polkadot/xcm/src/lib.rs @@ -21,6 +21,8 @@ // // Hence, `no_std` rather than sp-runtime. #![cfg_attr(not(feature = "std"), no_std)] +// Because of XCMv2. +#![allow(deprecated)] extern crate alloc; @@ -28,6 +30,9 @@ use derivative::Derivative; use parity_scale_codec::{Decode, DecodeLimit, Encode, Error as CodecError, Input, MaxEncodedLen}; use scale_info::TypeInfo; +#[deprecated( + note = "XCMv2 will be removed once XCMv5 is released. Please use XCMv3 or XCMv4 instead." 
+)] pub mod v2; pub mod v3; pub mod v4; @@ -425,6 +430,7 @@ pub type VersionedMultiAssets = VersionedAssets; #[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum VersionedXcm { #[codec(index = 2)] + #[deprecated] V2(v2::Xcm), #[codec(index = 3)] V3(v3::Xcm), diff --git a/polkadot/xcm/src/tests.rs b/polkadot/xcm/src/tests.rs index 1aabbcef281..4c666063f3f 100644 --- a/polkadot/xcm/src/tests.rs +++ b/polkadot/xcm/src/tests.rs @@ -158,18 +158,6 @@ fn encode_decode_versioned_multi_assets_v3() { assert_eq!(assets, decoded); } -#[test] -fn encode_decode_versioned_xcm_v2() { - let xcm = VersionedXcm::V2(v2::Xcm::<()>::new()); - let encoded = xcm.encode(); - - assert_eq!(encoded, hex_literal::hex!("0200"), "encode format changed"); - assert_eq!(encoded[0], 2, "bad version number"); - - let decoded = VersionedXcm::decode(&mut &encoded[..]).unwrap(); - assert_eq!(xcm, decoded); -} - #[test] fn encode_decode_versioned_xcm_v3() { let xcm = VersionedXcm::V3(v3::Xcm::<()>::new()); diff --git a/polkadot/xcm/src/v2/mod.rs b/polkadot/xcm/src/v2/mod.rs index 347f3f2c292..7b6858e6a5c 100644 --- a/polkadot/xcm/src/v2/mod.rs +++ b/polkadot/xcm/src/v2/mod.rs @@ -15,6 +15,9 @@ // along with Cumulus. If not, see . //! # XCM Version 2 +//! +//! WARNING: DEPRECATED, please use version 3 or 4. +//! //! Version 2 of the Cross-Consensus Message format data structures. The comprehensive list of //! changes can be found in //! [this PR description](https://github.com/paritytech/polkadot/pull/3629#issue-968428279). @@ -52,8 +55,8 @@ use super::{ v3::{ BodyId as NewBodyId, BodyPart as NewBodyPart, Instruction as NewInstruction, - NetworkId as NewNetworkId, Response as NewResponse, WeightLimit as NewWeightLimit, - Xcm as NewXcm, + NetworkId as NewNetworkId, OriginKind as NewOriginKind, Response as NewResponse, + WeightLimit as NewWeightLimit, Xcm as NewXcm, }, DoubleEncoded, }; @@ -104,6 +107,18 @@ pub enum OriginKind { Xcm, } +impl From for OriginKind { + fn from(new: NewOriginKind) -> Self { + use NewOriginKind::*; + match new { + Native => Self::Native, + SovereignAccount => Self::SovereignAccount, + Superuser => Self::Superuser, + Xcm => Self::Xcm, + } + } +} + /// A global identifier of an account-bearing consensus system. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] @@ -262,6 +277,7 @@ pub const VERSION: super::Version = 2; /// An identifier for a query. pub type QueryId = u64; +/// DEPRECATED. Please use XCMv3 or XCMv4 instead. #[derive(Derivative, Default, Encode, Decode, TypeInfo)] #[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] #[codec(encode_bound())] @@ -1065,7 +1081,7 @@ impl TryFrom> for Instruction Self::HrmpChannelClosing { initiator, sender, recipient }, Transact { origin_kind, require_weight_at_most, call } => Self::Transact { - origin_type: origin_kind, + origin_type: origin_kind.into(), require_weight_at_most: require_weight_at_most.ref_time(), call: call.into(), }, diff --git a/polkadot/xcm/src/v3/mod.rs b/polkadot/xcm/src/v3/mod.rs index e7c57f414eb..8ff661a9bba 100644 --- a/polkadot/xcm/src/v3/mod.rs +++ b/polkadot/xcm/src/v3/mod.rs @@ -16,15 +16,14 @@ //! Version 3 of the Cross-Consensus Message format data structures. 
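The v3 hunk just below moves `OriginKind` out of the deprecated v2 module and, together with the v2-side impl added above, gives `From` conversions in both directions. As a rough illustration (not part of the patch, and assuming the post-patch `staging-xcm` crate), the round trip is lossless because both enums carry identical variants:

```rust
#[allow(deprecated)] // the v2 module is deprecated by this patch
fn origin_kind_roundtrip() {
    use xcm::{v2, v3};

    let old = v2::OriginKind::SovereignAccount;
    // `impl From<v2::OriginKind> for v3::OriginKind` is defined in the v3 module...
    let new = v3::OriginKind::from(old);
    // ...and `impl From<v3::OriginKind> for v2::OriginKind` in the v2 module.
    let back = v2::OriginKind::from(new);
    assert_eq!(old, back);
}
```

Identical variant sets are what allow these to be infallible `From` impls rather than `TryFrom`, unlike the instruction conversions later in this hunk.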
-use super::{ - v2::{ - Instruction as OldInstruction, Response as OldResponse, WeightLimit as OldWeightLimit, - Xcm as OldXcm, - }, - v4::{ - Instruction as NewInstruction, PalletInfo as NewPalletInfo, - QueryResponseInfo as NewQueryResponseInfo, Response as NewResponse, Xcm as NewXcm, - }, +#[allow(deprecated)] +use super::v2::{ + Instruction as OldInstruction, OriginKind as OldOriginKind, Response as OldResponse, + WeightLimit as OldWeightLimit, Xcm as OldXcm, +}; +use super::v4::{ + Instruction as NewInstruction, PalletInfo as NewPalletInfo, + QueryResponseInfo as NewQueryResponseInfo, Response as NewResponse, Xcm as NewXcm, }; use crate::DoubleEncoded; use alloc::{vec, vec::Vec}; @@ -53,11 +52,46 @@ pub use multilocation::{ Ancestor, AncestorThen, InteriorMultiLocation, Location, MultiLocation, Parent, ParentThen, }; pub use traits::{ - send_xcm, validate_send, Error, ExecuteXcm, Outcome, PreparedMessage, Result, SendError, - SendResult, SendXcm, Weight, XcmHash, + send_xcm, validate_send, Error, ExecuteXcm, GetWeight, Outcome, PreparedMessage, Result, + SendError, SendResult, SendXcm, Weight, XcmHash, }; -// These parts of XCM v2 are unchanged in XCM v3, and are re-imported here. -pub use super::v2::{GetWeight, OriginKind}; + +/// Basically just the XCM (more general) version of `ParachainDispatchOrigin`. +#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)] +#[scale_info(replace_segment("staging_xcm", "xcm"))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] +pub enum OriginKind { + /// Origin should just be the native dispatch origin representation for the sender in the + /// local runtime framework. For Cumulus/Frame chains this is the `Parachain` or `Relay` origin + /// if coming from a chain, though there may be others if the `MultiLocation` XCM origin has a + /// primary/native dispatch origin form. + Native, + + /// Origin should just be the standard account-based origin with the sovereign account of + /// the sender. For Cumulus/Frame chains, this is the `Signed` origin. + SovereignAccount, + + /// Origin should be the super-user. For Cumulus/Frame chains, this is the `Root` origin. + /// This will not usually be an available option. + Superuser, + + /// Origin should be interpreted as an XCM native origin and the `MultiLocation` should be + /// encoded directly in the dispatch origin unchanged. For Cumulus/Frame chains, this will be + /// the `pallet_xcm::Origin::Xcm` type. + Xcm, +} + +impl From for OriginKind { + fn from(old: OldOriginKind) -> Self { + use OldOriginKind::*; + match old { + Native => Self::Native, + SovereignAccount => Self::SovereignAccount, + Superuser => Self::Superuser, + Xcm => Self::Xcm, + } + } +} /// This module's XCM version. pub const VERSION: super::Version = 3; @@ -1310,6 +1344,7 @@ impl TryFrom for Response { } // Convert from a v2 XCM to a v3 XCM. 
+#[allow(deprecated)] impl TryFrom> for Xcm { type Error = (); fn try_from(old_xcm: OldXcm) -> result::Result { @@ -1500,7 +1535,7 @@ impl TryFrom> for Instruction { HrmpChannelClosing { initiator, sender, recipient } => Self::HrmpChannelClosing { initiator, sender, recipient }, Transact { origin_type, require_weight_at_most, call } => Self::Transact { - origin_kind: origin_type, + origin_kind: origin_type.into(), require_weight_at_most: Weight::from_parts( require_weight_at_most, DEFAULT_PROOF_SIZE, @@ -1572,118 +1607,6 @@ impl TryFrom> for Instruction { #[cfg(test)] mod tests { use super::{prelude::*, *}; - use crate::v2::{ - Junctions::Here as OldHere, MultiAssetFilter as OldMultiAssetFilter, - WildMultiAsset as OldWildMultiAsset, - }; - - #[test] - fn basic_roundtrip_works() { - let xcm = Xcm::<()>(vec![TransferAsset { - assets: (Here, 1u128).into(), - beneficiary: Here.into(), - }]); - let old_xcm = OldXcm::<()>(vec![OldInstruction::TransferAsset { - assets: (OldHere, 1).into(), - beneficiary: OldHere.into(), - }]); - assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); - let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); - assert_eq!(new_xcm, xcm); - } - - #[test] - fn teleport_roundtrip_works() { - let xcm = Xcm::<()>(vec![ - ReceiveTeleportedAsset((Here, 1u128).into()), - ClearOrigin, - DepositAsset { assets: Wild(AllCounted(1)), beneficiary: Here.into() }, - ]); - let old_xcm: OldXcm<()> = OldXcm::<()>(vec![ - OldInstruction::ReceiveTeleportedAsset((OldHere, 1).into()), - OldInstruction::ClearOrigin, - OldInstruction::DepositAsset { - assets: crate::v2::MultiAssetFilter::Wild(crate::v2::WildMultiAsset::All), - max_assets: 1, - beneficiary: OldHere.into(), - }, - ]); - assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); - let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); - assert_eq!(new_xcm, xcm); - } - - #[test] - fn reserve_deposit_roundtrip_works() { - let xcm = Xcm::<()>(vec![ - ReserveAssetDeposited((Here, 1u128).into()), - ClearOrigin, - BuyExecution { - fees: (Here, 1u128).into(), - weight_limit: Some(Weight::from_parts(1, DEFAULT_PROOF_SIZE)).into(), - }, - DepositAsset { assets: Wild(AllCounted(1)), beneficiary: Here.into() }, - ]); - let old_xcm = OldXcm::<()>(vec![ - OldInstruction::ReserveAssetDeposited((OldHere, 1).into()), - OldInstruction::ClearOrigin, - OldInstruction::BuyExecution { - fees: (OldHere, 1).into(), - weight_limit: Some(1).into(), - }, - OldInstruction::DepositAsset { - assets: crate::v2::MultiAssetFilter::Wild(crate::v2::WildMultiAsset::All), - max_assets: 1, - beneficiary: OldHere.into(), - }, - ]); - assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); - let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); - assert_eq!(new_xcm, xcm); - } - - #[test] - fn deposit_asset_roundtrip_works() { - let xcm = Xcm::<()>(vec![ - WithdrawAsset((Here, 1u128).into()), - DepositAsset { assets: Wild(AllCounted(1)), beneficiary: Here.into() }, - ]); - let old_xcm = OldXcm::<()>(vec![ - OldInstruction::WithdrawAsset((OldHere, 1).into()), - OldInstruction::DepositAsset { - assets: OldMultiAssetFilter::Wild(OldWildMultiAsset::All), - max_assets: 1, - beneficiary: OldHere.into(), - }, - ]); - assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); - let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); - assert_eq!(new_xcm, xcm); - } - - #[test] - fn deposit_reserve_asset_roundtrip_works() { - let xcm = Xcm::<()>(vec![ - WithdrawAsset((Here, 1u128).into()), - DepositReserveAsset { - assets: Wild(AllCounted(1)), - 
dest: Here.into(), - xcm: Xcm::<()>(vec![]), - }, - ]); - let old_xcm = OldXcm::<()>(vec![ - OldInstruction::WithdrawAsset((OldHere, 1).into()), - OldInstruction::DepositReserveAsset { - assets: OldMultiAssetFilter::Wild(OldWildMultiAsset::All), - max_assets: 1, - dest: OldHere.into(), - xcm: OldXcm::<()>(vec![]), - }, - ]); - assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); - let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); - assert_eq!(new_xcm, xcm); - } #[test] fn decoding_respects_limit() { diff --git a/polkadot/xcm/src/v3/traits.rs b/polkadot/xcm/src/v3/traits.rs index cfe387df1a8..680e0bacd0c 100644 --- a/polkadot/xcm/src/v3/traits.rs +++ b/polkadot/xcm/src/v3/traits.rs @@ -25,6 +25,11 @@ pub use sp_weights::Weight; use super::*; +// A simple trait to get the weight of some object. +pub trait GetWeight { + fn weight(&self) -> sp_weights::Weight; +} + /// Error codes used in XCM. The first errors codes have explicit indices and are part of the XCM /// format. Those trailing are merely part of the XCM implementation; there is no expectation that /// they will retain the same index over time. diff --git a/polkadot/xcm/src/v4/mod.rs b/polkadot/xcm/src/v4/mod.rs index 77b6d915fcb..e1ca60087b1 100644 --- a/polkadot/xcm/src/v4/mod.rs +++ b/polkadot/xcm/src/v4/mod.rs @@ -16,7 +16,7 @@ //! Version 4 of the Cross-Consensus Message format data structures. -pub use super::v2::GetWeight; +pub use super::v3::GetWeight; use super::v3::{ Instruction as OldInstruction, PalletInfo as OldPalletInfo, QueryResponseInfo as OldQueryResponseInfo, Response as OldResponse, Xcm as OldXcm, diff --git a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs index 7760274f6e2..449cda3d232 100644 --- a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs +++ b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs @@ -124,7 +124,7 @@ mod tests { }; use parity_scale_codec::Encode; use polkadot_test_runtime::*; - use xcm::{v2, v3, VersionedXcm}; + use xcm::{v3, v4, VersionedXcm}; const ORIGIN: Junction = Junction::OnlyChild; /// The processor to use for tests. @@ -134,8 +134,8 @@ mod tests { #[test] fn process_message_trivial_works() { // ClearOrigin works. - assert!(process(v2_xcm(true)).unwrap()); assert!(process(v3_xcm(true)).unwrap()); + assert!(process(v4_xcm(true)).unwrap()); } #[test] @@ -194,7 +194,7 @@ mod tests { #[test] fn process_message_overweight_fails() { - for msg in [v3_xcm(true), v3_xcm(false), v3_xcm(false), v2_xcm(false)] { + for msg in [v4_xcm(true), v4_xcm(false), v4_xcm(false), v3_xcm(false)] { let msg = &msg.encode()[..]; // Errors if we stay below a weight limit of 1000. 
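For context on the test helpers being renamed below: a `VersionedXcm` carries its version as the SCALE enum index (2, 3 or 4), and `into_version` converts between concrete versions whenever every instruction exists on both sides. A minimal sketch of both mechanisms, assuming only the `staging-xcm` and `parity-scale-codec` crates (illustrative, not part of the patch):

```rust
use parity_scale_codec::{Decode, Encode};
use xcm::{v3, v4, VersionedXcm};

fn versioned_xcm_roundtrip() {
    // The same `ClearOrigin` program, wrapped at two different versions.
    let v3_msg: VersionedXcm<()> = VersionedXcm::V3(v3::Xcm(vec![v3::Instruction::ClearOrigin]));
    let v4_msg: VersionedXcm<()> = VersionedXcm::V4(v4::Xcm(vec![v4::Instruction::ClearOrigin]));

    // The first encoded byte is the enum index, which is how a processor
    // such as `ProcessXcmMessage` picks the right decoder.
    let bytes = v3_msg.encode();
    assert_eq!(bytes[0], 3);
    assert_eq!(VersionedXcm::<()>::decode(&mut &bytes[..]).unwrap(), v3_msg);

    // Conversion works instruction-by-instruction: `ClearOrigin` exists in v3,
    // so the v4 program downgrades cleanly.
    assert!(matches!(v4_msg.into_version(3), Ok(VersionedXcm::V3(_))));
}
```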
@@ -216,7 +216,7 @@
 }
 }
- fn v2_xcm(success: bool) -> VersionedXcm<RuntimeCall> {
+ fn v3_xcm(success: bool) -> VersionedXcm<RuntimeCall> {
 let instr = if success {
 v3::Instruction::<RuntimeCall>::ClearOrigin
 } else {
@@ -225,13 +225,13 @@
 VersionedXcm::V3(v3::Xcm::<RuntimeCall>(vec![instr]))
 }
- fn v3_xcm(success: bool) -> VersionedXcm<RuntimeCall> {
+ fn v4_xcm(success: bool) -> VersionedXcm<RuntimeCall> {
 let instr = if success {
- v2::Instruction::<RuntimeCall>::ClearOrigin
+ v4::Instruction::<RuntimeCall>::ClearOrigin
 } else {
- v2::Instruction::<RuntimeCall>::Trap(1)
+ v4::Instruction::<RuntimeCall>::Trap(1)
 };
- VersionedXcm::V2(v2::Xcm::<RuntimeCall>(vec![instr]))
+ VersionedXcm::V4(v4::Xcm::<RuntimeCall>(vec![instr]))
 }
 fn process(msg: VersionedXcm<RuntimeCall>) -> Result<bool, ProcessMessageError> {
diff --git a/prdoc/pr_4131.prdoc b/prdoc/pr_4131.prdoc
new file mode 100644
index 00000000000..b0619eabe13
--- /dev/null
+++ b/prdoc/pr_4131.prdoc
@@ -0,0 +1,26 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Deprecate XCMv2
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      XCMv2 has been deprecated. It will be removed when XCMv5 is released.
+      Use version 3 or 4 instead.
+  - audience: Runtime User
+    description: |
+      XCMv2 has been deprecated. It will be removed when XCMv5 is released.
+      Use version 3 or 4 instead.
+
+crates:
+- name: staging-xcm
+  bump: minor
+- name: xcm-procedural
+  bump: minor
+- name: staging-xcm-builder
+  bump: minor
+- name: pallet-xcm
+  bump: minor
+- name: cumulus-pallet-xcmp-queue
+  bump: minor
--
GitLab

From f6cca7ee187d0946e4f3d1fa33928beacfce6e40 Mon Sep 17 00:00:00 2001
From: Svyatoslav Nikolsky
Date: Mon, 27 May 2024 10:23:40 +0300
Subject: [PATCH 064/106] Bridge: check submit_finality_proof limits before submission (#4549)

closes https://github.com/paritytech/parity-bridges-common/issues/2982
closes https://github.com/paritytech/parity-bridges-common/issues/2730

The main change is in bridges/relays/lib-substrate-relay/src/finality/target.rs; changes in the other files just move code around.

~I haven't been able to run zn tests locally - don't know why, but it keeps failing for me locally with: ` Error running script: /home/svyatonik/dev/polkadot-sdk/bridges/testing/framework/js-helpers/wait-hrmp-channel-opened.js Error: Timeout(300), "custom-js /home/svyatonik/dev/polkadot-sdk/bridges/testing/framework/js-helpers/wait-hrmp-channel-opened.js within 300 secs" didn't complete on time.`~

The issue was an obsolete `polkadot-js-api` binary - did `yarn global upgrade` and it is ok now
---
 bridges/modules/grandpa/src/call_ext.rs | 56 ++++-----------
 bridges/primitives/header-chain/src/lib.rs | 68 ++++++++++++++++++-
 bridges/relays/client-substrate/src/error.rs | 7 ++
 .../src/finality/target.rs | 10 +++
 .../src/finality_base/engine.rs | 44 +++---------
 .../src/on_demand/headers.rs | 13 ++--
 6 files changed, 111 insertions(+), 87 deletions(-)

diff --git a/bridges/modules/grandpa/src/call_ext.rs b/bridges/modules/grandpa/src/call_ext.rs
index 98fbeaa30bb..f08eb4c5d1a 100644
--- a/bridges/modules/grandpa/src/call_ext.rs
+++ b/bridges/modules/grandpa/src/call_ext.rs
@@ -18,12 +18,8 @@ use crate::{
 weights::WeightInfo, BestFinalized, BridgedBlockNumber, BridgedHeader, Config,
 CurrentAuthoritySet, Error, FreeHeadersRemaining, Pallet,
 };
-use bp_header_chain::{
- justification::GrandpaJustification, max_expected_submit_finality_proof_arguments_size,
- ChainWithGrandpa, GrandpaConsensusLogReader,
-};
+use bp_header_chain::{justification::GrandpaJustification, submit_finality_proof_limits_extras};
 use
bp_runtime::{BlockNumberOf, Chain, OwnedBridgeModule}; -use codec::Encode; use frame_support::{ dispatch::CallableCallFor, traits::{Get, IsSubType}, @@ -303,53 +299,31 @@ pub(crate) fn submit_finality_proof_info_from_args, I: 'static>( current_set_id: Option, is_free_execution_expected: bool, ) -> SubmitFinalityProofInfo> { - let block_number = *finality_target.number(); - - // the `submit_finality_proof` call will reject justifications with invalid, duplicate, - // unknown and extra signatures. It'll also reject justifications with less than necessary - // signatures. So we do not care about extra weight because of additional signatures here. - let precommits_len = justification.commit.precommits.len().saturated_into(); - let required_precommits = precommits_len; + // check if call exceeds limits. In other words - whether some size or weight is included + // in the call + let extras = + submit_finality_proof_limits_extras::(finality_target, justification); // We do care about extra weight because of more-than-expected headers in the votes // ancestries. But we have problems computing extra weight for additional headers (weight of // additional header is too small, so that our benchmarks aren't detecting that). So if there // are more than expected headers in votes ancestries, we will treat the whole call weight // as an extra weight. - let votes_ancestries_len = justification.votes_ancestries.len().saturated_into(); - let extra_weight = - if votes_ancestries_len > T::BridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY { - T::WeightInfo::submit_finality_proof(precommits_len, votes_ancestries_len) - } else { - Weight::zero() - }; - - // check if the `finality_target` is a mandatory header. If so, we are ready to refund larger - // size - let is_mandatory_finality_target = - GrandpaConsensusLogReader::>::find_scheduled_change( - finality_target.digest(), - ) - .is_some(); - - // we can estimate extra call size easily, without any additional significant overhead - let actual_call_size: u32 = finality_target - .encoded_size() - .saturating_add(justification.encoded_size()) - .saturated_into(); - let max_expected_call_size = max_expected_submit_finality_proof_arguments_size::( - is_mandatory_finality_target, - required_precommits, - ); - let extra_size = actual_call_size.saturating_sub(max_expected_call_size); + let extra_weight = if extras.is_weight_limit_exceeded { + let precommits_len = justification.commit.precommits.len().saturated_into(); + let votes_ancestries_len = justification.votes_ancestries.len().saturated_into(); + T::WeightInfo::submit_finality_proof(precommits_len, votes_ancestries_len) + } else { + Weight::zero() + }; SubmitFinalityProofInfo { - block_number, + block_number: *finality_target.number(), current_set_id, - is_mandatory: is_mandatory_finality_target, + is_mandatory: extras.is_mandatory_finality_target, is_free_execution_expected, extra_weight, - extra_size, + extra_size: extras.extra_size, } } diff --git a/bridges/primitives/header-chain/src/lib.rs b/bridges/primitives/header-chain/src/lib.rs index ad496012c6a..af2afb65a26 100644 --- a/bridges/primitives/header-chain/src/lib.rs +++ b/bridges/primitives/header-chain/src/lib.rs @@ -24,8 +24,8 @@ use crate::justification::{ GrandpaJustification, JustificationVerificationContext, JustificationVerificationError, }; use bp_runtime::{ - BasicOperatingMode, Chain, HashOf, HasherOf, HeaderOf, RawStorageProof, StorageProofChecker, - StorageProofError, UnderlyingChainProvider, + BasicOperatingMode, BlockNumberOf, Chain, 
HashOf, HasherOf, HeaderOf, RawStorageProof, + StorageProofChecker, StorageProofError, UnderlyingChainProvider, }; use codec::{Codec, Decode, Encode, EncodeLike, MaxEncodedLen}; use core::{clone::Clone, cmp::Eq, default::Default, fmt::Debug}; @@ -35,7 +35,7 @@ use serde::{Deserialize, Serialize}; use sp_consensus_grandpa::{ AuthorityList, ConsensusLog, ScheduledChange, SetId, GRANDPA_ENGINE_ID, }; -use sp_runtime::{traits::Header as HeaderT, Digest, RuntimeDebug}; +use sp_runtime::{traits::Header as HeaderT, Digest, RuntimeDebug, SaturatedConversion}; use sp_std::{boxed::Box, vec::Vec}; pub mod justification; @@ -325,6 +325,68 @@ where const AVERAGE_HEADER_SIZE: u32 = ::AVERAGE_HEADER_SIZE; } +/// Result of checking maximal expected submit finality proof call weight and size. +#[derive(Debug)] +pub struct SubmitFinalityProofCallExtras { + /// If true, the call weight is larger than what we have assumed. + /// + /// We have some assumptions about headers and justifications of the bridged chain. + /// We know that if our assumptions are correct, then the call must not have the + /// weight above some limit. The fee paid for weight above that limit, is never refunded. + pub is_weight_limit_exceeded: bool, + /// Extra size (in bytes) that we assume are included in the call. + /// + /// We have some assumptions about headers and justifications of the bridged chain. + /// We know that if our assumptions are correct, then the call must not have the + /// weight above some limit. The fee paid for bytes above that limit, is never refunded. + pub extra_size: u32, + /// A flag that is true if the header is the mandatory header that enacts new + /// authorities set. + pub is_mandatory_finality_target: bool, +} + +/// Checks whether the given `header` and its finality `proof` fit the maximal expected +/// call limits (size and weight). The submission may be refunded sometimes (see pallet +/// configuration for details), but it should fit some limits. If the call has some extra +/// weight and/or size included, though, we won't refund it or refund will be partial. +pub fn submit_finality_proof_limits_extras( + header: &C::Header, + proof: &justification::GrandpaJustification, +) -> SubmitFinalityProofCallExtras { + // the `submit_finality_proof` call will reject justifications with invalid, duplicate, + // unknown and extra signatures. It'll also reject justifications with less than necessary + // signatures. So we do not care about extra weight because of additional signatures here. + let precommits_len = proof.commit.precommits.len().saturated_into(); + let required_precommits = precommits_len; + + // the weight check is simple - we assume that there are no more than the `limit` + // headers in the ancestry proof + let votes_ancestries_len: u32 = proof.votes_ancestries.len().saturated_into(); + let is_weight_limit_exceeded = + votes_ancestries_len > C::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY; + + // check if the `finality_target` is a mandatory header. 
If so, we are ready to refund larger + // size + let is_mandatory_finality_target = + GrandpaConsensusLogReader::>::find_scheduled_change(header.digest()) + .is_some(); + + // we can estimate extra call size easily, without any additional significant overhead + let actual_call_size: u32 = + header.encoded_size().saturating_add(proof.encoded_size()).saturated_into(); + let max_expected_call_size = max_expected_submit_finality_proof_arguments_size::( + is_mandatory_finality_target, + required_precommits, + ); + let extra_size = actual_call_size.saturating_sub(max_expected_call_size); + + SubmitFinalityProofCallExtras { + is_weight_limit_exceeded, + extra_size, + is_mandatory_finality_target, + } +} + /// Returns maximal expected size of `submit_finality_proof` call arguments. pub fn max_expected_submit_finality_proof_arguments_size( is_mandatory_finality_target: bool, diff --git a/bridges/relays/client-substrate/src/error.rs b/bridges/relays/client-substrate/src/error.rs index 0b446681818..2133c188878 100644 --- a/bridges/relays/client-substrate/src/error.rs +++ b/bridges/relays/client-substrate/src/error.rs @@ -17,6 +17,7 @@ //! Substrate node RPC errors. use crate::SimpleRuntimeVersion; +use bp_header_chain::SubmitFinalityProofCallExtras; use bp_polkadot_core::parachains::ParaId; use jsonrpsee::core::ClientError as RpcError; use relay_utils::MaybeConnectionError; @@ -129,6 +130,12 @@ pub enum Error { /// Actual runtime version. actual: SimpleRuntimeVersion, }, + /// Finality proof submission exceeds size and/or weight limits. + #[error("Finality proof submission exceeds limits: {extras:?}")] + FinalityProofWeightLimitExceeded { + /// Finality proof submission extras. + extras: SubmitFinalityProofCallExtras, + }, /// Custom logic error. #[error("{0}")] Custom(String), diff --git a/bridges/relays/lib-substrate-relay/src/finality/target.rs b/bridges/relays/lib-substrate-relay/src/finality/target.rs index 0874fa53549..52ab2462c62 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/target.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/target.rs @@ -137,6 +137,16 @@ impl TargetClient: Send { @@ -129,12 +115,11 @@ pub trait Engine: Send { ) -> Result; /// Checks whether the given `header` and its finality `proof` fit the maximal expected - /// call size limit. If result is `MaxExpectedCallSizeCheck::Exceeds { .. }`, this - /// submission won't be fully refunded and relayer will spend its own funds on that. - fn check_max_expected_call_size( + /// call limits (size and weight). + fn check_max_expected_call_limits( header: &C::Header, proof: &Self::FinalityProof, - ) -> MaxExpectedCallSizeCheck; + ) -> SubmitFinalityProofCallExtras; /// Prepare initialization data for the finality bridge pallet. 
async fn prepare_initialization_data( @@ -245,22 +230,11 @@ impl Engine for Grandpa { }) } - fn check_max_expected_call_size( + fn check_max_expected_call_limits( header: &C::Header, proof: &Self::FinalityProof, - ) -> MaxExpectedCallSizeCheck { - let is_mandatory = Self::ConsensusLogReader::schedules_authorities_change(header.digest()); - let call_size: u32 = - header.encoded_size().saturating_add(proof.encoded_size()).saturated_into(); - let max_call_size = max_expected_submit_finality_proof_arguments_size::( - is_mandatory, - proof.commit.precommits.len().saturated_into(), - ); - if call_size > max_call_size { - MaxExpectedCallSizeCheck::Exceeds { call_size, max_call_size } - } else { - MaxExpectedCallSizeCheck::Ok - } + ) -> SubmitFinalityProofCallExtras { + bp_header_chain::submit_finality_proof_limits_extras::(header, proof) } /// Prepare initialization data for the GRANDPA verifier pallet. diff --git a/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs b/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs index 74f3a70c5e8..202f53ea4e4 100644 --- a/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs +++ b/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs @@ -16,9 +16,7 @@ //! On-demand Substrate -> Substrate header finality relay. -use crate::{ - finality::SubmitFinalityProofCallBuilder, finality_base::engine::MaxExpectedCallSizeCheck, -}; +use crate::finality::SubmitFinalityProofCallBuilder; use async_std::sync::{Arc, Mutex}; use async_trait::async_trait; @@ -156,22 +154,21 @@ impl OnDemandRelay Date: Mon, 27 May 2024 10:33:14 +0300 Subject: [PATCH 065/106] Bridge: add subcommand to relay messages delivery confirmation (#4453) related to https://github.com/paritytech/parity-bridges-common/issues/2962 on top of #4383 Example: ```sh RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ ./target/release/substrate-relay relay-messages-delivery-confirmation bridge-hub-rococo-to-bridge-hub-westend \ --source-host localhost \ --source-port 8943 \ --source-version-mode Auto \ --source-signer //Eve \ --source-transactions-mortality 4 \ --target-host localhost \ --target-port 8945 \ --target-version-mode Auto \ --lane 00000002 \ --at-target-block 49 ``` --- .../src/cli/relay_messages.rs | 51 +++++++++++++++++++ .../lib-substrate-relay/src/messages_lane.rs | 42 ++++++++++++++- .../src/messages_target.rs | 19 ++++--- bridges/relays/messages/src/lib.rs | 1 + .../messages/src/message_race_receiving.rs | 39 +++++++++++++- 5 files changed, 143 insertions(+), 9 deletions(-) diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs index e5b07b24158..943feba072e 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs @@ -80,6 +80,24 @@ pub struct RelayMessagesRangeParams { target_sign: TargetSigningParams, } +/// Messages delivery confirmation relaying params. +#[derive(StructOpt)] +pub struct RelayMessagesDeliveryConfirmationParams { + /// Number of the target chain header that we will use to prepare a messages + /// delivery proof. This header must be previously proved to the source chain. + #[structopt(long)] + at_target_block: u128, + /// Hex-encoded lane id that should be served by the relay. Defaults to `00000000`. 
+ #[structopt(long, default_value = "00000000")] + lane: HexLaneId, + #[structopt(flatten)] + source: SourceConnectionParams, + #[structopt(flatten)] + source_sign: SourceSigningParams, + #[structopt(flatten)] + target: TargetConnectionParams, +} + /// Trait used for relaying messages between 2 chains. #[async_trait] pub trait MessagesRelayer: MessagesCliBridge @@ -154,4 +172,37 @@ where ) .await } + + /// Relay a messages delivery confirmation. + async fn relay_messages_delivery_confirmation( + data: RelayMessagesDeliveryConfirmationParams, + ) -> anyhow::Result<()> { + let source_client = data.source.into_client::().await?; + let target_client = data.target.into_client::().await?; + let source_sign = data.source_sign.to_keypair::()?; + let source_transactions_mortality = data.source_sign.transactions_mortality()?; + + let at_target_block = target_client + .header_by_number(data.at_target_block.unique_saturated_into()) + .await + .map_err(|e| { + log::trace!( + target: "bridge", + "Failed to read {} header with number {}: {e:?}", + Self::Target::NAME, + data.at_target_block, + ); + anyhow::format_err!("The command has failed") + })? + .id(); + + crate::messages_lane::relay_messages_delivery_confirmation::( + source_client, + target_client, + TransactionParams { signer: source_sign, mortality: source_transactions_mortality }, + at_target_block, + data.lane.into(), + ) + .await + } } diff --git a/bridges/relays/lib-substrate-relay/src/messages_lane.rs b/bridges/relays/lib-substrate-relay/src/messages_lane.rs index a34b165289b..08550d19bae 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_lane.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_lane.rs @@ -262,7 +262,7 @@ where source_client, params.lane_id, relayer_id_at_source, - params.target_transaction_params, + Some(params.target_transaction_params), params.source_to_target_headers_relay, ), { @@ -307,7 +307,7 @@ where source_client, lane_id, relayer_id_at_source, - target_transaction_params, + Some(target_transaction_params), None, ), at_source_block, @@ -318,6 +318,44 @@ where .map_err(|_| anyhow::format_err!("The command has failed")) } +/// Relay messages delivery confirmation of Substrate-to-Substrate messages. +/// No checks are made to ensure that transaction will succeed. +pub async fn relay_messages_delivery_confirmation( + source_client: Client, + target_client: Client, + source_transaction_params: TransactionParams>, + at_target_block: HeaderIdOf, + lane_id: LaneId, +) -> anyhow::Result<()> +where + AccountIdOf: From< as Pair>::Public>, + AccountIdOf: From< as Pair>::Public>, + BalanceOf: TryFrom>, +{ + let relayer_id_at_source: AccountIdOf = + source_transaction_params.signer.public().into(); + messages_relay::relay_messages_delivery_confirmation( + SubstrateMessagesSource::
<P>
::new( + source_client.clone(), + target_client.clone(), + lane_id, + source_transaction_params, + None, + ), + SubstrateMessagesTarget::
<P>
::new( + target_client, + source_client, + lane_id, + relayer_id_at_source, + None, + None, + ), + at_target_block, + ) + .await + .map_err(|_| anyhow::format_err!("The command has failed")) +} + /// Different ways of building `receive_messages_proof` calls. pub trait ReceiveMessagesProofCallBuilder { /// Given messages proof, build call of `receive_messages_proof` function of bridge diff --git a/bridges/relays/lib-substrate-relay/src/messages_target.rs b/bridges/relays/lib-substrate-relay/src/messages_target.rs index 633b11f0b80..5ffb2b6c771 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_target.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_target.rs @@ -40,8 +40,8 @@ use messages_relay::{ message_lane_loop::{NoncesSubmitArtifacts, TargetClient, TargetClientState}, }; use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, BalanceOf, CallOf, Client, Error as SubstrateError, HashOf, - TransactionEra, TransactionTracker, UnsignedTransaction, + AccountIdOf, AccountKeyPairOf, BalanceOf, CallOf, Chain, Client, Error as SubstrateError, + HashOf, TransactionEra, TransactionTracker, UnsignedTransaction, }; use relay_utils::relay_loop::Client as RelayClient; use sp_core::Pair; @@ -57,7 +57,7 @@ pub struct SubstrateMessagesTarget { source_client: Client, lane_id: LaneId, relayer_id_at_source: AccountIdOf, - transaction_params: TransactionParams>, + transaction_params: Option>>, source_to_target_headers_relay: Option>>, } @@ -68,7 +68,7 @@ impl SubstrateMessagesTarget
<P>
{
 source_client: Client<P::SourceChain>,
 lane_id: LaneId,
 relayer_id_at_source: AccountIdOf<P::SourceChain>,
- transaction_params: TransactionParams<AccountKeyPairOf<P::TargetChain>>,
+ transaction_params: Option<TransactionParams<AccountKeyPairOf<P::TargetChain>>>,
 source_to_target_headers_relay: Option<
 Arc<dyn OnDemandRelay<P::SourceChain, P::TargetChain>>,
 >,
@@ -249,11 +249,18 @@ where
 None => messages_proof_call,
 };
- let transaction_params = self.transaction_params.clone();
+ let transaction_params = self.transaction_params.clone().map(Ok).unwrap_or_else(|| {
+ // this error shall never happen in practice, so it does not deserve
+ // a separate error variant
+ Err(SubstrateError::Custom(format!(
+ "Cannot sign transaction of {} chain",
+ P::TargetChain::NAME,
+ )))
+ })?;
 let tx_tracker = self
 .target_client
 .submit_and_watch_signed_extrinsic(
- &self.transaction_params.signer,
+ &transaction_params.signer,
 move |best_block_id, transaction_nonce| {
 Ok(UnsignedTransaction::new(final_call.into(), transaction_nonce)
 .era(TransactionEra::new(best_block_id, transaction_params.mortality)))
diff --git a/bridges/relays/messages/src/lib.rs b/bridges/relays/messages/src/lib.rs
index 7c18b6b148f..78a3237ba4f 100644
--- a/bridges/relays/messages/src/lib.rs
+++ b/bridges/relays/messages/src/lib.rs
@@ -37,3 +37,4 @@ mod message_race_receiving;
 mod message_race_strategy;
 pub use message_race_delivery::relay_messages_range;
+pub use message_race_receiving::relay_messages_delivery_confirmation;
diff --git a/bridges/relays/messages/src/message_race_receiving.rs b/bridges/relays/messages/src/message_race_receiving.rs
index e6497a1b79e..ac4149b22d7 100644
--- a/bridges/relays/messages/src/message_race_receiving.rs
+++ b/bridges/relays/messages/src/message_race_receiving.rs
@@ -30,7 +30,7 @@ use crate::{
 use async_trait::async_trait;
 use bp_messages::MessageNonce;
 use futures::stream::FusedStream;
-use relay_utils::FailedClient;
+use relay_utils::{FailedClient, TrackedTransactionStatus, TransactionTracker};
 use std::{marker::PhantomData, ops::RangeInclusive};
 /// Message receiving confirmations delivery strategy.
@@ -69,6 +69,43 @@ pub async fn run<P: MessageLane>(
 .await
 }
+/// Relay messages delivery confirmation.
+pub async fn relay_messages_delivery_confirmation<P: MessageLane>(
+ source_client: impl MessageLaneSourceClient
<P>
, + target_client: impl MessageLaneTargetClient
<P>
, + at: TargetHeaderIdOf
<P>
, +) -> Result<(), ()> { + // prepare messages delivery proof + let (at, proof) = target_client.prove_messages_receiving(at.clone()).await.map_err(|e| { + log::error!( + target: "bridge", + "Failed to generate messages delivery proof at {:?}: {:?}", + at, + e, + ); + })?; + // submit messages delivery proof to the source node + let tx_tracker = + source_client + .submit_messages_receiving_proof(None, at, proof) + .await + .map_err(|e| { + log::error!( + target: "bridge", + "Failed to submit messages delivery proof: {:?}", + e, + ); + })?; + + match tx_tracker.wait().await { + TrackedTransactionStatus::Finalized(_) => Ok(()), + TrackedTransactionStatus::Lost => { + log::error!("Transaction with messages delivery proof is considered lost"); + Err(()) + }, + } +} + /// Messages receiving confirmations race. struct ReceivingConfirmationsRace
<P>
(std::marker::PhantomData
<P>
); -- GitLab From 89b67bc69e5b8cdad49c378f6db0a48873548d35 Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Mon, 27 May 2024 09:41:51 +0200 Subject: [PATCH 066/106] chore: fix typos (#4590) chore: fix typos --- bridges/docs/running-relayer.md | 6 +++--- docs/RELEASE.md | 2 +- .../roadmap/implementers-guide/src/runtime/scheduler.md | 2 +- prdoc/schema_user.json | 2 +- substrate/bin/utils/subkey/README.md | 4 ++-- substrate/client/transaction-pool/README.md | 2 +- substrate/frame/contracts/README.md | 2 +- .../utils/frame/benchmarking-cli/src/overhead/README.md | 2 +- 8 files changed, 11 insertions(+), 11 deletions(-) diff --git a/bridges/docs/running-relayer.md b/bridges/docs/running-relayer.md index 710810a476e..594cbc35a10 100644 --- a/bridges/docs/running-relayer.md +++ b/bridges/docs/running-relayer.md @@ -139,7 +139,7 @@ your transactions that are **validated** on top of block, where it is active get becomes expired when the block with the number you have specified during registration is "mined". It is the `validTill` parameter of the `register` call (see below). After that `validTill` block, you may unregister and get your reserved funds back. There's also an intermediate point between those blocks - it is the `validTill - LEASE`, -where `LEASE` is the the chain constant, controlled by the governance. Initially it is set to `300` blocks. +where `LEASE` is the chain constant, controlled by the governance. Initially it is set to `300` blocks. All your transactions, **validated** between the `validTill - LEASE` and `validTill` blocks do not get the priority boost. Also, it is forbidden to specify `validTill` such that the `validTill - currentBlock` is less than the `LEASE`. @@ -156,7 +156,7 @@ than the `LEASE`. | 700 | Inactive | Your message delivery transactions are not boosted | | 701 | Inactive | Your message delivery transactions are not boosted | | ... | Inactive | Your message delivery transactions are not boosted | -| 1000 | Expired | Your may submit a tx with the `deregister` call | +| 1000 | Expired | You may submit a tx with the `deregister` call | @@ -230,7 +230,7 @@ your relayer account. Then: - set the `bridgedChainId` to `bhpd`; -- check the both variants of the `owner` field: `ThisChain` is used to pay for message delivery transactions +- check both variants of the `owner` field: `ThisChain` is used to pay for message delivery transactions and `BridgedChain` is used to pay for message confirmation transactions. If check shows that you have some rewards, you can craft the claim transaction, with similar parameters. diff --git a/docs/RELEASE.md b/docs/RELEASE.md index e73be2779a9..653e6a2a3e9 100644 --- a/docs/RELEASE.md +++ b/docs/RELEASE.md @@ -45,7 +45,7 @@ variable. ## Westend & Rococo -For the these networks, in addition to incrementing the `Cargo.toml` version we also increment the `spec_version` and +For these networks, in addition to incrementing the `Cargo.toml` version we also increment the `spec_version` and sometimes the `transaction_version`. The spec version is also following the node version. Its schema is: `M_mmm_ppp` and for example `1_002_000` is the node release `1.2.0`. This versioning has no further meaning, and is only done to map from an on chain `spec_version` easily to the release in this repository. 
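The `M_mmm_ppp` schema described in the RELEASE.md hunk above is just positional arithmetic. A hypothetical helper (not part of this typo-fix patch) makes the convention concrete:

```rust
/// Node release M.m.p maps to on-chain spec_version M * 1_000_000 + m * 1_000 + p.
fn spec_version(major: u32, minor: u32, patch: u32) -> u32 {
    major * 1_000_000 + minor * 1_000 + patch
}

fn main() {
    assert_eq!(spec_version(1, 2, 0), 1_002_000); // node release 1.2.0
}
```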
diff --git a/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md b/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md index 083ed2b6fea..be1e71666ad 100644 --- a/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md +++ b/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md @@ -1,7 +1,7 @@ # Scheduler Pallet > TODO: this section is still heavily under construction. key questions about availability cores and validator -> assignment are still open and the flow of the the section may be contradictory or inconsistent +> assignment are still open and the flow of the section may be contradictory or inconsistent The Scheduler module is responsible for two main tasks: diff --git a/prdoc/schema_user.json b/prdoc/schema_user.json index 294005f209d..e6c0468aaf8 100644 --- a/prdoc/schema_user.json +++ b/prdoc/schema_user.json @@ -218,7 +218,7 @@ }, "doc": { "type": "object", - "description": "You have the the option to provide different description of your PR for different audiences.", + "description": "You have the option to provide different description of your PR for different audiences.", "additionalProperties": false, "properties": { "audience": { diff --git a/substrate/bin/utils/subkey/README.md b/substrate/bin/utils/subkey/README.md index fc1053e232d..5c6dda37edf 100644 --- a/substrate/bin/utils/subkey/README.md +++ b/substrate/bin/utils/subkey/README.md @@ -74,7 +74,7 @@ The output above shows a **secret phrase** (also called **mnemonic phrase**) and **Private Key**). Those 2 secrets are the pieces of information you MUST keep safe and secret. All the other information below can be derived from those secrets. -The output above also show the **public key** and the **Account ID**. Those are the independent from the network where +The output above also shows the **public key** and the **Account ID**. Those are the independent from the network where you will use the key. The **SS58 address** (or **Public Address**) of a new account is a representation of the public keys of an account for @@ -152,7 +152,7 @@ subkey inspect "soup lyrics media market way crouch elevator put moon useful que which recovers the account `5Fe4sqj2K4fRuzEGvToi4KATqZfiDU7TqynjXG6PZE2dxwyh` and not `5He5pZpc7AJ8evPuab37vJF6KkFDqq9uDq2WXh877Qw6iaVC` as we expected. The additional user-defined **password** -(`extra_secret` in our example) is now required to fully recover the account. Let's inspect the the previous mnemonic, +(`extra_secret` in our example) is now required to fully recover the account. Let's inspect the previous mnemonic, this time passing also the required `password` as shown below: ```bash diff --git a/substrate/client/transaction-pool/README.md b/substrate/client/transaction-pool/README.md index 7a53727d576..30a3a8118b5 100644 --- a/substrate/client/transaction-pool/README.md +++ b/substrate/client/transaction-pool/README.md @@ -49,7 +49,7 @@ pool, it's broadcasting status, block inclusion, finality, etc. ## Transaction Validity details -Information retrieved from the the runtime are encapsulated in the `TransactionValidity` +Information retrieved from the runtime are encapsulated in the `TransactionValidity` type. ```rust diff --git a/substrate/frame/contracts/README.md b/substrate/frame/contracts/README.md index 2e70b5c5008..6440f14b9ec 100644 --- a/substrate/frame/contracts/README.md +++ b/substrate/frame/contracts/README.md @@ -112,7 +112,7 @@ Contracts can emit messages to the client when called as RPC through the API. 
This is exposed in [ink!](https://use.ink) via
[`ink_env::debug_message()`](https://paritytech.github.io/ink/ink_env/fn.debug_message.html).

-Those messages are gathered into an internal buffer and sent to the RPC client. It is up the the individual client if
+Those messages are gathered into an internal buffer and sent to the RPC client. It is up to the individual client if
 and how those messages are presented to the user.

 This buffer is also printed as a debug message. In order to see these messages on the node console the log level for
diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/README.md b/substrate/utils/frame/benchmarking-cli/src/overhead/README.md
index 648908010ba..cee095fb8ca 100644
--- a/substrate/utils/frame/benchmarking-cli/src/overhead/README.md
+++ b/substrate/utils/frame/benchmarking-cli/src/overhead/README.md
@@ -108,7 +108,7 @@ The complete command for Polkadot looks like this:
 cargo run --profile=production -- benchmark overhead --chain=polkadot-dev --wasm-execution=compiled --weight-path=runtime/polkadot/constants/src/weights/
 ```

-This will overwrite the the
+This will overwrite the
 [block_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/block_weights.rs)
 and
 [extrinsic_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/extrinsic_weights.rs)
--
GitLab


From e0edb062e55e80cf21490fb140e4bbc3b7d7c89d Mon Sep 17 00:00:00 2001
From: Przemek Rzad
Date: Mon, 27 May 2024 10:42:51 +0200
Subject: [PATCH 067/106] Add release version to commits and branch names of
 template synchronization job (#4353)

Just to record which release number was used to push a particular commit or
PR to the template repositories.
---
 .github/workflows/misc-sync-templates.yml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/misc-sync-templates.yml b/.github/workflows/misc-sync-templates.yml
index 3617d6c34a3..2699ff0fed3 100644
--- a/.github/workflows/misc-sync-templates.yml
+++ b/.github/workflows/misc-sync-templates.yml
@@ -148,12 +148,12 @@ jobs:
         token: ${{ steps.app_token.outputs.token }}
         add-paths: |
           ./*
-        title: "[Don't merge] Update the ${{ matrix.template }} template"
+        title: "[Don't merge] Update the ${{ matrix.template }} template to ${{ github.event.inputs.crate_release_version }}"
         body: "The template has NOT been successfully built and needs to be inspected."
-        branch: "update-template/${{ github.event_name }}"
+        branch: "update-template/${{ github.event.inputs.crate_release_version }}"
     - name: Push changes
       run: |
         git add -A .
-        git commit --allow-empty -m "Update template triggered by ${{ github.event_name }}"
+        git commit --allow-empty -m "Update to ${{ github.event.inputs.crate_release_version }} triggered by ${{ github.event_name }}"
         git push
       working-directory: "${{ env.template-path }}"
--
GitLab


From 2352982717edc8976b55525274b1f9c9aa01aadd Mon Sep 17 00:00:00 2001
From: Sebastian Kunert
Date: Mon, 27 May 2024 11:39:56 +0200
Subject: [PATCH 068/106] Make markdown lint CI job pass (#4593)

It was constantly failing, so here is a fix.
---
 README.md | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index e139dc0ee07..f15c716a811 100644
--- a/README.md
+++ b/README.md
@@ -9,6 +9,7 @@
 ![GitHub stars](https://img.shields.io/github/stars/paritytech/polkadot-sdk)&nbsp;![GitHub
 forks](https://img.shields.io/github/forks/paritytech/polkadot-sdk)
+[![StackExchange](https://img.shields.io/badge/StackExchange-Community%20&%20Support-222222?logo=stackexchange)](https://substrate.stackexchange.com/)&nbsp;
 ![GitHub contributors](https://img.shields.io/github/contributors/paritytech/polkadot-sdk)&nbsp;![GitHub commit
 activity](https://img.shields.io/github/commit-activity/m/paritytech/polkadot-sdk)
 ![GitHub lines of code](https://tokei.rs/b1/github/paritytech/polkadot-sdk)&nbsp;
@@ -30,7 +31,7 @@ forks](https://img.shields.io/github/forks/paritytech/polkadot-sdk)

 ## 🚀 Releases

-> [!NOTE]
+> [!NOTE]
 > Our release process is still Work-In-Progress and may not yet reflect the aspired outline
 > here.
@@ -62,9 +63,10 @@ Conduct](./docs/contributor/CODE_OF_CONDUCT.md).

 ### 👾 Ready to Contribute?

-Take a look at the issues labeled with [`mentor`](https://github.com/paritytech/polkadot-sdk/labels/C1-mentor) (or alternatively [this](https://mentor.tasty.limo/) page, created by one of
-the maintainers) label to get started! We always recognize valuable contributions by proposing an
-on-chain tip to the Polkadot network as a token of our appreciation.
+Take a look at the issues labeled with [`mentor`](https://github.com/paritytech/polkadot-sdk/labels/C1-mentor)
+(or alternatively [this](https://mentor.tasty.limo/) page, created by one of the maintainers) label to get started!
+We always recognize valuable contributions by proposing an on-chain tip to the Polkadot network as a token of our
+appreciation.

 ## Polkadot Fellowship
--
GitLab


From ce3e9b7c7099034e8ee30e4c7c912e3ed068bf8a Mon Sep 17 00:00:00 2001
From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Date: Mon, 27 May 2024 16:55:34 +0300
Subject: [PATCH 069/106] network: Update litep2p to v0.5.0 (#4570)

## [0.5.0] - 2024-05-24

This is a small patch release that makes the `FindNode` command a bit more robust:

- The `FindNode` command now retains the K (replication factor) best results.
- The `FindNode` command has been updated to handle errors and unexpected states without panicking.
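For context, "retain the K best results" means keeping only the `replication_factor` (K) peers whose keys are closest, by XOR distance, to the query target. A minimal standalone sketch of that idea (hypothetical names and a shortened key type, not litep2p's actual implementation):

```rust
use std::collections::BTreeMap;

/// Kademlia keys are 256-bit; shortened to 4 bytes here for brevity.
type Key = [u8; 4];

/// XOR distance between two keys, read as a big-endian integer.
fn distance(a: &Key, b: &Key) -> u32 {
    u32::from_be_bytes([a[0] ^ b[0], a[1] ^ b[1], a[2] ^ b[2], a[3] ^ b[3]])
}

/// Tracks the K closest peers seen so far during a `FIND_NODE` query.
struct FindNodeResults {
    target: Key,
    replication_factor: usize,
    /// Ordered by distance; distances are unique for a fixed target under
    /// the XOR metric, so the map key is unambiguous.
    best: BTreeMap<u32, Key>,
}

impl FindNodeResults {
    fn new(target: Key, replication_factor: usize) -> Self {
        Self { target, replication_factor, best: BTreeMap::new() }
    }

    /// Record a discovered peer, evicting the furthest peer whenever more
    /// than K candidates are known.
    fn insert(&mut self, peer: Key) {
        self.best.insert(distance(&self.target, &peer), peer);
        while self.best.len() > self.replication_factor {
            // `pop_last` drops the entry with the greatest distance.
            let _ = self.best.pop_last();
        }
    }
}

fn main() {
    let mut results = FindNodeResults::new([0, 0, 0, 0], 2);
    for peer in [[0, 0, 0, 9], [0, 0, 0, 1], [0, 0, 0, 4]] {
        results.insert(peer);
    }
    // Only the two closest peers ([0,0,0,1] and [0,0,0,4]) are retained.
    assert_eq!(results.best.len(), 2);
}
```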
### Changed - kad: Refactor FindNode query, keep K best results and add tests ([#114](https://github.com/paritytech/litep2p/pull/114)) --------- Signed-off-by: Alexandru Vasile --- Cargo.lock | 82 ++++++++++++------- substrate/client/network/Cargo.toml | 2 +- .../client/network/src/litep2p/discovery.rs | 15 ++-- substrate/client/network/src/litep2p/mod.rs | 24 ++++-- substrate/client/network/types/Cargo.toml | 2 +- 5 files changed, 79 insertions(+), 46 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1767245c6ab..82dfd34c252 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2366,9 +2366,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.4.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" [[package]] name = "bzip2-sys" @@ -5339,9 +5339,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" [[package]] name = "fastrlp" @@ -6175,9 +6175,9 @@ checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-timer" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" @@ -7956,9 +7956,9 @@ dependencies = [ [[package]] name = "litep2p" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adf107268459b653df189050c9ae2301253b9c62ceafa993dc69dad29870155c" +checksum = "7f02542ae3a94b4c4ffa37dc56388c923e286afa3bf65452e3984b50b2a2f316" dependencies = [ "async-trait", "bs58 0.4.0", @@ -7970,7 +7970,7 @@ dependencies = [ "hex-literal", "indexmap 2.2.3", "libc", - "mockall", + "mockall 0.12.1", "multiaddr", "multihash 0.17.0", "network-interface", @@ -8480,11 +8480,26 @@ dependencies = [ "downcast", "fragile", "lazy_static", - "mockall_derive", + "mockall_derive 0.11.4", "predicates 2.1.5", "predicates-tree", ] +[[package]] +name = "mockall" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43766c2b5203b10de348ffe19f7e54564b64f3d6018ff7648d1e2d6d3a0f0a48" +dependencies = [ + "cfg-if", + "downcast", + "fragile", + "lazy_static", + "mockall_derive 0.12.1", + "predicates 3.0.3", + "predicates-tree", +] + [[package]] name = "mockall_derive" version = "0.11.4" @@ -8497,6 +8512,18 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "mockall_derive" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af7cbce79ec385a1d4f54baa90a76401eb15d9cab93685f62e7e9f942aa00ae2" +dependencies = [ + "cfg-if", + "proc-macro2 1.0.82", + "quote 1.0.35", + "syn 2.0.61", +] + [[package]] name = "multiaddr" version = "0.17.1" @@ -16817,7 +16844,7 @@ dependencies = [ "futures", "futures-timer", "log", - "mockall", + "mockall 0.11.4", "parking_lot 0.12.1", "sc-client-api", "sc-network-types", @@ -17342,7 +17369,7 @@ dependencies = [ "linked_hash_set", "litep2p", "log", - 
"mockall", + "mockall 0.11.4", "multistream-select", "once_cell", "parity-scale-codec", @@ -17480,7 +17507,7 @@ dependencies = [ "futures-timer", "libp2p", "log", - "mockall", + "mockall 0.11.4", "parity-scale-codec", "prost 0.12.4", "prost-build 0.12.4", @@ -18231,9 +18258,9 @@ dependencies = [ [[package]] name = "sctp-proto" -version = "0.1.7" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f64cef148d3295c730c3cb340b0b252a4d570b1c7d4bf0808f88540b0a888bc" +checksum = "b6220f78bb44c15f326b0596113305f6101097a18755d53727a575c97e09fb24" dependencies = [ "bytes", "crc", @@ -18802,9 +18829,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.2" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "smol" @@ -20679,17 +20706,17 @@ dependencies = [ [[package]] name = "str0m" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3f10d3f68e60168d81110410428a435dbde28cc5525f5f7c6fdec92dbdc2800" +checksum = "6706347e49b13373f7ddfafad47df7583ed52083d6fc8a594eb2c80497ef959d" dependencies = [ "combine", "crc", + "fastrand 2.1.0", "hmac 0.12.1", "once_cell", "openssl", "openssl-sys", - "rand 0.8.5", "sctp-proto", "serde", "sha-1 0.10.1", @@ -21361,7 +21388,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if", - "fastrand 2.0.0", + "fastrand 2.1.0", "redox_syscall 0.4.1", "rustix 0.38.21", "windows-sys 0.48.0", @@ -21553,9 +21580,9 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.50" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" dependencies = [ "thiserror-impl", ] @@ -21582,9 +21609,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "1.0.50" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ "proc-macro2 1.0.82", "quote 1.0.35", @@ -21828,9 +21855,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.8" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", @@ -21838,7 +21865,6 @@ dependencies = [ "futures-sink", "pin-project-lite 0.2.12", "tokio", - "tracing", ] [[package]] diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml index b06d9c73540..29b14a4511c 100644 --- a/substrate/client/network/Cargo.toml +++ b/substrate/client/network/Cargo.toml @@ -59,7 +59,7 @@ sp-blockchain = { path = "../../primitives/blockchain" } sp-core = { path = "../../primitives/core" } sp-runtime = { path = "../../primitives/runtime" } wasm-timer = "0.2" -litep2p = "0.4.0" +litep2p = "0.5.0" once_cell = 
"1.18.0" void = "1.0.2" schnellru = "0.2.1" diff --git a/substrate/client/network/src/litep2p/discovery.rs b/substrate/client/network/src/litep2p/discovery.rs index 351380755db..ff5f492df24 100644 --- a/substrate/client/network/src/litep2p/discovery.rs +++ b/substrate/client/network/src/litep2p/discovery.rs @@ -34,7 +34,7 @@ use litep2p::{ identify::{Config as IdentifyConfig, IdentifyEvent}, kademlia::{ Config as KademliaConfig, ConfigBuilder as KademliaConfigBuilder, KademliaEvent, - KademliaHandle, QueryId, Quorum, Record, RecordKey, + KademliaHandle, QueryId, Quorum, Record, RecordKey, RecordsType, }, ping::{Config as PingConfig, PingEvent}, }, @@ -123,8 +123,8 @@ pub enum DiscoveryEvent { /// Query ID. query_id: QueryId, - /// Record. - record: Record, + /// Records. + records: RecordsType, }, /// Record was successfully stored on the DHT. @@ -460,16 +460,13 @@ impl Stream for Discovery { peers: peers.into_iter().collect(), })) }, - Poll::Ready(Some(KademliaEvent::GetRecordSuccess { query_id, record })) => { + Poll::Ready(Some(KademliaEvent::GetRecordSuccess { query_id, records })) => { log::trace!( target: LOG_TARGET, - "`GET_RECORD` succeeded for {query_id:?}: {record:?}", + "`GET_RECORD` succeeded for {query_id:?}: {records:?}", ); - return Poll::Ready(Some(DiscoveryEvent::GetRecordSuccess { - query_id, - record: record.record, - })); + return Poll::Ready(Some(DiscoveryEvent::GetRecordSuccess { query_id, records })); }, Poll::Ready(Some(KademliaEvent::PutRecordSucess { query_id, key: _ })) => return Poll::Ready(Some(DiscoveryEvent::PutRecordSuccess { query_id })), diff --git a/substrate/client/network/src/litep2p/mod.rs b/substrate/client/network/src/litep2p/mod.rs index 67085a81a5c..ae287052b2d 100644 --- a/substrate/client/network/src/litep2p/mod.rs +++ b/substrate/client/network/src/litep2p/mod.rs @@ -56,7 +56,10 @@ use litep2p::{ crypto::ed25519::Keypair, executor::Executor, protocol::{ - libp2p::{bitswap::Config as BitswapConfig, kademlia::QueryId}, + libp2p::{ + bitswap::Config as BitswapConfig, + kademlia::{QueryId, RecordsType}, + }, request_response::ConfigBuilder as RequestResponseConfigBuilder, }, transport::{ @@ -796,23 +799,30 @@ impl NetworkBackend for Litep2pNetworkBac self.peerstore_handle.add_known_peer(peer.into()); } } - Some(DiscoveryEvent::GetRecordSuccess { query_id, record }) => { + Some(DiscoveryEvent::GetRecordSuccess { query_id, records }) => { match self.pending_get_values.remove(&query_id) { None => log::warn!( target: LOG_TARGET, "`GET_VALUE` succeeded for a non-existent query", ), - Some((_key, started)) => { + Some((key, started)) => { log::trace!( target: LOG_TARGET, "`GET_VALUE` for {:?} ({query_id:?}) succeeded", - record.key, + key, ); - self.event_streams.send(Event::Dht( - DhtEvent::ValueFound(vec![ + let value_found = match records { + RecordsType::LocalStore(record) => vec![ (libp2p::kad::RecordKey::new(&record.key), record.value) - ]) + ], + RecordsType::Network(records) => records.into_iter().map(|peer_record| { + (libp2p::kad::RecordKey::new(&peer_record.record.key), peer_record.record.value) + }).collect(), + }; + + self.event_streams.send(Event::Dht( + DhtEvent::ValueFound(value_found) )); if let Some(ref metrics) = self.metrics { diff --git a/substrate/client/network/types/Cargo.toml b/substrate/client/network/types/Cargo.toml index ed89eca2dd1..a9334aaa170 100644 --- a/substrate/client/network/types/Cargo.toml +++ b/substrate/client/network/types/Cargo.toml @@ -13,7 +13,7 @@ documentation = "https://docs.rs/sc-network-types" bs58 = 
"0.5.0" ed25519-dalek = "2.1" libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } -litep2p = "0.4.0" +litep2p = "0.5.0" multiaddr = "0.17.0" multihash = { version = "0.17.0", default-features = false, features = ["identity", "multihash-impl", "sha2", "std"] } rand = "0.8.5" -- GitLab From 16887b6fd5ea637f3c2891d4a41180e9534e63db Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Mon, 27 May 2024 17:10:23 +0200 Subject: [PATCH 070/106] chain-spec-builder: help updated (#4597) Added some clarification on output file. --- substrate/bin/utils/chain-spec-builder/src/lib.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/substrate/bin/utils/chain-spec-builder/src/lib.rs b/substrate/bin/utils/chain-spec-builder/src/lib.rs index 7982da76227..167704d3633 100644 --- a/substrate/bin/utils/chain-spec-builder/src/lib.rs +++ b/substrate/bin/utils/chain-spec-builder/src/lib.rs @@ -206,9 +206,14 @@ struct NamedPresetCmd { /// /// The code field of the chain spec will be updated with the runtime provided in the /// command line. This operation supports both plain and raw formats. +/// +/// This command does not update chain-spec file in-place. The result of this command will be stored +/// in a file given as `-c/--chain-spec-path` command line argument. #[derive(Parser, Debug, Clone)] pub struct UpdateCodeCmd { /// Chain spec to be updated. + /// + /// Please note that the file will not be updated in-place. pub input_chain_spec: PathBuf, /// The path to new runtime wasm blob to be stored into chain-spec. pub runtime_wasm_path: PathBuf, -- GitLab From 70dd67a5d129745da6a05bce958824504a4c9d83 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Mon, 27 May 2024 19:12:46 +0200 Subject: [PATCH 071/106] check-weight: Disable total pov size check for mandatory extrinsics (#4571) So in some pallets we like [here](https://github.com/paritytech/polkadot-sdk/blob/5dc522d02fe0b53be1517f8b8979176e489a388b/substrate/frame/session/src/lib.rs#L556) we use `max_block` as return value for `on_initialize` (ideally we would not). This means the block is already full when we try to apply the inherents, which lead to the error seen in #4559 because we are unable to include the required inherents. This was not erroring before #4326 because we were running into this branch: https://github.com/paritytech/polkadot-sdk/blob/e4b89cc50c8d17868d6c8b122f2e156d678c7525/substrate/frame/system/src/extensions/check_weight.rs#L222-L224 The inherents are of `DispatchClass::Mandatory` and therefore have a `reserved` value of `None` in all runtimes I have inspected. So they will always pass the normal check. So in this PR I adjust the `check_combined_proof_size` to return an early `Ok(())` for mandatory extrinsics. If we agree on this PR I will backport it to the 1.12.0 branch. 
closes #4559

---------

Co-authored-by: command-bot <>
---
 prdoc/pr_4571.prdoc                           |  19 +++
 .../system/src/extensions/check_weight.rs     | 130 +++++++++++++++---
 2 files changed, 129 insertions(+), 20 deletions(-)
 create mode 100644 prdoc/pr_4571.prdoc

diff --git a/prdoc/pr_4571.prdoc b/prdoc/pr_4571.prdoc
new file mode 100644
index 00000000000..b03fee8a5cc
--- /dev/null
+++ b/prdoc/pr_4571.prdoc
@@ -0,0 +1,19 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Ignore mandatory extrinsics in total PoV size check
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      The `CheckWeight` extension is checking that extrinsic length and used storage proof
+      weight together do not exceed the PoV size limit. This led to problems when
+      the PoV size was already reached before mandatory extrinsics were applied. The `CheckWeight`
+      extension will now allow extrinsics of `DispatchClass::Mandatory` to be applied even if
+      the limit is reached.
+
+crates:
+  - name: frame-system
+    bump: minor
+  - name: polkadot-sdk
+    bump: minor
diff --git a/substrate/frame/system/src/extensions/check_weight.rs b/substrate/frame/system/src/extensions/check_weight.rs
index 061d543f8c3..5d6c68989ed 100644
--- a/substrate/frame/system/src/extensions/check_weight.rs
+++ b/substrate/frame/system/src/extensions/check_weight.rs
@@ -15,7 +15,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use crate::{limits::BlockWeights, Config, Pallet, LOG_TARGET};
+use crate::{limits::BlockWeights, Config, DispatchClass, Pallet, LOG_TARGET};
 use codec::{Decode, Encode};
 use frame_support::{
 	dispatch::{DispatchInfo, PostDispatchInfo},
@@ -107,7 +107,7 @@ where
 		let maximum_weight = T::BlockWeights::get();
 		let next_weight =
 			calculate_consumed_weight::(&maximum_weight, all_weight, info)?;
-		check_combined_proof_size(&maximum_weight, next_len, &next_weight)?;
+		check_combined_proof_size::(info, &maximum_weight, next_len, &next_weight)?;
 		Self::check_extrinsic_weight(info)?;

 		crate::AllExtrinsicsLen::::put(next_len);
@@ -131,22 +131,31 @@ where
 }

 /// Check that the combined extrinsic length and proof size together do not exceed the PoV limit.
-pub fn check_combined_proof_size(
+pub fn check_combined_proof_size(
+	info: &DispatchInfoOf,
 	maximum_weight: &BlockWeights,
 	next_len: u32,
 	next_weight: &crate::ConsumedWeight,
-) -> Result<(), TransactionValidityError> {
+) -> Result<(), TransactionValidityError>
+where
+	Call: Dispatchable,
+{
 	// This extra check ensures that the extrinsic length does not push the
 	// PoV over the limit.
 	let total_pov_size = next_weight.total().proof_size().saturating_add(next_len as u64);
 	if total_pov_size > maximum_weight.max_block.proof_size() {
 		log::debug!(
 			target: LOG_TARGET,
-			"Extrinsic exceeds total pov size: {}kb, limit: {}kb",
+			"Extrinsic exceeds total pov size. Still including if mandatory. 
size: {}kb, limit: {}kb, is_mandatory: {}", total_pov_size as f64/1024.0, - maximum_weight.max_block.proof_size() as f64/1024.0 + maximum_weight.max_block.proof_size() as f64/1024.0, + info.class == DispatchClass::Mandatory ); - return Err(InvalidTransaction::ExhaustsResources.into()) + return match info.class { + // Allow mandatory extrinsics + DispatchClass::Mandatory => Ok(()), + _ => Err(InvalidTransaction::ExhaustsResources.into()), + }; } Ok(()) } @@ -190,7 +199,7 @@ where "Exceeded the per-class allowance.", ); - return Err(InvalidTransaction::ExhaustsResources.into()) + return Err(InvalidTransaction::ExhaustsResources.into()); }, // There is no `max_total` limit (`None`), // or we are below the limit. @@ -208,7 +217,7 @@ where "Total block weight is exceeded.", ); - return Err(InvalidTransaction::ExhaustsResources.into()) + return Err(InvalidTransaction::ExhaustsResources.into()); }, // There is either no limit in reserved pool (`None`), // or we are below the limit. @@ -791,6 +800,8 @@ mod tests { assert_eq!(maximum_weight.max_block, Weight::from_parts(20, 10)); + let info = DispatchInfo { class: DispatchClass::Normal, ..Default::default() }; + let mandatory = DispatchInfo { class: DispatchClass::Mandatory, ..Default::default() }; // We have 10 reftime and 5 proof size left over. let next_weight = crate::ConsumedWeight::new(|class| match class { DispatchClass::Normal => Weight::from_parts(10, 5), @@ -799,12 +810,33 @@ mod tests { }); // Simple checks for the length - assert_ok!(check_combined_proof_size(&maximum_weight, 0, &next_weight)); - assert_ok!(check_combined_proof_size(&maximum_weight, 5, &next_weight)); + assert_ok!(check_combined_proof_size::<::RuntimeCall>( + &info, + &maximum_weight, + 0, + &next_weight + )); + assert_ok!(check_combined_proof_size::<::RuntimeCall>( + &info, + &maximum_weight, + 5, + &next_weight + )); assert_err!( - check_combined_proof_size(&maximum_weight, 6, &next_weight), + check_combined_proof_size::<::RuntimeCall>( + &info, + &maximum_weight, + 6, + &next_weight + ), InvalidTransaction::ExhaustsResources ); + assert_ok!(check_combined_proof_size::<::RuntimeCall>( + &mandatory, + &maximum_weight, + 6, + &next_weight + )); // We have 10 reftime and 0 proof size left over. let next_weight = crate::ConsumedWeight::new(|class| match class { @@ -812,11 +844,27 @@ mod tests { DispatchClass::Operational => Weight::from_parts(0, 0), DispatchClass::Mandatory => Weight::zero(), }); - assert_ok!(check_combined_proof_size(&maximum_weight, 0, &next_weight)); + assert_ok!(check_combined_proof_size::<::RuntimeCall>( + &info, + &maximum_weight, + 0, + &next_weight + )); assert_err!( - check_combined_proof_size(&maximum_weight, 1, &next_weight), + check_combined_proof_size::<::RuntimeCall>( + &info, + &maximum_weight, + 1, + &next_weight + ), InvalidTransaction::ExhaustsResources ); + assert_ok!(check_combined_proof_size::<::RuntimeCall>( + &mandatory, + &maximum_weight, + 1, + &next_weight + )); // We have 10 reftime and 2 proof size left over. // Used weight is spread across dispatch classes this time. 
@@ -825,12 +873,33 @@ mod tests { DispatchClass::Operational => Weight::from_parts(0, 3), DispatchClass::Mandatory => Weight::zero(), }); - assert_ok!(check_combined_proof_size(&maximum_weight, 0, &next_weight)); - assert_ok!(check_combined_proof_size(&maximum_weight, 2, &next_weight)); + assert_ok!(check_combined_proof_size::<::RuntimeCall>( + &info, + &maximum_weight, + 0, + &next_weight + )); + assert_ok!(check_combined_proof_size::<::RuntimeCall>( + &info, + &maximum_weight, + 2, + &next_weight + )); assert_err!( - check_combined_proof_size(&maximum_weight, 3, &next_weight), + check_combined_proof_size::<::RuntimeCall>( + &info, + &maximum_weight, + 3, + &next_weight + ), InvalidTransaction::ExhaustsResources ); + assert_ok!(check_combined_proof_size::<::RuntimeCall>( + &mandatory, + &maximum_weight, + 3, + &next_weight + )); // Ref time is over the limit. Should not happen, but we should make sure that it is // ignored. @@ -839,11 +908,32 @@ mod tests { DispatchClass::Operational => Weight::from_parts(0, 0), DispatchClass::Mandatory => Weight::zero(), }); - assert_ok!(check_combined_proof_size(&maximum_weight, 0, &next_weight)); - assert_ok!(check_combined_proof_size(&maximum_weight, 5, &next_weight)); + assert_ok!(check_combined_proof_size::<::RuntimeCall>( + &info, + &maximum_weight, + 0, + &next_weight + )); + assert_ok!(check_combined_proof_size::<::RuntimeCall>( + &info, + &maximum_weight, + 5, + &next_weight + )); assert_err!( - check_combined_proof_size(&maximum_weight, 6, &next_weight), + check_combined_proof_size::<::RuntimeCall>( + &info, + &maximum_weight, + 6, + &next_weight + ), InvalidTransaction::ExhaustsResources ); + assert_ok!(check_combined_proof_size::<::RuntimeCall>( + &mandatory, + &maximum_weight, + 6, + &next_weight + )); } } -- GitLab From a7097681b76bdaef21dcde9aec8c33205f480e44 Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Mon, 27 May 2024 21:23:58 +0200 Subject: [PATCH 072/106] [subsystem-benchmarks] Add statement-distribution benchmarks (#3863) Fixes https://github.com/paritytech/polkadot-sdk/issues/3748 Adds a subsystem benchmark for statements-distribution subsystem. 
Results in CI (reference hw): ``` $ cargo bench -p polkadot-statement-distribution --bench statement-distribution-regression-bench --features subsystem-benchmarks [Sent to peers] standart_deviation 0.07% [Received from peers] standart_deviation 0.00% [statement-distribution] standart_deviation 0.97% [test-environment] standart_deviation 1.03% Network usage, KiB total per block Received from peers 1088.0000 108.8000 Sent to peers 1238.1800 123.8180 CPU usage, seconds total per block statement-distribution 0.3897 0.0390 test-environment 0.4715 0.0472 ``` --- .gitlab/pipeline/publish.yml | 4 + .gitlab/pipeline/test.yml | 7 + Cargo.lock | 2 + .../network/statement-distribution/Cargo.toml | 10 + ...statement-distribution-regression-bench.rs | 78 +++ .../network/statement-distribution/src/lib.rs | 1 - polkadot/node/subsystem-bench/Cargo.toml | 1 + .../examples/statement_distribution.yaml | 5 + .../src/cli/subsystem-bench.rs | 14 +- .../subsystem-bench/src/lib/approval/mod.rs | 10 +- .../src/lib/availability/mod.rs | 3 +- .../subsystem-bench/src/lib/configuration.rs | 31 +- polkadot/node/subsystem-bench/src/lib/lib.rs | 3 +- .../subsystem-bench/src/lib/mock/av_store.rs | 3 +- .../src/lib/mock/candidate_backing.rs | 171 +++++++ .../node/subsystem-bench/src/lib/mock/mod.rs | 2 + .../src/lib/mock/network_bridge.rs | 49 +- .../src/lib/mock/prospective_parachains.rs | 74 +++ .../src/lib/mock/runtime_api.rs | 83 +++- .../node/subsystem-bench/src/lib/network.rs | 90 +++- .../subsystem-bench/src/lib/statement/mod.rs | 450 ++++++++++++++++++ .../src/lib/statement/test_state.rs | 436 +++++++++++++++++ 22 files changed, 1480 insertions(+), 47 deletions(-) create mode 100644 polkadot/node/network/statement-distribution/benches/statement-distribution-regression-bench.rs create mode 100644 polkadot/node/subsystem-bench/examples/statement_distribution.yaml create mode 100644 polkadot/node/subsystem-bench/src/lib/mock/candidate_backing.rs create mode 100644 polkadot/node/subsystem-bench/src/lib/mock/prospective_parachains.rs create mode 100644 polkadot/node/subsystem-bench/src/lib/statement/mod.rs create mode 100644 polkadot/node/subsystem-bench/src/lib/statement/test_state.rs diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml index 8b27c724748..44cd1933a9c 100644 --- a/.gitlab/pipeline/publish.yml +++ b/.gitlab/pipeline/publish.yml @@ -76,6 +76,8 @@ publish-subsystem-benchmarks: artifacts: true - job: subsystem-benchmark-approval-voting artifacts: true + - job: subsystem-benchmark-statement-distribution + artifacts: true - job: publish-rustdoc artifacts: false script: @@ -119,6 +121,8 @@ trigger_workflow: artifacts: true - job: subsystem-benchmark-approval-voting artifacts: true + - job: subsystem-benchmark-statement-distribution + artifacts: true script: - echo "Triggering workflow" - > diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 1851581746a..d171a8a1942 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -630,3 +630,10 @@ subsystem-benchmark-approval-voting: script: - cargo bench -p polkadot-node-core-approval-voting --bench approval-voting-regression-bench --features subsystem-benchmarks allow_failure: true + +subsystem-benchmark-statement-distribution: + extends: + - .subsystem-benchmark-template + script: + - cargo bench -p polkadot-statement-distribution --bench statement-distribution-regression-bench --features subsystem-benchmarks + allow_failure: true diff --git a/Cargo.lock b/Cargo.lock index 82dfd34c252..acbda4f0326 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -14474,6 +14474,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "polkadot-subsystem-bench", "rand_chacha 0.3.1", "sc-keystore", "sc-network", @@ -14538,6 +14539,7 @@ dependencies = [ "polkadot-overseer", "polkadot-primitives", "polkadot-primitives-test-helpers", + "polkadot-statement-distribution", "prometheus", "pyroscope", "pyroscope_pprofrs", diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml index 1fe761bd0e3..65224f9e2be 100644 --- a/polkadot/node/network/statement-distribution/Cargo.toml +++ b/polkadot/node/network/statement-distribution/Cargo.toml @@ -42,3 +42,13 @@ sc-network = { path = "../../../../substrate/client/network" } futures-timer = "3.0.2" polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } rand_chacha = "0.3" +polkadot-subsystem-bench = { path = "../../subsystem-bench" } + +[[bench]] +name = "statement-distribution-regression-bench" +path = "benches/statement-distribution-regression-bench.rs" +harness = false +required-features = ["subsystem-benchmarks"] + +[features] +subsystem-benchmarks = [] diff --git a/polkadot/node/network/statement-distribution/benches/statement-distribution-regression-bench.rs b/polkadot/node/network/statement-distribution/benches/statement-distribution-regression-bench.rs new file mode 100644 index 00000000000..abcb1e6783f --- /dev/null +++ b/polkadot/node/network/statement-distribution/benches/statement-distribution-regression-bench.rs @@ -0,0 +1,78 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! statement-distribution regression tests +//! +//! Statement distribution benchmark based on Kusama parameters and scale. 
+ +use polkadot_subsystem_bench::{ + configuration::TestConfiguration, + statement::{benchmark_statement_distribution, prepare_test, TestState}, + usage::BenchmarkUsage, + utils::save_to_file, +}; +use std::io::Write; + +const BENCH_COUNT: usize = 50; + +fn main() -> Result<(), String> { + let mut messages = vec![]; + let mut config = TestConfiguration::default(); + config.n_cores = 100; + config.n_validators = 500; + config.num_blocks = 10; + config.connectivity = 100; + config.generate_pov_sizes(); + let state = TestState::new(&config); + + println!("Benchmarking..."); + let usages: Vec = (0..BENCH_COUNT) + .map(|n| { + print!("\r[{}{}]", "#".repeat(n), "_".repeat(BENCH_COUNT - n)); + std::io::stdout().flush().unwrap(); + let (mut env, _cfgs) = prepare_test(&state, false); + env.runtime().block_on(benchmark_statement_distribution( + "statement-distribution", + &mut env, + &state, + )) + }) + .collect(); + println!("\rDone!{}", " ".repeat(BENCH_COUNT)); + + let average_usage = BenchmarkUsage::average(&usages); + save_to_file( + "charts/statement-distribution-regression-bench.json", + average_usage.to_chart_json().map_err(|e| e.to_string())?, + ) + .map_err(|e| e.to_string())?; + println!("{}", average_usage); + + // We expect no variance for received and sent + // but use 0.001 because we operate with floats + messages.extend(average_usage.check_network_usage(&[ + ("Received from peers", 106.4000, 0.001), + ("Sent to peers", 127.9100, 0.001), + ])); + messages.extend(average_usage.check_cpu_usage(&[("statement-distribution", 0.0390, 0.1)])); + + if messages.is_empty() { + Ok(()) + } else { + eprintln!("{}", messages.join("\n")); + Err("Regressions found".to_string()) + } +} diff --git a/polkadot/node/network/statement-distribution/src/lib.rs b/polkadot/node/network/statement-distribution/src/lib.rs index 4ca199c3378..4d56c795f13 100644 --- a/polkadot/node/network/statement-distribution/src/lib.rs +++ b/polkadot/node/network/statement-distribution/src/lib.rs @@ -19,7 +19,6 @@ //! This is responsible for distributing signed statements about candidate //! validity among validators. 
-#![deny(unused_crate_dependencies)] #![warn(missing_docs)] use error::{log_error, FatalResult}; diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml index 37c6681b273..21eaed832c4 100644 --- a/polkadot/node/subsystem-bench/Cargo.toml +++ b/polkadot/node/subsystem-bench/Cargo.toml @@ -28,6 +28,7 @@ polkadot-primitives = { path = "../../primitives" } polkadot-node-network-protocol = { path = "../network/protocol" } polkadot-availability-recovery = { path = "../network/availability-recovery", features = ["subsystem-benchmarks"] } polkadot-availability-distribution = { path = "../network/availability-distribution" } +polkadot-statement-distribution = { path = "../network/statement-distribution" } polkadot-node-core-av-store = { path = "../core/av-store" } polkadot-node-core-chain-api = { path = "../core/chain-api" } polkadot-availability-bitfield-distribution = { path = "../network/bitfield-distribution" } diff --git a/polkadot/node/subsystem-bench/examples/statement_distribution.yaml b/polkadot/node/subsystem-bench/examples/statement_distribution.yaml new file mode 100644 index 00000000000..e86669ffefc --- /dev/null +++ b/polkadot/node/subsystem-bench/examples/statement_distribution.yaml @@ -0,0 +1,5 @@ +TestConfiguration: +- objective: StatementDistribution + num_blocks: 10 + n_cores: 100 + n_validators: 500 diff --git a/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs b/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs index 10953b6c783..1e921500a4d 100644 --- a/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs +++ b/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs @@ -20,7 +20,7 @@ use clap::Parser; use color_eyre::eyre; use colored::Colorize; -use polkadot_subsystem_bench::{approval, availability, configuration}; +use polkadot_subsystem_bench::{approval, availability, configuration, statement}; use pyroscope::PyroscopeAgent; use pyroscope_pprofrs::{pprof_backend, PprofConfig}; use serde::{Deserialize, Serialize}; @@ -40,6 +40,8 @@ pub enum TestObjective { DataAvailabilityWrite, /// Benchmark the approval-voting and approval-distribution subsystems. 
ApprovalVoting(approval::ApprovalsOptions), + // Benchmark the statement-distribution subsystem + StatementDistribution, } impl std::fmt::Display for TestObjective { @@ -51,6 +53,7 @@ impl std::fmt::Display for TestObjective { Self::DataAvailabilityRead(_) => "DataAvailabilityRead", Self::DataAvailabilityWrite => "DataAvailabilityWrite", Self::ApprovalVoting(_) => "ApprovalVoting", + Self::StatementDistribution => "StatementDistribution", } ) } @@ -170,6 +173,15 @@ impl BenchCli { state, )) }, + TestObjective::StatementDistribution => { + let state = statement::TestState::new(&test_config); + let (mut env, _protocol_config) = statement::prepare_test(&state, true); + env.runtime().block_on(statement::benchmark_statement_distribution( + &benchmark_name, + &mut env, + &state, + )) + }, }; println!("{}", usage); } diff --git a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs index 6ac0776d2d3..4a479b6af29 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs @@ -30,7 +30,7 @@ use crate::{ mock::{ chain_api::{ChainApiState, MockChainApi}, network_bridge::{MockNetworkBridgeRx, MockNetworkBridgeTx}, - runtime_api::MockRuntimeApi, + runtime_api::{MockRuntimeApi, MockRuntimeApiCoreState}, AlwaysSupportsParachains, TestSyncOracle, }, network::{ @@ -465,8 +465,9 @@ impl ApprovalTestState { } } +#[async_trait::async_trait] impl HandleNetworkMessage for ApprovalTestState { - fn handle( + async fn handle( &self, _message: crate::network::NetworkMessage, _node_sender: &mut futures::channel::mpsc::UnboundedSender, @@ -807,6 +808,7 @@ fn build_overseer( state.candidate_events_by_block(), Some(state.babe_epoch.clone()), 1, + MockRuntimeApiCoreState::Occupied, ); let mock_tx_bridge = MockNetworkBridgeTx::new( network.clone(), @@ -915,7 +917,9 @@ pub async fn bench_approvals_run( // First create the initialization messages that make sure that then node under // tests receives notifications about the topology used and the connected peers. 
- let mut initialization_messages = env.network().generate_peer_connected(); + let mut initialization_messages = env.network().generate_peer_connected(|e| { + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NetworkBridgeUpdate(e)) + }); initialization_messages.extend(generate_new_session_topology( &state.test_authorities, ValidatorIndex(NODE_UNDER_TEST), diff --git a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs index 5b93c3d862d..f7d65589565 100644 --- a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs @@ -22,7 +22,7 @@ use crate::{ av_store::{self, MockAvailabilityStore, NetworkAvailabilityState}, chain_api::{ChainApiState, MockChainApi}, network_bridge::{self, MockNetworkBridgeRx, MockNetworkBridgeTx}, - runtime_api::{self, MockRuntimeApi}, + runtime_api::{self, MockRuntimeApi, MockRuntimeApiCoreState}, AlwaysSupportsParachains, }, network::new_network, @@ -189,6 +189,7 @@ pub fn prepare_test( Default::default(), Default::default(), 0, + MockRuntimeApiCoreState::Occupied, ); let (overseer, overseer_handle) = match &mode { diff --git a/polkadot/node/subsystem-bench/src/lib/configuration.rs b/polkadot/node/subsystem-bench/src/lib/configuration.rs index 1e0efb72a7d..f614a5e552a 100644 --- a/polkadot/node/subsystem-bench/src/lib/configuration.rs +++ b/polkadot/node/subsystem-bench/src/lib/configuration.rs @@ -18,12 +18,13 @@ use crate::keyring::Keyring; use itertools::Itertools; -use polkadot_primitives::{AssignmentId, AuthorityDiscoveryId, ValidatorId}; +use polkadot_primitives::{AssignmentId, AuthorityDiscoveryId, ValidatorId, ValidatorPair}; use rand::thread_rng; use rand_distr::{Distribution, Normal, Uniform}; use sc_network_types::PeerId; use serde::{Deserialize, Serialize}; use sp_consensus_babe::AuthorityId; +use sp_core::Pair; use std::collections::HashMap; /// Peer networking latency configuration. 
@@ -89,6 +90,15 @@ fn default_n_delay_tranches() -> usize { fn default_no_show_slots() -> usize { 3 } +fn default_minimum_backing_votes() -> u32 { + 2 +} +fn default_max_candidate_depth() -> u32 { + 3 +} +fn default_allowed_ancestry_len() -> u32 { + 2 +} /// The test input parameters #[derive(Clone, Debug, Serialize, Deserialize)] @@ -137,6 +147,15 @@ pub struct TestConfiguration { pub connectivity: usize, /// Number of blocks to run the test for pub num_blocks: usize, + /// Number of minimum backing votes + #[serde(default = "default_minimum_backing_votes")] + pub minimum_backing_votes: u32, + /// Async Backing max_candidate_depth + #[serde(default = "default_max_candidate_depth")] + pub max_candidate_depth: u32, + /// Async Backing allowed_ancestry_len + #[serde(default = "default_allowed_ancestry_len")] + pub allowed_ancestry_len: u32, } impl Default for TestConfiguration { @@ -158,6 +177,9 @@ impl Default for TestConfiguration { latency: default_peer_latency(), connectivity: default_connectivity(), num_blocks: Default::default(), + minimum_backing_votes: default_minimum_backing_votes(), + max_candidate_depth: default_max_candidate_depth(), + allowed_ancestry_len: default_allowed_ancestry_len(), } } } @@ -208,6 +230,11 @@ impl TestConfiguration { .map(|(peer_id, authority_id)| (*peer_id, authority_id.clone())) .collect(); + let validator_pairs = key_seeds + .iter() + .map(|seed| ValidatorPair::from_string_with_seed(seed, None).unwrap().0) + .collect(); + TestAuthorities { keyring, validator_public, @@ -217,6 +244,7 @@ impl TestConfiguration { validator_assignment_id, key_seeds, peer_id_to_authority, + validator_pairs, } } } @@ -246,6 +274,7 @@ pub struct TestAuthorities { pub key_seeds: Vec, pub peer_ids: Vec, pub peer_id_to_authority: HashMap, + pub validator_pairs: Vec, } /// Sample latency (in milliseconds) from a normal distribution with parameters diff --git a/polkadot/node/subsystem-bench/src/lib/lib.rs b/polkadot/node/subsystem-bench/src/lib/lib.rs index ef2724abc98..e18227af8be 100644 --- a/polkadot/node/subsystem-bench/src/lib/lib.rs +++ b/polkadot/node/subsystem-bench/src/lib/lib.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -// The validator index that represent the node that is under test. +// The validator index that represents the node that is under test. pub const NODE_UNDER_TEST: u32 = 0; pub mod approval; @@ -25,5 +25,6 @@ pub(crate) mod environment; pub(crate) mod keyring; pub(crate) mod mock; pub(crate) mod network; +pub mod statement; pub mod usage; pub mod utils; diff --git a/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs index fba33523be8..a035bf01897 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs @@ -49,8 +49,9 @@ pub struct NetworkAvailabilityState { } // Implement access to the state. +#[async_trait::async_trait] impl HandleNetworkMessage for NetworkAvailabilityState { - fn handle( + async fn handle( &self, message: NetworkMessage, _node_sender: &mut futures::channel::mpsc::UnboundedSender, diff --git a/polkadot/node/subsystem-bench/src/lib/mock/candidate_backing.rs b/polkadot/node/subsystem-bench/src/lib/mock/candidate_backing.rs new file mode 100644 index 00000000000..51494016e18 --- /dev/null +++ b/polkadot/node/subsystem-bench/src/lib/mock/candidate_backing.rs @@ -0,0 +1,171 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A generic candidate backing subsystem mockup suitable to be used in benchmarks. + +use crate::{configuration::TestConfiguration, NODE_UNDER_TEST}; +use futures::FutureExt; +use polkadot_node_primitives::{SignedFullStatementWithPVD, Statement, StatementWithPVD}; +use polkadot_node_subsystem::{ + messages::CandidateBackingMessage, overseer, SpawnedSubsystem, SubsystemError, +}; +use polkadot_node_subsystem_types::OverseerSignal; +use polkadot_primitives::{ + CandidateHash, Hash, PersistedValidationData, SigningContext, ValidatorIndex, ValidatorPair, +}; +use sp_core::Pair; +use std::collections::HashMap; + +const LOG_TARGET: &str = "subsystem-bench::candidate-backing-mock"; + +struct MockCandidateBackingState { + pair: ValidatorPair, + pvd: PersistedValidationData, + own_backing_group: Vec, +} + +pub struct MockCandidateBacking { + config: TestConfiguration, + state: MockCandidateBackingState, +} + +impl MockCandidateBacking { + pub fn new( + config: TestConfiguration, + pair: ValidatorPair, + pvd: PersistedValidationData, + own_backing_group: Vec, + ) -> Self { + Self { config, state: MockCandidateBackingState { pair, pvd, own_backing_group } } + } + + fn handle_statement( + &self, + relay_parent: Hash, + statement: SignedFullStatementWithPVD, + statements_tracker: &mut HashMap, + ) -> Vec { + let mut messages = vec![]; + let validator_id = statement.validator_index(); + let is_own_backing_group = self.state.own_backing_group.contains(&validator_id); + + match statement.payload() { + StatementWithPVD::Seconded(receipt, _pvd) => { + let candidate_hash = receipt.hash(); + statements_tracker + .entry(candidate_hash) + .and_modify(|v| { + *v += 1; + }) + .or_insert(1); + + let statements_received_count = *statements_tracker.get(&candidate_hash).unwrap(); + if statements_received_count == (self.config.minimum_backing_votes - 1) && + is_own_backing_group + { + let statement = Statement::Valid(candidate_hash); + let context = SigningContext { parent_hash: relay_parent, session_index: 0 }; + let payload = statement.to_compact().signing_payload(&context); + let message = + polkadot_node_subsystem::messages::StatementDistributionMessage::Share( + relay_parent, + SignedFullStatementWithPVD::new( + statement.supply_pvd(self.state.pvd.clone()), + ValidatorIndex(NODE_UNDER_TEST), + self.state.pair.sign(&payload[..]), + &context, + &self.state.pair.public(), + ) + .unwrap(), + ); + messages.push(message); + } + + if statements_received_count == self.config.minimum_backing_votes { + let message = + polkadot_node_subsystem::messages::StatementDistributionMessage::Backed( + candidate_hash, + ); + messages.push(message); + } + }, + StatementWithPVD::Valid(candidate_hash) => { + statements_tracker + .entry(*candidate_hash) + .and_modify(|v| { + *v += 1; + }) + .or_insert(1); + + let statements_received_count = *statements_tracker.get(candidate_hash).unwrap(); + if 
statements_received_count == self.config.minimum_backing_votes { + let message = + polkadot_node_subsystem::messages::StatementDistributionMessage::Backed( + *candidate_hash, + ); + messages.push(message); + } + }, + } + + messages + } +} + +#[overseer::subsystem(CandidateBacking, error=SubsystemError, prefix=self::overseer)] +impl MockCandidateBacking { + fn start(self, ctx: Context) -> SpawnedSubsystem { + let future = self.run(ctx).map(|_| Ok(())).boxed(); + + SpawnedSubsystem { name: "test-environment", future } + } +} + +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +impl MockCandidateBacking { + async fn run(self, mut ctx: Context) { + let mut statements_tracker: HashMap = Default::default(); + + loop { + let msg = ctx.recv().await.expect("Overseer never fails us"); + match msg { + orchestra::FromOrchestra::Signal(signal) => + if signal == OverseerSignal::Conclude { + return + }, + orchestra::FromOrchestra::Communication { msg } => { + gum::trace!(target: LOG_TARGET, msg=?msg, "recv message"); + + match msg { + CandidateBackingMessage::Statement(relay_parent, statement) => { + let messages = self.handle_statement( + relay_parent, + statement, + &mut statements_tracker, + ); + for message in messages { + ctx.send_message(message).await; + } + }, + _ => { + unimplemented!("Unexpected candidate-backing message") + }, + } + }, + } + } + } +} diff --git a/polkadot/node/subsystem-bench/src/lib/mock/mod.rs b/polkadot/node/subsystem-bench/src/lib/mock/mod.rs index 6dda9a47d39..12766374bfa 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/mod.rs @@ -19,9 +19,11 @@ use polkadot_node_subsystem_types::Hash; use sp_consensus::SyncOracle; pub mod av_store; +pub mod candidate_backing; pub mod chain_api; pub mod dummy; pub mod network_bridge; +pub mod prospective_parachains; pub mod runtime_api; pub struct AlwaysSupportsParachains {} diff --git a/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs index ec66ad4e279..10508f456a4 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs @@ -27,14 +27,19 @@ use polkadot_node_subsystem::{ messages::NetworkBridgeTxMessage, overseer, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_types::{ - messages::{ApprovalDistributionMessage, BitfieldDistributionMessage, NetworkBridgeEvent}, + messages::{ + ApprovalDistributionMessage, BitfieldDistributionMessage, NetworkBridgeEvent, + StatementDistributionMessage, + }, OverseerSignal, }; use sc_network::{request_responses::ProtocolConfig, RequestFailure}; const LOG_TARGET: &str = "subsystem-bench::network-bridge"; -const CHUNK_REQ_PROTOCOL_NAME_V1: &str = - "/ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff/req_chunk/1"; +const ALLOWED_PROTOCOLS: &[&str] = &[ + "/ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff/req_chunk/1", + "/ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff/req_attested_candidate/2", +]; /// A mock of the network bridge tx subsystem. 
pub struct MockNetworkBridgeTx { @@ -106,8 +111,15 @@ impl MockNetworkBridgeTx { NetworkBridgeTxMessage::SendRequests(requests, _if_disconnected) => { for request in requests { gum::debug!(target: LOG_TARGET, request = ?request, "Processing request"); - let peer_id = - request.authority_id().expect("all nodes are authorities").clone(); + let peer_id = match request.authority_id() { + Some(v) => v.clone(), + None => self + .test_authorities + .peer_id_to_authority + .get(request.peer_id().expect("Should exist")) + .expect("Should exist") + .clone(), + }; if !self.network.is_peer_connected(&peer_id) { // Attempting to send a request to a disconnected peer. @@ -141,7 +153,23 @@ impl MockNetworkBridgeTx { .expect("Should not fail"); } }, - _ => unimplemented!("Unexpected network bridge message"), + NetworkBridgeTxMessage::SendValidationMessages(messages) => { + for (peers, message) in messages { + for peer in peers { + self.to_network_interface + .unbounded_send(NetworkMessage::MessageFromNode( + self.test_authorities + .peer_id_to_authority + .get(&peer) + .unwrap() + .clone(), + message.clone(), + )) + .expect("Should not fail"); + } + } + }, + message => unimplemented!("Unexpected network bridge message {:?}", message), }, } } @@ -175,13 +203,20 @@ impl MockNetworkBridgeRx { ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage(peer_id, polkadot_node_network_protocol::Versioned::V3(msg))) ).await; } + Versioned::V3( + polkadot_node_network_protocol::v3::ValidationProtocol::StatementDistribution(msg) + ) => { + ctx.send_message( + StatementDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage(peer_id, polkadot_node_network_protocol::Versioned::V3(msg))) + ).await; + } _ => { unimplemented!("We only talk v2 network protocol") }, }, NetworkMessage::RequestFromPeer(request) => { if let Some(protocol) = self.chunk_request_sender.as_mut() { - assert_eq!(&*protocol.name, CHUNK_REQ_PROTOCOL_NAME_V1); + assert!(ALLOWED_PROTOCOLS.contains(&&*protocol.name)); if let Some(inbound_queue) = protocol.inbound_queue.as_ref() { inbound_queue .send(request) diff --git a/polkadot/node/subsystem-bench/src/lib/mock/prospective_parachains.rs b/polkadot/node/subsystem-bench/src/lib/mock/prospective_parachains.rs new file mode 100644 index 00000000000..8a865af21a0 --- /dev/null +++ b/polkadot/node/subsystem-bench/src/lib/mock/prospective_parachains.rs @@ -0,0 +1,74 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A generic prospective parachains subsystem mockup suitable to be used in benchmarks. 
+ +use futures::FutureExt; +use polkadot_node_subsystem::{ + messages::ProspectiveParachainsMessage, overseer, SpawnedSubsystem, SubsystemError, +}; +use polkadot_node_subsystem_types::OverseerSignal; +use polkadot_primitives::Hash; + +pub struct MockProspectiveParachains {} + +impl MockProspectiveParachains { + pub fn new() -> Self { + Self {} + } +} + +#[overseer::subsystem(ProspectiveParachains, error=SubsystemError, prefix=self::overseer)] +impl MockProspectiveParachains { + fn start(self, ctx: Context) -> SpawnedSubsystem { + let future = self.run(ctx).map(|_| Ok(())).boxed(); + + SpawnedSubsystem { name: "test-environment", future } + } +} + +#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] +impl MockProspectiveParachains { + async fn run(self, mut ctx: Context) { + loop { + let msg = ctx.recv().await.expect("Overseer never fails us"); + match msg { + orchestra::FromOrchestra::Signal(signal) => + if signal == OverseerSignal::Conclude { + return + }, + orchestra::FromOrchestra::Communication { msg } => match msg { + ProspectiveParachainsMessage::GetMinimumRelayParents(_relay_parent, tx) => { + tx.send(vec![]).unwrap(); + }, + ProspectiveParachainsMessage::GetHypotheticalMembership(req, tx) => { + tx.send( + req.candidates + .iter() + .cloned() + .map(|candidate| (candidate, vec![Hash::repeat_byte(0)])) + .collect(), + ) + .unwrap(); + }, + _ => { + unimplemented!("Unexpected chain-api message") + }, + }, + } + } + } +} diff --git a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs index b73d61321cd..9788a1123ec 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs @@ -26,8 +26,9 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_types::OverseerSignal; use polkadot_primitives::{ - CandidateEvent, CandidateReceipt, CoreState, GroupIndex, IndexedVec, NodeFeatures, - OccupiedCore, SessionIndex, SessionInfo, ValidatorIndex, + AsyncBackingParams, CandidateEvent, CandidateReceipt, CoreState, GroupIndex, GroupRotationInfo, + IndexedVec, NodeFeatures, OccupiedCore, ScheduledCore, SessionIndex, SessionInfo, + ValidatorIndex, }; use sp_consensus_babe::Epoch as BabeEpoch; use sp_core::H256; @@ -49,11 +50,20 @@ pub struct RuntimeApiState { session_index: SessionIndex, } +#[derive(Clone)] +pub enum MockRuntimeApiCoreState { + Occupied, + Scheduled, + #[allow(dead_code)] + Free, +} + /// A mocked `runtime-api` subsystem. #[derive(Clone)] pub struct MockRuntimeApi { state: RuntimeApiState, config: TestConfiguration, + core_state: MockRuntimeApiCoreState, } impl MockRuntimeApi { @@ -64,6 +74,7 @@ impl MockRuntimeApi { included_candidates: HashMap>, babe_epoch: Option, session_index: SessionIndex, + core_state: MockRuntimeApiCoreState, ) -> MockRuntimeApi { Self { state: RuntimeApiState { @@ -74,6 +85,7 @@ impl MockRuntimeApi { session_index, }, config, + core_state, } } @@ -198,16 +210,26 @@ impl MockRuntimeApi { // Ensure test breaks if badly configured. 
assert!(index < validator_group_count); - CoreState::Occupied(OccupiedCore { - next_up_on_available: None, - occupied_since: 0, - time_out_at: 0, - next_up_on_time_out: None, - availability: BitVec::default(), - group_responsible: GroupIndex(index as u32), - candidate_hash: candidate_receipt.hash(), - candidate_descriptor: candidate_receipt.descriptor.clone(), - }) + use MockRuntimeApiCoreState::*; + match self.core_state { + Occupied => CoreState::Occupied(OccupiedCore { + next_up_on_available: None, + occupied_since: 0, + time_out_at: 0, + next_up_on_time_out: None, + availability: BitVec::default(), + group_responsible: GroupIndex(index as u32), + candidate_hash: candidate_receipt.hash(), + candidate_descriptor: candidate_receipt + .descriptor + .clone(), + }), + Scheduled => CoreState::Scheduled(ScheduledCore { + para_id: (index + 1).into(), + collator: None, + }), + Free => todo!(), + } }) .collect::>(); @@ -223,6 +245,43 @@ impl MockRuntimeApi { .clone() .expect("Babe epoch unpopulated"))); }, + RuntimeApiMessage::Request( + _block_hash, + RuntimeApiRequest::AsyncBackingParams(sender), + ) => { + let _ = sender.send(Ok(AsyncBackingParams { + max_candidate_depth: self.config.max_candidate_depth, + allowed_ancestry_len: self.config.allowed_ancestry_len, + })); + }, + RuntimeApiMessage::Request(_parent, RuntimeApiRequest::Version(tx)) => { + tx.send(Ok(RuntimeApiRequest::DISABLED_VALIDATORS_RUNTIME_REQUIREMENT)) + .unwrap(); + }, + RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::DisabledValidators(tx), + ) => { + tx.send(Ok(vec![])).unwrap(); + }, + RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::MinimumBackingVotes(_session_index, tx), + ) => { + tx.send(Ok(self.config.minimum_backing_votes)).unwrap(); + }, + RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::ValidatorGroups(tx), + ) => { + let groups = self.session_info().validator_groups.to_vec(); + let group_rotation_info = GroupRotationInfo { + session_start_block: 1, + group_rotation_frequency: 12, + now: 1, + }; + tx.send(Ok((groups, group_rotation_info))).unwrap(); + }, // Long term TODO: implement more as needed. message => { unimplemented!("Unexpected runtime-api message: {:?}", message) diff --git a/polkadot/node/subsystem-bench/src/lib/network.rs b/polkadot/node/subsystem-bench/src/lib/network.rs index 9bf2415e5a8..9686f456b9e 100644 --- a/polkadot/node/subsystem-bench/src/lib/network.rs +++ b/polkadot/node/subsystem-bench/src/lib/network.rs @@ -51,13 +51,14 @@ use futures::{ }; use itertools::Itertools; use net_protocol::{ - peer_set::{ProtocolVersion, ValidationVersion}, + peer_set::ValidationVersion, request_response::{Recipient, Requests, ResponseSender}, - ObservedRole, VersionedValidationProtocol, + ObservedRole, VersionedValidationProtocol, View, }; use parity_scale_codec::Encode; use polkadot_node_network_protocol::{self as net_protocol, Versioned}; -use polkadot_node_subsystem_types::messages::{ApprovalDistributionMessage, NetworkBridgeEvent}; +use polkadot_node_subsystem::messages::StatementDistributionMessage; +use polkadot_node_subsystem_types::messages::NetworkBridgeEvent; use polkadot_node_subsystem_util::metrics::prometheus::{ self, CounterVec, Opts, PrometheusError, Registry, }; @@ -437,6 +438,7 @@ pub struct EmulatedPeerHandle { /// Send actions to be performed by the peer. actions_tx: UnboundedSender, peer_id: PeerId, + authority_id: AuthorityDiscoveryId, } impl EmulatedPeerHandle { @@ -496,29 +498,31 @@ impl EmulatedPeer { } /// Interceptor pattern for handling messages. 
+#[async_trait::async_trait]
 pub trait HandleNetworkMessage {
 	/// Returns `None` if the message was handled, or the `message`
 	/// otherwise.
 	///
 	/// `node_sender` allows sending of messages to the node in response
 	/// to the handled message.
-	fn handle(
+	async fn handle(
 		&self,
 		message: NetworkMessage,
 		node_sender: &mut UnboundedSender<NetworkMessage>,
 	) -> Option<NetworkMessage>;
 }
 
+#[async_trait::async_trait]
 impl<T> HandleNetworkMessage for Arc<T>
 where
-	T: HandleNetworkMessage,
+	T: HandleNetworkMessage + Sync + Send,
 {
-	fn handle(
+	async fn handle(
 		&self,
 		message: NetworkMessage,
 		node_sender: &mut UnboundedSender<NetworkMessage>,
 	) -> Option<NetworkMessage> {
-		self.as_ref().handle(message, node_sender)
+		T::handle(self, message, node_sender).await
 	}
 }
 
@@ -551,7 +555,7 @@ async fn emulated_peer_loop(
 			for handler in handlers.iter() {
 				// The check below guarantees that message is always `Some`: we are still
 				// inside the loop.
-				message = handler.handle(message.unwrap(), &mut to_network_interface);
+				message = handler.handle(message.unwrap(), &mut to_network_interface).await;
 				if message.is_none() {
 					break
 				}
@@ -613,6 +617,7 @@ async fn emulated_peer_loop(
 }
 
 /// Creates a new peer emulator task and returns a handle to it.
+#[allow(clippy::too_many_arguments)]
 pub fn new_peer(
 	bandwidth: usize,
 	spawn_task_handle: SpawnTaskHandle,
@@ -621,6 +626,7 @@ pub fn new_peer(
 	to_network_interface: UnboundedSender<NetworkMessage>,
 	latency_ms: usize,
 	peer_id: PeerId,
+	authority_id: AuthorityDiscoveryId,
 ) -> EmulatedPeerHandle {
 	let (messages_tx, messages_rx) = mpsc::unbounded::<NetworkMessage>();
 	let (actions_tx, actions_rx) = mpsc::unbounded::<NetworkMessage>();
@@ -649,7 +655,7 @@
 			.boxed(),
 	);
 
-	EmulatedPeerHandle { messages_tx, actions_tx, peer_id }
+	EmulatedPeerHandle { messages_tx, actions_tx, peer_id, authority_id }
 }
 
 /// Bookkeeping of sent and received bytes.
@@ -714,6 +720,18 @@ impl Peer {
 			Peer::Disconnected(ref emulator) => emulator,
 		}
 	}
+
+	pub fn authority_id(&self) -> AuthorityDiscoveryId {
+		match self {
+			Peer::Connected(handle) | Peer::Disconnected(handle) => handle.authority_id.clone(),
+		}
+	}
+
+	pub fn peer_id(&self) -> PeerId {
+		match self {
+			Peer::Connected(handle) | Peer::Disconnected(handle) => handle.peer_id,
+		}
+	}
 }
 
 /// A handle to the emulated network implementation.
@@ -728,21 +746,34 @@ pub struct NetworkEmulatorHandle { } impl NetworkEmulatorHandle { + pub fn generate_statement_distribution_peer_view_change(&self, view: View) -> Vec { + self.peers + .iter() + .filter(|peer| peer.is_connected()) + .map(|peer| { + AllMessages::StatementDistribution( + StatementDistributionMessage::NetworkBridgeUpdate( + NetworkBridgeEvent::PeerViewChange(peer.peer_id(), view.clone()), + ), + ) + }) + .collect_vec() + } + /// Generates peer_connected messages for all peers in `test_authorities` - pub fn generate_peer_connected(&self) -> Vec { + pub fn generate_peer_connected(&self, mapper: F) -> Vec + where + F: Fn(NetworkBridgeEvent) -> AllMessages, + { self.peers .iter() .filter(|peer| peer.is_connected()) .map(|peer| { - let network = NetworkBridgeEvent::PeerConnected( + mapper(NetworkBridgeEvent::PeerConnected( peer.handle().peer_id, - ObservedRole::Full, - ProtocolVersion::from(ValidationVersion::V3), - None, - ); - - AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NetworkBridgeUpdate( - network, + ObservedRole::Authority, + ValidationVersion::V3.into(), + Some(vec![peer.authority_id()].into_iter().collect()), )) }) .collect_vec() @@ -772,7 +803,7 @@ pub fn new_network( let (stats, mut peers): (_, Vec<_>) = (0..n_peers) .zip(authorities.validator_authority_id.clone()) .map(|(peer_index, authority_id)| { - validator_authority_id_mapping.insert(authority_id, peer_index); + validator_authority_id_mapping.insert(authority_id.clone(), peer_index); let stats = Arc::new(PeerEmulatorStats::new(peer_index, metrics.clone())); ( stats.clone(), @@ -784,6 +815,7 @@ pub fn new_network( to_network_interface.clone(), random_latency(config.latency.as_ref()), *authorities.peer_ids.get(peer_index).unwrap(), + authority_id, )), ) }) @@ -971,6 +1003,8 @@ impl Metrics { pub trait RequestExt { /// Get the authority id if any from the request. fn authority_id(&self) -> Option<&AuthorityDiscoveryId>; + /// Get the peer id if any from the request. + fn peer_id(&self) -> Option<&PeerId>; /// Consume self and return the response sender. fn into_response_sender(self) -> ResponseSender; /// Allows to change the `ResponseSender` in place. 
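
For orientation, a minimal sketch of a custom interceptor against the now-async `HandleNetworkMessage` trait above. The handler name and the drop-requests behaviour are illustrative only (not part of this patch), and the sketch assumes the surrounding module's imports (`NetworkMessage`, `Requests`, `UnboundedSender`):

```rust
// Hypothetical interceptor: swallows AttestedCandidateV2 requests and
// passes every other message on to the next handler in the chain.
struct DropAttestedCandidateRequests;

#[async_trait::async_trait]
impl HandleNetworkMessage for DropAttestedCandidateRequests {
	async fn handle(
		&self,
		message: NetworkMessage,
		_node_sender: &mut UnboundedSender<NetworkMessage>,
	) -> Option<NetworkMessage> {
		match message {
			// Returning `None` marks the message as handled...
			NetworkMessage::RequestFromNode(_, Requests::AttestedCandidateV2(_)) => None,
			// ...while returning it hands it to the next handler in the chain.
			other => Some(other),
		}
	}
}
```
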
@@ -996,12 +1030,26 @@ impl RequestExt for Requests { None } }, + // Requested by PeerId + Requests::AttestedCandidateV2(_) => None, request => { unimplemented!("RequestAuthority not implemented for {:?}", request) }, } } + fn peer_id(&self) -> Option<&PeerId> { + match self { + Requests::AttestedCandidateV2(request) => match &request.peer { + Recipient::Authority(_) => None, + Recipient::Peer(peer_id) => Some(peer_id), + }, + request => { + unimplemented!("peer_id() is not implemented for {:?}", request) + }, + } + } + fn into_response_sender(self) -> ResponseSender { match self { Requests::ChunkFetchingV1(outgoing_request) => outgoing_request.pending_response, @@ -1018,6 +1066,8 @@ impl RequestExt for Requests { std::mem::replace(&mut outgoing_request.pending_response, new_sender), Requests::AvailableDataFetchingV1(outgoing_request) => std::mem::replace(&mut outgoing_request.pending_response, new_sender), + Requests::AttestedCandidateV2(outgoing_request) => + std::mem::replace(&mut outgoing_request.pending_response, new_sender), _ => unimplemented!("unsupported request type"), } } @@ -1028,6 +1078,8 @@ impl RequestExt for Requests { Requests::ChunkFetchingV1(outgoing_request) => outgoing_request.payload.encoded_size(), Requests::AvailableDataFetchingV1(outgoing_request) => outgoing_request.payload.encoded_size(), + Requests::AttestedCandidateV2(outgoing_request) => + outgoing_request.payload.encoded_size(), _ => unimplemented!("received an unexpected request"), } } diff --git a/polkadot/node/subsystem-bench/src/lib/statement/mod.rs b/polkadot/node/subsystem-bench/src/lib/statement/mod.rs new file mode 100644 index 00000000000..508dd9179f7 --- /dev/null +++ b/polkadot/node/subsystem-bench/src/lib/statement/mod.rs @@ -0,0 +1,450 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
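+
+//! A benchmark for the statement-distribution subsystem of a single node under test.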
+ +use crate::{ + configuration::TestAuthorities, + dummy_builder, + environment::{TestEnvironment, TestEnvironmentDependencies, GENESIS_HASH}, + mock::{ + candidate_backing::MockCandidateBacking, + chain_api::{ChainApiState, MockChainApi}, + network_bridge::{MockNetworkBridgeRx, MockNetworkBridgeTx}, + prospective_parachains::MockProspectiveParachains, + runtime_api::{MockRuntimeApi, MockRuntimeApiCoreState}, + AlwaysSupportsParachains, + }, + network::{new_network, NetworkEmulatorHandle, NetworkInterface, NetworkInterfaceReceiver}, + usage::BenchmarkUsage, + NODE_UNDER_TEST, +}; +use bitvec::vec::BitVec; +use colored::Colorize; +use itertools::Itertools; +use polkadot_node_metrics::metrics::Metrics; +use polkadot_node_network_protocol::{ + grid_topology::{SessionGridTopology, TopologyPeerInfo}, + request_response::{IncomingRequest, ReqProtocolNames}, + v3::{self, BackedCandidateManifest, StatementFilter}, + view, Versioned, View, +}; +use polkadot_node_subsystem::messages::{ + network_bridge_event::NewGossipTopology, AllMessages, NetworkBridgeEvent, + StatementDistributionMessage, +}; +use polkadot_overseer::{ + Handle as OverseerHandle, Overseer, OverseerConnector, OverseerMetrics, SpawnGlue, +}; +use polkadot_primitives::{ + AuthorityDiscoveryId, Block, GroupIndex, Hash, Id, ValidatorId, ValidatorIndex, +}; +use polkadot_statement_distribution::StatementDistributionSubsystem; +use rand::SeedableRng; +use sc_keystore::LocalKeystore; +use sc_network::request_responses::ProtocolConfig; +use sc_network_types::PeerId; +use sc_service::SpawnTaskHandle; +use sp_keystore::{Keystore, KeystorePtr}; +use sp_runtime::RuntimeAppPublic; +use std::{ + sync::{atomic::Ordering, Arc}, + time::{Duration, Instant}, +}; +pub use test_state::TestState; + +mod test_state; + +const LOG_TARGET: &str = "subsystem-bench::statement"; + +pub fn make_keystore() -> KeystorePtr { + let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory()); + Keystore::sr25519_generate_new(&*keystore, ValidatorId::ID, Some("//Node0")) + .expect("Insert key into keystore"); + Keystore::sr25519_generate_new(&*keystore, AuthorityDiscoveryId::ID, Some("//Node0")) + .expect("Insert key into keystore"); + keystore +} + +fn build_overseer( + state: &TestState, + network: NetworkEmulatorHandle, + network_interface: NetworkInterface, + network_receiver: NetworkInterfaceReceiver, + dependencies: &TestEnvironmentDependencies, +) -> ( + Overseer, AlwaysSupportsParachains>, + OverseerHandle, + Vec, +) { + let overseer_connector = OverseerConnector::with_event_capacity(64000); + let overseer_metrics = OverseerMetrics::try_register(&dependencies.registry).unwrap(); + let spawn_task_handle = dependencies.task_manager.spawn_handle(); + let mock_runtime_api = MockRuntimeApi::new( + state.config.clone(), + state.test_authorities.clone(), + state.candidate_receipts.clone(), + Default::default(), + Default::default(), + 0, + MockRuntimeApiCoreState::Scheduled, + ); + let chain_api_state = ChainApiState { block_headers: state.block_headers.clone() }; + let mock_chain_api = MockChainApi::new(chain_api_state); + let mock_prospective_parachains = MockProspectiveParachains::new(); + let mock_candidate_backing = MockCandidateBacking::new( + state.config.clone(), + state + .test_authorities + .validator_pairs + .get(NODE_UNDER_TEST as usize) + .unwrap() + .clone(), + state.pvd.clone(), + state.own_backing_group.clone(), + ); + let (statement_req_receiver, statement_req_cfg) = IncomingRequest::get_config_receiver::< + Block, + sc_network::NetworkWorker, + 
>(&ReqProtocolNames::new(GENESIS_HASH, None)); + let (candidate_req_receiver, candidate_req_cfg) = IncomingRequest::get_config_receiver::< + Block, + sc_network::NetworkWorker, + >(&ReqProtocolNames::new(GENESIS_HASH, None)); + let keystore = make_keystore(); + let subsystem = StatementDistributionSubsystem::new( + keystore.clone(), + statement_req_receiver, + candidate_req_receiver, + Metrics::try_register(&dependencies.registry).unwrap(), + rand::rngs::StdRng::from_entropy(), + ); + let network_bridge_tx = MockNetworkBridgeTx::new( + network, + network_interface.subsystem_sender(), + state.test_authorities.clone(), + ); + let network_bridge_rx = MockNetworkBridgeRx::new(network_receiver, Some(candidate_req_cfg)); + + let dummy = dummy_builder!(spawn_task_handle, overseer_metrics) + .replace_runtime_api(|_| mock_runtime_api) + .replace_chain_api(|_| mock_chain_api) + .replace_prospective_parachains(|_| mock_prospective_parachains) + .replace_candidate_backing(|_| mock_candidate_backing) + .replace_statement_distribution(|_| subsystem) + .replace_network_bridge_tx(|_| network_bridge_tx) + .replace_network_bridge_rx(|_| network_bridge_rx); + let (overseer, raw_handle) = dummy.build_with_connector(overseer_connector).unwrap(); + let overseer_handle = OverseerHandle::new(raw_handle); + + (overseer, overseer_handle, vec![statement_req_cfg]) +} + +pub fn prepare_test( + state: &TestState, + with_prometheus_endpoint: bool, +) -> (TestEnvironment, Vec) { + let dependencies = TestEnvironmentDependencies::default(); + let (network, network_interface, network_receiver) = new_network( + &state.config, + &dependencies, + &state.test_authorities, + vec![Arc::new(state.clone())], + ); + let (overseer, overseer_handle, cfg) = + build_overseer(state, network.clone(), network_interface, network_receiver, &dependencies); + + ( + TestEnvironment::new( + dependencies, + state.config.clone(), + network, + overseer, + overseer_handle, + state.test_authorities.clone(), + with_prometheus_endpoint, + ), + cfg, + ) +} + +pub fn generate_peer_view_change(block_hash: Hash, peer_id: PeerId) -> AllMessages { + let network = NetworkBridgeEvent::PeerViewChange(peer_id, View::new([block_hash], 0)); + + AllMessages::StatementDistribution(StatementDistributionMessage::NetworkBridgeUpdate(network)) +} + +pub fn generate_new_session_topology( + topology: &SessionGridTopology, + test_node: ValidatorIndex, +) -> Vec { + let event = NetworkBridgeEvent::NewGossipTopology(NewGossipTopology { + session: 0, + topology: topology.clone(), + local_index: Some(test_node), + }); + vec![AllMessages::StatementDistribution(StatementDistributionMessage::NetworkBridgeUpdate( + event, + ))] +} + +/// Generates a topology to be used for this benchmark. 
+pub fn generate_topology(test_authorities: &TestAuthorities) -> SessionGridTopology { + let keyrings = test_authorities + .validator_authority_id + .clone() + .into_iter() + .zip(test_authorities.peer_ids.clone()) + .collect_vec(); + + let topology = keyrings + .clone() + .into_iter() + .enumerate() + .map(|(index, (discovery_id, peer_id))| TopologyPeerInfo { + peer_ids: vec![peer_id], + validator_index: ValidatorIndex(index as u32), + discovery_id, + }) + .collect_vec(); + let shuffled = (0..keyrings.len()).collect_vec(); + + SessionGridTopology::new(shuffled, topology) +} + +pub async fn benchmark_statement_distribution( + benchmark_name: &str, + env: &mut TestEnvironment, + state: &TestState, +) -> BenchmarkUsage { + state.reset_trackers(); + + let connected_validators = state + .test_authorities + .validator_authority_id + .iter() + .enumerate() + .filter_map(|(i, id)| if env.network().is_peer_connected(id) { Some(i) } else { None }) + .collect_vec(); + let seconding_validator_in_own_backing_group = state + .own_backing_group + .iter() + .find(|v| connected_validators.contains(&(v.0 as usize))) + .unwrap() + .to_owned(); + + let config = env.config().clone(); + let groups = state.session_info.validator_groups.clone(); + let own_backing_group_index = groups + .iter() + .position(|group| group.iter().any(|v| v.0 == NODE_UNDER_TEST)) + .unwrap(); + + env.metrics().set_n_validators(config.n_validators); + env.metrics().set_n_cores(config.n_cores); + + let topology = generate_topology(&state.test_authorities); + let peer_connected_messages = env.network().generate_peer_connected(|e| { + AllMessages::StatementDistribution(StatementDistributionMessage::NetworkBridgeUpdate(e)) + }); + let new_session_topology_messages = + generate_new_session_topology(&topology, ValidatorIndex(NODE_UNDER_TEST)); + for message in peer_connected_messages.into_iter().chain(new_session_topology_messages) { + env.send_message(message).await; + } + + let test_start = Instant::now(); + let mut candidates_advertised = 0; + for block_info in state.block_infos.iter() { + let block_num = block_info.number as usize; + gum::info!(target: LOG_TARGET, "Current block {}/{} {:?}", block_num, config.num_blocks, block_info.hash); + env.metrics().set_current_block(block_num); + env.import_block(block_info.clone()).await; + + for peer_view_change in env + .network() + .generate_statement_distribution_peer_view_change(view![block_info.hash]) + { + env.send_message(peer_view_change).await; + } + + let seconding_peer_id = *state + .test_authorities + .peer_ids + .get(seconding_validator_in_own_backing_group.0 as usize) + .unwrap(); + let candidate = state.candidate_receipts.get(&block_info.hash).unwrap().first().unwrap(); + let candidate_hash = candidate.hash(); + let statement = state + .statements + .get(&candidate_hash) + .unwrap() + .get(seconding_validator_in_own_backing_group.0 as usize) + .unwrap() + .clone(); + let message = AllMessages::StatementDistribution( + StatementDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( + seconding_peer_id, + Versioned::V3(v3::StatementDistributionMessage::Statement( + block_info.hash, + statement, + )), + )), + ); + env.send_message(message).await; + + let max_messages_per_candidate = state.config.max_candidate_depth + 1; + // One was just sent for the own backing group + let mut messages_tracker = (0..groups.len()) + .map(|i| if i == own_backing_group_index { max_messages_per_candidate } else { 0 }) + .collect_vec(); + + let neighbors = + 
topology.compute_grid_neighbors_for(ValidatorIndex(NODE_UNDER_TEST)).unwrap(); + let connected_neighbors_x = neighbors + .validator_indices_x + .iter() + .filter(|&v| connected_validators.contains(&(v.0 as usize))) + .cloned() + .collect_vec(); + let connected_neighbors_y = neighbors + .validator_indices_y + .iter() + .filter(|&v| connected_validators.contains(&(v.0 as usize))) + .cloned() + .collect_vec(); + let one_hop_peers_and_groups = connected_neighbors_x + .iter() + .chain(connected_neighbors_y.iter()) + .map(|validator_index| { + let peer_id = + *state.test_authorities.peer_ids.get(validator_index.0 as usize).unwrap(); + let group_index = + groups.iter().position(|group| group.contains(validator_index)).unwrap(); + (peer_id, group_index) + }) + .collect_vec(); + let two_hop_x_peers_and_groups = connected_neighbors_x + .iter() + .flat_map(|validator_index| { + let peer_id = + *state.test_authorities.peer_ids.get(validator_index.0 as usize).unwrap(); + topology + .compute_grid_neighbors_for(*validator_index) + .unwrap() + .validator_indices_y + .iter() + .map(|validator_neighbor| { + let group_index = groups + .iter() + .position(|group| group.contains(validator_neighbor)) + .unwrap(); + (peer_id, group_index) + }) + .collect_vec() + }) + .collect_vec(); + let two_hop_y_peers_and_groups = connected_neighbors_y + .iter() + .flat_map(|validator_index| { + let peer_id = + *state.test_authorities.peer_ids.get(validator_index.0 as usize).unwrap(); + topology + .compute_grid_neighbors_for(*validator_index) + .unwrap() + .validator_indices_x + .iter() + .map(|validator_neighbor| { + let group_index = groups + .iter() + .position(|group| group.contains(validator_neighbor)) + .unwrap(); + (peer_id, group_index) + }) + .collect_vec() + }) + .collect_vec(); + + for (seconding_peer_id, group_index) in one_hop_peers_and_groups + .into_iter() + .chain(two_hop_x_peers_and_groups) + .chain(two_hop_y_peers_and_groups) + { + let messages_sent_count = messages_tracker.get_mut(group_index).unwrap(); + if *messages_sent_count == max_messages_per_candidate { + continue + } + *messages_sent_count += 1; + + let candidate_hash = state + .candidate_receipts + .get(&block_info.hash) + .unwrap() + .get(group_index) + .unwrap() + .hash(); + let manifest = BackedCandidateManifest { + relay_parent: block_info.hash, + candidate_hash, + group_index: GroupIndex(group_index as u32), + para_id: Id::new(group_index as u32 + 1), + parent_head_data_hash: state.pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: BitVec::from_iter( + groups.get(GroupIndex(group_index as u32)).unwrap().iter().map(|_| true), + ), + validated_in_group: BitVec::from_iter( + groups.get(GroupIndex(group_index as u32)).unwrap().iter().map(|_| false), + ), + }, + }; + let message = AllMessages::StatementDistribution( + StatementDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( + seconding_peer_id, + Versioned::V3(v3::StatementDistributionMessage::BackedCandidateManifest( + manifest, + )), + )), + ); + env.send_message(message).await; + } + + candidates_advertised += messages_tracker.iter().filter(|&&v| v > 0).collect_vec().len(); + + loop { + let manifests_count = state + .manifests_tracker + .values() + .filter(|v| v.load(Ordering::SeqCst)) + .collect::>() + .len(); + gum::debug!(target: LOG_TARGET, "{}/{} manifest exchanges", manifests_count, candidates_advertised); + + if manifests_count == candidates_advertised { + break; + } + tokio::time::sleep(Duration::from_millis(50)).await; + } + } 
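+
+		// At this point every advertised candidate has completed its manifest exchange.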
+ + let duration: u128 = test_start.elapsed().as_millis(); + gum::info!(target: LOG_TARGET, "All blocks processed in {}", format!("{:?}ms", duration).cyan()); + gum::info!(target: LOG_TARGET, + "Avg block time: {}", + format!("{} ms", test_start.elapsed().as_millis() / env.config().num_blocks as u128).red() + ); + + env.stop().await; + env.collect_resource_usage(benchmark_name, &["statement-distribution"]) +} diff --git a/polkadot/node/subsystem-bench/src/lib/statement/test_state.rs b/polkadot/node/subsystem-bench/src/lib/statement/test_state.rs new file mode 100644 index 00000000000..b8ea64c7e33 --- /dev/null +++ b/polkadot/node/subsystem-bench/src/lib/statement/test_state.rs @@ -0,0 +1,436 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::{ + configuration::{TestAuthorities, TestConfiguration}, + mock::runtime_api::session_info_for_peers, + network::{HandleNetworkMessage, NetworkMessage}, + NODE_UNDER_TEST, +}; +use bitvec::vec::BitVec; +use futures::channel::oneshot; +use itertools::Itertools; +use parity_scale_codec::{Decode, Encode}; +use polkadot_node_network_protocol::{ + request_response::{ + v2::{AttestedCandidateRequest, AttestedCandidateResponse}, + Requests, + }, + v3::{ + BackedCandidateAcknowledgement, StatementDistributionMessage, StatementFilter, + ValidationProtocol, + }, + Versioned, +}; +use polkadot_node_primitives::{AvailableData, BlockData, PoV}; +use polkadot_node_subsystem_test_helpers::{ + derive_erasure_chunks_with_proofs_and_root, mock::new_block_import_info, +}; +use polkadot_overseer::BlockInfo; +use polkadot_primitives::{ + BlockNumber, CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CompactStatement, + Hash, Header, Id, PersistedValidationData, SessionInfo, SignedStatement, SigningContext, + UncheckedSigned, ValidatorIndex, ValidatorPair, +}; +use polkadot_primitives_test_helpers::{ + dummy_committed_candidate_receipt, dummy_hash, dummy_head_data, dummy_pvd, +}; +use sc_network::{config::IncomingRequest, ProtocolName}; +use sp_core::{Pair, H256}; +use std::{ + collections::HashMap, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; + +#[derive(Clone)] +pub struct TestState { + // Full test config + pub config: TestConfiguration, + // Authority keys for the network emulation. 
+	pub test_authorities: TestAuthorities,
+	// Relay chain block infos
+	pub block_infos: Vec<BlockInfo>,
+	// Map from relay chain block hash to the candidate receipts generated for it
+	pub candidate_receipts: HashMap<H256, Vec<CandidateReceipt>>,
+	// Map from relay chain block hash to the committed candidate receipts generated for it
+	pub commited_candidate_receipts: HashMap<H256, Vec<CommittedCandidateReceipt>>,
+	// PersistedValidationData, we use one for all candidates
+	pub pvd: PersistedValidationData,
+	// Relay chain block headers
+	pub block_headers: HashMap<H256, Header>,
+	// Session info
+	pub session_info: SessionInfo,
+	// Pregenerated statements
+	pub statements: HashMap<CandidateHash, Vec<UncheckedSigned<CompactStatement>>>,
+	// Validator indices of the backing group that contains the node under test
+	pub own_backing_group: Vec<ValidatorIndex>,
+	// Tracks which statements we received for each candidate
+	pub statements_tracker: HashMap<CandidateHash, Vec<Arc<AtomicBool>>>,
+	// Tracks if the manifest exchange happened
+	pub manifests_tracker: HashMap<CandidateHash, Arc<AtomicBool>>,
+}
+
+impl TestState {
+	pub fn new(config: &TestConfiguration) -> Self {
+		let test_authorities = config.generate_authorities();
+		let session_info = session_info_for_peers(config, &test_authorities);
+		let own_backing_group = session_info
+			.validator_groups
+			.iter()
+			.find(|g| g.contains(&ValidatorIndex(NODE_UNDER_TEST)))
+			.unwrap()
+			.clone();
+		let mut state = Self {
+			config: config.clone(),
+			test_authorities,
+			block_infos: (1..=config.num_blocks).map(generate_block_info).collect(),
+			candidate_receipts: Default::default(),
+			commited_candidate_receipts: Default::default(),
+			pvd: dummy_pvd(dummy_head_data(), 0),
+			block_headers: Default::default(),
+			statements_tracker: Default::default(),
+			manifests_tracker: Default::default(),
+			session_info,
+			own_backing_group,
+			statements: Default::default(),
+		};
+
+		state.block_headers = state.block_infos.iter().map(generate_block_header).collect();
+
+		// For each unique pov we create a candidate receipt.
+ let pov_sizes = Vec::from(config.pov_sizes()); // For n_cores + let pov_size_to_candidate = generate_pov_size_to_candidate(&pov_sizes); + let receipt_templates = + generate_receipt_templates(&pov_size_to_candidate, config.n_validators, &state.pvd); + + for block_info in state.block_infos.iter() { + for core_idx in 0..config.n_cores { + let pov_size = pov_sizes.get(core_idx).expect("This is a cycle; qed"); + let candidate_index = + *pov_size_to_candidate.get(pov_size).expect("pov_size always exists; qed"); + let mut receipt = receipt_templates[candidate_index].clone(); + receipt.descriptor.para_id = Id::new(core_idx as u32 + 1); + receipt.descriptor.relay_parent = block_info.hash; + + state.candidate_receipts.entry(block_info.hash).or_default().push( + CandidateReceipt { + descriptor: receipt.descriptor.clone(), + commitments_hash: receipt.commitments.hash(), + }, + ); + state.statements_tracker.entry(receipt.hash()).or_default().extend( + (0..config.n_validators) + .map(|_| Arc::new(AtomicBool::new(false))) + .collect_vec(), + ); + state.manifests_tracker.insert(receipt.hash(), Arc::new(AtomicBool::new(false))); + state + .commited_candidate_receipts + .entry(block_info.hash) + .or_default() + .push(receipt); + } + } + + let groups = state.session_info.validator_groups.clone(); + + for block_info in state.block_infos.iter() { + for (index, group) in groups.iter().enumerate() { + let candidate = + state.candidate_receipts.get(&block_info.hash).unwrap().get(index).unwrap(); + let statements = group + .iter() + .map(|&v| { + sign_statement( + CompactStatement::Seconded(candidate.hash()), + block_info.hash, + v, + state.test_authorities.validator_pairs.get(v.0 as usize).unwrap(), + ) + }) + .collect_vec(); + state.statements.insert(candidate.hash(), statements); + } + } + + state + } + + pub fn reset_trackers(&self) { + self.statements_tracker.values().for_each(|v| { + v.iter() + .enumerate() + .for_each(|(index, v)| v.as_ref().store(index <= 1, Ordering::SeqCst)) + }); + self.manifests_tracker + .values() + .for_each(|v| v.as_ref().store(false, Ordering::SeqCst)); + } +} + +fn sign_statement( + statement: CompactStatement, + relay_parent: H256, + validator_index: ValidatorIndex, + pair: &ValidatorPair, +) -> UncheckedSigned { + let context = SigningContext { parent_hash: relay_parent, session_index: 0 }; + let payload = statement.signing_payload(&context); + + SignedStatement::new( + statement, + validator_index, + pair.sign(&payload[..]), + &context, + &pair.public(), + ) + .unwrap() + .as_unchecked() + .to_owned() +} + +fn generate_block_info(block_num: usize) -> BlockInfo { + new_block_import_info(Hash::repeat_byte(block_num as u8), block_num as BlockNumber) +} + +fn generate_block_header(info: &BlockInfo) -> (H256, Header) { + ( + info.hash, + Header { + digest: Default::default(), + number: info.number, + parent_hash: info.parent_hash, + extrinsics_root: Default::default(), + state_root: Default::default(), + }, + ) +} + +fn generate_pov_size_to_candidate(pov_sizes: &[usize]) -> HashMap { + pov_sizes + .iter() + .cloned() + .unique() + .enumerate() + .map(|(index, pov_size)| (pov_size, index)) + .collect() +} + +fn generate_receipt_templates( + pov_size_to_candidate: &HashMap, + n_validators: usize, + pvd: &PersistedValidationData, +) -> Vec { + pov_size_to_candidate + .iter() + .map(|(&pov_size, &index)| { + let mut receipt = dummy_committed_candidate_receipt(dummy_hash()); + let (_, erasure_root) = derive_erasure_chunks_with_proofs_and_root( + n_validators, + &AvailableData { + 
validation_data: pvd.clone(), + pov: Arc::new(PoV { block_data: BlockData(vec![index as u8; pov_size]) }), + }, + |_, _| {}, + ); + receipt.descriptor.persisted_validation_data_hash = pvd.hash(); + receipt.descriptor.erasure_root = erasure_root; + receipt + }) + .collect() +} + +#[async_trait::async_trait] +impl HandleNetworkMessage for TestState { + async fn handle( + &self, + message: NetworkMessage, + node_sender: &mut futures::channel::mpsc::UnboundedSender, + ) -> Option { + match message { + NetworkMessage::RequestFromNode(_authority_id, Requests::AttestedCandidateV2(req)) => { + let payload = req.payload; + let candidate_receipt = self + .commited_candidate_receipts + .values() + .flatten() + .find(|v| v.hash() == payload.candidate_hash) + .unwrap() + .clone(); + let persisted_validation_data = self.pvd.clone(); + let statements = self.statements.get(&payload.candidate_hash).unwrap().clone(); + let res = AttestedCandidateResponse { + candidate_receipt, + persisted_validation_data, + statements, + }; + let _ = req.pending_response.send(Ok((res.encode(), ProtocolName::from("")))); + None + }, + NetworkMessage::MessageFromNode( + authority_id, + Versioned::V3(ValidationProtocol::StatementDistribution( + StatementDistributionMessage::Statement(relay_parent, statement), + )), + ) => { + let index = self + .test_authorities + .validator_authority_id + .iter() + .position(|v| v == &authority_id) + .unwrap(); + let candidate_hash = *statement.unchecked_payload().candidate_hash(); + + let statements_sent_count = self + .statements_tracker + .get(&candidate_hash) + .unwrap() + .get(index) + .unwrap() + .as_ref(); + if statements_sent_count.load(Ordering::SeqCst) { + return None + } else { + statements_sent_count.store(true, Ordering::SeqCst); + } + + let group_statements = self.statements.get(&candidate_hash).unwrap(); + if !group_statements.iter().any(|s| s.unchecked_validator_index().0 == index as u32) + { + return None + } + + let statement = CompactStatement::Valid(candidate_hash); + let context = SigningContext { parent_hash: relay_parent, session_index: 0 }; + let payload = statement.signing_payload(&context); + let pair = self.test_authorities.validator_pairs.get(index).unwrap(); + let signature = pair.sign(&payload[..]); + let statement = SignedStatement::new( + statement, + ValidatorIndex(index as u32), + signature, + &context, + &pair.public(), + ) + .unwrap() + .as_unchecked() + .to_owned(); + + node_sender + .start_send(NetworkMessage::MessageFromPeer( + *self.test_authorities.peer_ids.get(index).unwrap(), + Versioned::V3(ValidationProtocol::StatementDistribution( + StatementDistributionMessage::Statement(relay_parent, statement), + )), + )) + .unwrap(); + None + }, + NetworkMessage::MessageFromNode( + authority_id, + Versioned::V3(ValidationProtocol::StatementDistribution( + StatementDistributionMessage::BackedCandidateManifest(manifest), + )), + ) => { + let index = self + .test_authorities + .validator_authority_id + .iter() + .position(|v| v == &authority_id) + .unwrap(); + let backing_group = + self.session_info.validator_groups.get(manifest.group_index).unwrap(); + let group_size = backing_group.len(); + let is_own_backing_group = backing_group.contains(&ValidatorIndex(NODE_UNDER_TEST)); + let mut seconded_in_group = + BitVec::from_iter((0..group_size).map(|_| !is_own_backing_group)); + let mut validated_in_group = BitVec::from_iter((0..group_size).map(|_| false)); + + if is_own_backing_group { + let (pending_response, response_receiver) = oneshot::channel(); + let peer_id = 
self.test_authorities.peer_ids.get(index).unwrap().to_owned(); + node_sender + .start_send(NetworkMessage::RequestFromPeer(IncomingRequest { + peer: peer_id, + payload: AttestedCandidateRequest { + candidate_hash: manifest.candidate_hash, + mask: StatementFilter::blank(self.own_backing_group.len()), + } + .encode(), + pending_response, + })) + .unwrap(); + + let response = response_receiver.await.unwrap(); + let response = + AttestedCandidateResponse::decode(&mut response.result.unwrap().as_ref()) + .unwrap(); + + for statement in response.statements { + let validator_index = statement.unchecked_validator_index(); + let position_in_group = + backing_group.iter().position(|v| *v == validator_index).unwrap(); + match statement.unchecked_payload() { + CompactStatement::Seconded(_) => + seconded_in_group.set(position_in_group, true), + CompactStatement::Valid(_) => + validated_in_group.set(position_in_group, true), + } + } + } + + let ack = BackedCandidateAcknowledgement { + candidate_hash: manifest.candidate_hash, + statement_knowledge: StatementFilter { seconded_in_group, validated_in_group }, + }; + node_sender + .start_send(NetworkMessage::MessageFromPeer( + *self.test_authorities.peer_ids.get(index).unwrap(), + Versioned::V3(ValidationProtocol::StatementDistribution( + StatementDistributionMessage::BackedCandidateKnown(ack), + )), + )) + .unwrap(); + + self.manifests_tracker + .get(&manifest.candidate_hash) + .unwrap() + .as_ref() + .store(true, Ordering::SeqCst); + + None + }, + NetworkMessage::MessageFromNode( + _authority_id, + Versioned::V3(ValidationProtocol::StatementDistribution( + StatementDistributionMessage::BackedCandidateKnown(ack), + )), + ) => { + self.manifests_tracker + .get(&ack.candidate_hash) + .unwrap() + .as_ref() + .store(true, Ordering::SeqCst); + + None + }, + _ => Some(message), + } + } +} -- GitLab From 2d3a6932de35fc53da4e4b6bc195b1cc69550300 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Mon, 27 May 2024 23:29:50 +0200 Subject: [PATCH 073/106] `sc-chain-spec`: deprecated code removed (#4410) This PR removes deprecated code: - The `RuntimeGenesisConfig` generic type parameter in `GenericChainSpec` struct. - `ChainSpec::from_genesis` method allowing to create chain-spec using closure providing runtime genesis struct - `GenesisSource::Factory` variant together with no longer needed `GenesisSource`'s generic parameter `G` (which was intended to be a runtime genesis struct). 
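
For downstream users this is a mechanical migration from the removed constructor to the builder. A minimal sketch (chain name, id and genesis values are placeholders; `with_genesis_config_patch` is assumed here as the patch-flavoured sibling of the `with_genesis_config` call used in the tests below):

```rust
// Before (removed by this PR):
// let spec = ChainSpec::from_genesis(
//     "Dev", "dev", ChainType::Local, move || genesis_config(),
//     vec![], None, None, None, None, extensions, wasm_binary,
// );

// After: the builder carries the same information explicitly.
let spec = ChainSpec::builder(wasm_binary, extensions)
    .with_name("Dev")
    .with_id("dev")
    .with_chain_type(ChainType::Local)
    .with_genesis_config_patch(serde_json::json!({
        "balances": { "balances": [] },
    }))
    .build();
```
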
https://github.com/paritytech/polkadot-sdk/blob/17b56fae2d976a3df87f34076875de8c26da0355/substrate/client/chain-spec/src/chain_spec.rs#L559-L563 --- Cargo.lock | 1 + .../polkadot-parachain/src/chain_spec/mod.rs | 2 +- cumulus/polkadot-parachain/src/command.rs | 2 +- cumulus/test/service/src/chain_spec.rs | 4 +- polkadot/node/service/src/chain_spec.rs | 6 +- polkadot/node/service/src/lib.rs | 4 +- polkadot/node/test/service/src/chain_spec.rs | 2 +- prdoc/pr_4410.prdoc | 37 +++ substrate/bin/node/cli/src/chain_spec.rs | 2 +- substrate/client/chain-spec/Cargo.toml | 1 + ...spec_as_json_fails_with_invalid_config.err | 114 --------- substrate/client/chain-spec/src/chain_spec.rs | 229 ++++-------------- substrate/client/chain-spec/src/lib.rs | 13 +- .../client/cli/src/commands/insert_key.rs | 4 +- substrate/client/cli/src/runner.rs | 15 +- .../grandpa/src/communication/tests.rs | 25 +- substrate/client/service/src/lib.rs | 2 +- substrate/client/service/test/src/lib.rs | 37 ++- templates/minimal/node/src/chain_spec.rs | 2 +- templates/parachain/node/src/chain_spec.rs | 2 +- templates/solochain/node/src/chain_spec.rs | 4 +- 21 files changed, 143 insertions(+), 365 deletions(-) create mode 100644 prdoc/pr_4410.prdoc delete mode 100644 substrate/client/chain-spec/res/chain_spec_as_json_fails_with_invalid_config.err diff --git a/Cargo.lock b/Cargo.lock index acbda4f0326..3d6cbc9e83f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16702,6 +16702,7 @@ dependencies = [ "log", "memmap2 0.9.3", "parity-scale-codec", + "regex", "sc-chain-spec-derive", "sc-client-api", "sc-executor", diff --git a/cumulus/polkadot-parachain/src/chain_spec/mod.rs b/cumulus/polkadot-parachain/src/chain_spec/mod.rs index 136a19e3166..19047b073b0 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/mod.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/mod.rs @@ -54,7 +54,7 @@ impl Extensions { } /// Generic chain spec for all polkadot-parachain runtimes -pub type GenericChainSpec = sc_service::GenericChainSpec<(), Extensions>; +pub type GenericChainSpec = sc_service::GenericChainSpec; /// Helper function to generate a crypto pair from seed pub fn get_from_seed(seed: &str) -> ::Public { diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 041187de488..653ea3281f0 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -1017,7 +1017,7 @@ mod tests { cfg_file_path } - pub type DummyChainSpec = sc_service::GenericChainSpec<(), E>; + pub type DummyChainSpec = sc_service::GenericChainSpec; pub fn create_default_with_extensions( id: &str, diff --git a/cumulus/test/service/src/chain_spec.rs b/cumulus/test/service/src/chain_spec.rs index 4db2513e2b6..28faba7377e 100644 --- a/cumulus/test/service/src/chain_spec.rs +++ b/cumulus/test/service/src/chain_spec.rs @@ -17,7 +17,7 @@ #![allow(missing_docs)] use cumulus_primitives_core::ParaId; -use cumulus_test_runtime::{AccountId, RuntimeGenesisConfig, Signature}; +use cumulus_test_runtime::{AccountId, Signature}; use parachains_common::AuraId; use sc_chain_spec::{ChainSpecExtension, ChainSpecGroup}; use sc_service::ChainType; @@ -26,7 +26,7 @@ use sp_core::{sr25519, Pair, Public}; use sp_runtime::traits::{IdentifyAccount, Verify}; /// Specialized `ChainSpec` for the normal parachain runtime. 
-pub type ChainSpec = sc_service::GenericChainSpec; +pub type ChainSpec = sc_service::GenericChainSpec; /// Helper function to generate a crypto pair from seed pub fn get_from_seed(seed: &str) -> ::Public { diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index 1b990af2394..c7019e3f0b2 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -70,11 +70,11 @@ pub struct Extensions { } // Generic chain spec, in case when we don't have the native runtime. -pub type GenericChainSpec = service::GenericChainSpec<(), Extensions>; +pub type GenericChainSpec = service::GenericChainSpec; /// The `ChainSpec` parameterized for the westend runtime. #[cfg(feature = "westend-native")] -pub type WestendChainSpec = service::GenericChainSpec<(), Extensions>; +pub type WestendChainSpec = service::GenericChainSpec; /// The `ChainSpec` parameterized for the westend runtime. // Dummy chain spec, but that is fine when we don't have the native runtime. @@ -83,7 +83,7 @@ pub type WestendChainSpec = GenericChainSpec; /// The `ChainSpec` parameterized for the rococo runtime. #[cfg(feature = "rococo-native")] -pub type RococoChainSpec = service::GenericChainSpec<(), Extensions>; +pub type RococoChainSpec = service::GenericChainSpec; /// The `ChainSpec` parameterized for the rococo runtime. // Dummy chain spec, but that is fine when we don't have the native runtime. diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 6d365b93ac7..f50b9770b41 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -100,8 +100,8 @@ pub use sc_executor::NativeExecutionDispatch; use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; pub use service::{ config::{DatabaseSource, PrometheusConfig}, - ChainSpec, Configuration, Error as SubstrateServiceError, PruningMode, Role, RuntimeGenesis, - TFullBackend, TFullCallExecutor, TFullClient, TaskManager, TransactionPoolOptions, + ChainSpec, Configuration, Error as SubstrateServiceError, PruningMode, Role, TFullBackend, + TFullCallExecutor, TFullClient, TaskManager, TransactionPoolOptions, }; pub use sp_api::{ApiRef, ConstructRuntimeApi, Core as CoreApi, ProvideRuntimeApi}; pub use sp_runtime::{ diff --git a/polkadot/node/test/service/src/chain_spec.rs b/polkadot/node/test/service/src/chain_spec.rs index f14fa9fde58..e6a1229caf8 100644 --- a/polkadot/node/test/service/src/chain_spec.rs +++ b/polkadot/node/test/service/src/chain_spec.rs @@ -33,7 +33,7 @@ use test_runtime_constants::currency::DOTS; const DEFAULT_PROTOCOL_ID: &str = "dot"; /// The `ChainSpec` parameterized for polkadot test runtime. -pub type PolkadotChainSpec = sc_service::GenericChainSpec<(), Extensions>; +pub type PolkadotChainSpec = sc_service::GenericChainSpec; /// Returns the properties for the [`PolkadotChainSpec`]. pub fn polkadot_chain_spec_properties() -> serde_json::map::Map { diff --git a/prdoc/pr_4410.prdoc b/prdoc/pr_4410.prdoc new file mode 100644 index 00000000000..1dc1d4c1f87 --- /dev/null +++ b/prdoc/pr_4410.prdoc @@ -0,0 +1,37 @@ +title: "[sc-chain-spec] Remove deprecated code" + +doc: + - audience: Node Dev + description: | + The RuntimeGenesisConfig generic type parameter was removed from GenericChainSpec struct. + ChainSpec::from_genesis method was removed. + Removed related deprecated code from `sc-chain-spec`. + This change simplifies the codebase and ensures the use of up-to-date definitions. 
+ +crates: + - name: sc-service + bump: minor + - name: minimal-template-node + bump: minor + - name: sc-cli + bump: patch + - name: polkadot-test-service + bump: major + - name: sc-service-test + bump: major + - name: staging-node-cli + bump: major + - name: parachain-template-node + bump: minor + - name: solochain-template-node + bump: minor + - name: polkadot-parachain-bin + bump: major + - name: polkadot-service + bump: major + - name: sc-consensus-grandpa + bump: patch + - name: cumulus-test-service + bump: minor + - name: sc-chain-spec + bump: major diff --git a/substrate/bin/node/cli/src/chain_spec.rs b/substrate/bin/node/cli/src/chain_spec.rs index a3b536e5434..bc7821bfcf3 100644 --- a/substrate/bin/node/cli/src/chain_spec.rs +++ b/substrate/bin/node/cli/src/chain_spec.rs @@ -64,7 +64,7 @@ pub struct Extensions { } /// Specialized `ChainSpec`. -pub type ChainSpec = sc_service::GenericChainSpec; +pub type ChainSpec = sc_service::GenericChainSpec; /// Flaming Fir testnet generator pub fn flaming_fir_config() -> Result { ChainSpec::from_json_bytes(&include_bytes!("../res/flaming-fir.json")[..]) diff --git a/substrate/client/chain-spec/Cargo.toml b/substrate/client/chain-spec/Cargo.toml index 84ef89783ad..9028a2c49ee 100644 --- a/substrate/client/chain-spec/Cargo.toml +++ b/substrate/client/chain-spec/Cargo.toml @@ -42,3 +42,4 @@ substrate-test-runtime = { path = "../../test-utils/runtime" } sp-keyring = { path = "../../primitives/keyring" } sp-application-crypto = { default-features = false, path = "../../primitives/application-crypto", features = ["serde"] } sp-consensus-babe = { default-features = false, path = "../../primitives/consensus/babe", features = ["serde"] } +regex = "1.6.0" diff --git a/substrate/client/chain-spec/res/chain_spec_as_json_fails_with_invalid_config.err b/substrate/client/chain-spec/res/chain_spec_as_json_fails_with_invalid_config.err deleted file mode 100644 index c545b53b2ba..00000000000 --- a/substrate/client/chain-spec/res/chain_spec_as_json_fails_with_invalid_config.err +++ /dev/null @@ -1,114 +0,0 @@ -Invalid JSON blob: unknown field `babex`, expected one of `system`, `babe`, `substrateTest`, `balances` at line 3 column 9 for blob: -{ - "system": {}, - "babex": { - "authorities": [ - [ - "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", - 1 - ], - [ - "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", - 1 - ], - [ - "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y", - 1 - ] - ], - "epochConfig": { - "c": [ - 3, - 10 - ], - "allowed_slots": "PrimaryAndSecondaryPlainSlots" - } - }, - "substrateTest": { - "authorities": [ - "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", - "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", - "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" - ] - }, - "balances": { - "balances": [ - [ - "5D34dL5prEUaGNQtPPZ3yN5Y6BnkfXunKXXz6fo7ZJbLwRRH", - 100000000000000000 - ], - [ - "5GBNeWRhZc2jXu7D55rBimKYDk8PGk8itRYFTPfC8RJLKG5o", - 100000000000000000 - ], - [ - "5Dfis6XL8J2P6JHUnUtArnFWndn62SydeP8ee8sG2ky9nfm9", - 100000000000000000 - ], - [ - "5F4H97f7nQovyrbiq4ZetaaviNwThSVcFobcA5aGab6167dK", - 100000000000000000 - ], - [ - "5DiDShBWa1fQx6gLzpf3SFBhMinCoyvHM1BWjPNsmXS8hkrW", - 100000000000000000 - ], - [ - "5EFb84yH9tpcFuiKUcsmdoF7xeeY3ajG1ZLQimxQoFt9HMKR", - 100000000000000000 - ], - [ - "5DZLHESsfGrJ5YzT3HuRPXsSNb589xQ4Unubh1mYLodzKdVY", - 100000000000000000 - ], - [ - "5GHJzqvG6tXnngCpG7B12qjUvbo5e4e9z8Xjidk3CQZHxTPZ", - 100000000000000000 - ], - [ - "5CUnSsgAyLND3bxxnfNhgWXSe9Wn676JzLpGLgyJv858qhoX", - 
100000000000000000 - ], - [ - "5CVKn7HAZW1Ky4r7Vkgsr7VEW88C2sHgUNDiwHY9Ct2hjU8q", - 100000000000000000 - ], - [ - "5H673aukQ4PeDe1U2nuv1bi32xDEziimh3PZz7hDdYUB7TNz", - 100000000000000000 - ], - [ - "5HTe9L15LJryjUAt1jZXZCBPnzbbGnpvFwbjE3NwCWaAqovf", - 100000000000000000 - ], - [ - "5D7LFzGpMwHPyDBavkRbWSKWTtJhCaPPZ379wWLT23bJwXJz", - 100000000000000000 - ], - [ - "5CLepMARnEgtVR1EkUuJVUvKh97gzergpSxUU3yKGx1v6EwC", - 100000000000000000 - ], - [ - "5Chb2UhfvZpmjjEziHbFbotM4quX32ZscRV6QJBt1rUKzz51", - 100000000000000000 - ], - [ - "5HmRp3i3ZZk7xsAvbi8hyXVP6whSMnBJGebVC4FsiZVhx52e", - 100000000000000000 - ], - [ - "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", - 100000000000000000 - ], - [ - "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", - 100000000000000000 - ], - [ - "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y", - 100000000000000000 - ] - ] - } -} \ No newline at end of file diff --git a/substrate/client/chain-spec/src/chain_spec.rs b/substrate/client/chain-spec/src/chain_spec.rs index a9cdce4bf95..883cd19adfd 100644 --- a/substrate/client/chain-spec/src/chain_spec.rs +++ b/substrate/client/chain-spec/src/chain_spec.rs @@ -20,7 +20,7 @@ #![warn(missing_docs)] use crate::{ extension::GetExtension, genesis_config_builder::HostFunctions, ChainType, - GenesisConfigBuilderRuntimeCaller as RuntimeCaller, Properties, RuntimeGenesis, + GenesisConfigBuilderRuntimeCaller as RuntimeCaller, Properties, }; use sc_network::config::MultiaddrWithPeerId; use sc_telemetry::TelemetryEndpoints; @@ -37,7 +37,6 @@ use std::{ fs::File, marker::PhantomData, path::PathBuf, - sync::Arc, }; #[derive(Serialize, Deserialize)] @@ -58,37 +57,33 @@ impl Clone for GenesisBuildAction { } } -#[allow(deprecated)] -enum GenesisSource { +enum GenesisSource { File(PathBuf), Binary(Cow<'static, [u8]>), /// factory function + code - //Factory and G type parameter shall be removed together with `ChainSpec::from_genesis` - Factory(Arc G + Send + Sync>, Vec), Storage(Storage), /// build action + code GenesisBuilderApi(GenesisBuildAction, Vec), } -impl Clone for GenesisSource { +impl Clone for GenesisSource { fn clone(&self) -> Self { match *self { Self::File(ref path) => Self::File(path.clone()), Self::Binary(ref d) => Self::Binary(d.clone()), - Self::Factory(ref f, ref c) => Self::Factory(f.clone(), c.clone()), Self::Storage(ref s) => Self::Storage(s.clone()), Self::GenesisBuilderApi(ref s, ref c) => Self::GenesisBuilderApi(s.clone(), c.clone()), } } } -impl GenesisSource { - fn resolve(&self) -> Result, String> { +impl GenesisSource { + fn resolve(&self) -> Result { /// helper container for deserializing genesis from the JSON file (ChainSpec JSON file is /// also supported here) #[derive(Serialize, Deserialize)] - struct GenesisContainer { - genesis: Genesis, + struct GenesisContainer { + genesis: Genesis, } match self { @@ -105,19 +100,15 @@ impl GenesisSource { })? 
}; - let genesis: GenesisContainer = json::from_slice(&bytes) + let genesis: GenesisContainer = json::from_slice(&bytes) .map_err(|e| format!("Error parsing spec file: {}", e))?; Ok(genesis.genesis) }, Self::Binary(buf) => { - let genesis: GenesisContainer = json::from_reader(buf.as_ref()) + let genesis: GenesisContainer = json::from_reader(buf.as_ref()) .map_err(|e| format!("Error parsing embedded file: {}", e))?; Ok(genesis.genesis) }, - Self::Factory(f, code) => Ok(Genesis::RuntimeAndCode(RuntimeInnerWrapper { - runtime: f(), - code: code.clone(), - })), Self::Storage(storage) => Ok(Genesis::Raw(RawGenesis::from(storage.clone()))), Self::GenesisBuilderApi(GenesisBuildAction::Full(config), code) => Ok(Genesis::RuntimeGenesis(RuntimeGenesisInner { @@ -140,24 +131,12 @@ impl GenesisSource { } } -impl BuildStorage for ChainSpec +impl BuildStorage for ChainSpec where EHF: HostFunctions, { fn assimilate_storage(&self, storage: &mut Storage) -> Result<(), String> { match self.genesis.resolve()? { - #[allow(deprecated)] - Genesis::Runtime(runtime_genesis_config) => { - runtime_genesis_config.assimilate_storage(storage)?; - }, - #[allow(deprecated)] - Genesis::RuntimeAndCode(RuntimeInnerWrapper { - runtime: runtime_genesis_config, - code, - }) => { - runtime_genesis_config.assimilate_storage(storage)?; - storage.top.insert(sp_core::storage::well_known_keys::CODE.to_vec(), code); - }, Genesis::Raw(RawGenesis { top: map, children_default: children_map }) => { storage.top.extend(map.into_iter().map(|(k, v)| (k.0, v.0))); children_map.into_iter().for_each(|(k, v)| { @@ -236,7 +215,7 @@ impl From for RawGenesis { } } -/// Inner representation of [`Genesis::RuntimeGenesis`] format +/// Inner representation of [`Genesis::RuntimeGenesis`] format #[derive(Serialize, Deserialize, Debug)] struct RuntimeGenesisInner { /// Runtime wasm code, expected to be hex-encoded in JSON. @@ -249,7 +228,7 @@ struct RuntimeGenesisInner { } /// Represents two possible variants of the contained JSON blob for the -/// [`Genesis::RuntimeGenesis`] format. +/// [`Genesis::RuntimeGenesis`] format. #[derive(Serialize, Deserialize, Debug)] #[serde(rename_all = "camelCase")] enum RuntimeGenesisConfigJson { @@ -265,31 +244,11 @@ enum RuntimeGenesisConfigJson { Patch(json::Value), } -/// Inner variant wrapper for deprecated runtime. -#[derive(Serialize, Deserialize, Debug)] -struct RuntimeInnerWrapper { - /// The native `RuntimeGenesisConfig` struct. - runtime: G, - /// Runtime code. - #[serde(with = "sp_core::bytes")] - code: Vec, -} - /// Represents the different formats of the genesis state within chain spec JSON blob. #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] -enum Genesis { - /// (Deprecated) Contains the JSON representation of G (the native type representing the - /// runtime's `RuntimeGenesisConfig` struct) (will be removed with `ChainSpec::from_genesis`) - /// without the runtime code. It is required to deserialize the legacy chainspecs generated - /// with `ChainsSpec::from_genesis` method. - Runtime(G), - /// (Deprecated) Contains the JSON representation of G (the native type representing the - /// runtime's `RuntimeGenesisConfig` struct) (will be removed with `ChainSpec::from_genesis`) - /// and the runtime code. It is required to create and deserialize JSON chainspecs created with - /// deprecated `ChainSpec::from_genesis` method. - RuntimeAndCode(RuntimeInnerWrapper), +enum Genesis { /// The genesis storage as raw data. Typically raw key-value entries in state. 
Raw(RawGenesis), /// State root hash of the genesis storage. @@ -343,7 +302,7 @@ struct ClientSpec { pub type NoExtension = Option<()>; /// Builder for creating [`ChainSpec`] instances. -pub struct ChainSpecBuilder { +pub struct ChainSpecBuilder { code: Vec, extensions: E, name: String, @@ -355,10 +314,9 @@ pub struct ChainSpecBuilder { protocol_id: Option, fork_id: Option, properties: Option, - _genesis: PhantomData, } -impl ChainSpecBuilder { +impl ChainSpecBuilder { /// Creates a new builder instance with no defaults. pub fn new(code: &[u8], extensions: E) -> Self { Self { @@ -373,7 +331,6 @@ impl ChainSpecBuilder { protocol_id: None, fork_id: None, properties: None, - _genesis: Default::default(), } } @@ -457,7 +414,7 @@ impl ChainSpecBuilder { } /// Builds a [`ChainSpec`] instance using the provided settings. - pub fn build(self) -> ChainSpec { + pub fn build(self) -> ChainSpec { let client_spec = ClientSpec { name: self.name, id: self.id, @@ -486,13 +443,13 @@ impl ChainSpecBuilder { /// The chain spec is generic over the native `RuntimeGenesisConfig` struct (`G`). It is also /// possible to parametrize chain spec over the extended host functions (EHF). It should be use if /// runtime is using the non-standard host function during genesis state creation. -pub struct ChainSpec { +pub struct ChainSpec { client_spec: ClientSpec, - genesis: GenesisSource, + genesis: GenesisSource, _host_functions: PhantomData, } -impl Clone for ChainSpec { +impl Clone for ChainSpec { fn clone(&self) -> Self { ChainSpec { client_spec: self.client_spec.clone(), @@ -502,7 +459,7 @@ impl Clone for ChainSpec { } } -impl ChainSpec { +impl ChainSpec { /// A list of bootnode addresses. pub fn boot_nodes(&self) -> &[MultiaddrWithPeerId] { &self.client_spec.boot_nodes @@ -555,58 +512,18 @@ impl ChainSpec { &mut self.client_spec.extensions } - /// Create hardcoded spec. - #[deprecated( - note = "`from_genesis` is planned to be removed in May 2024. Use `builder()` instead." - )] - // deprecated note: Genesis::Runtime + GenesisSource::Factory shall also be removed - pub fn from_genesis G + 'static + Send + Sync>( - name: &str, - id: &str, - chain_type: ChainType, - constructor: F, - boot_nodes: Vec, - telemetry_endpoints: Option, - protocol_id: Option<&str>, - fork_id: Option<&str>, - properties: Option, - extensions: E, - code: &[u8], - ) -> Self { - let client_spec = ClientSpec { - name: name.to_owned(), - id: id.to_owned(), - chain_type, - boot_nodes, - telemetry_endpoints, - protocol_id: protocol_id.map(str::to_owned), - fork_id: fork_id.map(str::to_owned), - properties, - extensions, - consensus_engine: (), - genesis: Default::default(), - code_substitutes: BTreeMap::new(), - }; - - ChainSpec { - client_spec, - genesis: GenesisSource::Factory(Arc::new(constructor), code.into()), - _host_functions: Default::default(), - } - } - /// Type of the chain. fn chain_type(&self) -> ChainType { self.client_spec.chain_type.clone() } /// Provides a `ChainSpec` builder. - pub fn builder(code: &[u8], extensions: E) -> ChainSpecBuilder { + pub fn builder(code: &[u8], extensions: E) -> ChainSpecBuilder { ChainSpecBuilder::new(code, extensions) } } -impl ChainSpec { +impl ChainSpec { /// Parse json content into a `ChainSpec` pub fn from_json_bytes(json: impl Into>) -> Result { let json = json.into(); @@ -649,17 +566,17 @@ impl ChainS #[derive(Serialize, Deserialize)] // we cannot #[serde(deny_unknown_fields)]. Otherwise chain-spec-builder will fail on any // non-standard spec. 
-struct ChainSpecJsonContainer { +struct ChainSpecJsonContainer { #[serde(flatten)] client_spec: ClientSpec, - genesis: Genesis, + genesis: Genesis, } -impl ChainSpec +impl ChainSpec where EHF: HostFunctions, { - fn json_container(&self, raw: bool) -> Result, String> { + fn json_container(&self, raw: bool) -> Result, String> { let raw_genesis = match (raw, self.genesis.resolve()?) { ( true, @@ -685,20 +602,7 @@ where storage.top.insert(sp_core::storage::well_known_keys::CODE.to_vec(), code); RawGenesis::from(storage) }, - - #[allow(deprecated)] - (true, Genesis::RuntimeAndCode(RuntimeInnerWrapper { runtime: g, code })) => { - let mut storage = g.build_storage()?; - storage.top.insert(sp_core::storage::well_known_keys::CODE.to_vec(), code); - RawGenesis::from(storage) - }, - #[allow(deprecated)] - (true, Genesis::Runtime(g)) => { - let storage = g.build_storage()?; - RawGenesis::from(storage) - }, (true, Genesis::Raw(raw)) => raw, - (_, genesis) => return Ok(ChainSpecJsonContainer { client_spec: self.client_spec.clone(), genesis }), }; @@ -716,9 +620,8 @@ where } } -impl crate::ChainSpec for ChainSpec +impl crate::ChainSpec for ChainSpec where - G: RuntimeGenesis + 'static, E: GetExtension + serde::Serialize + Clone + Send + Sync + 'static, EHF: HostFunctions, { @@ -831,8 +734,8 @@ fn json_contains_path(doc: &json::Value, path: &mut VecDeque<&str>) -> bool { /// This function updates the code in given chain spec. /// -/// Function support updating the runtime code in provided JSON chain spec blob. `Genesis::Raw` -/// and `Genesis::RuntimeGenesis` formats are supported. +/// Function support updating the runtime code in provided JSON chain spec blob. `Genesis::Raw` +/// and `Genesis::RuntimeGenesis` formats are supported. /// /// If update was successful `true` is returned, otherwise `false`. Chain spec JSON is modified in /// place. 
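To make the two supported layouts concrete, here is a minimal sketch of the JSON manipulation the doc comment above describes, assuming the field layout shown earlier in this patch (`genesis.runtimeGenesis.code` for the `RuntimeGenesis` format, and the well-known `:code` key `0x3a636f6465` under `genesis.raw.top` for the `Raw` format). The helper name `replace_code` is hypothetical, not this crate's actual API:

```rust
use serde_json::{json, Value};

/// Hypothetical helper mirroring the documented behaviour: overwrite the
/// runtime code in a chain spec JSON blob, returning `true` on success and
/// `false` if neither supported layout is present.
fn replace_code(spec: &mut Value, code: &[u8]) -> bool {
	// Chain specs store the code hex-encoded with a `0x` prefix.
	let hex_code =
		format!("0x{}", code.iter().map(|b| format!("{b:02x}")).collect::<String>());
	// `Genesis::RuntimeGenesis`: the code sits next to the genesis config/patch.
	if let Some(slot) = spec.pointer_mut("/genesis/runtimeGenesis/code") {
		*slot = json!(hex_code);
		return true;
	}
	// `Genesis::Raw`: the code is the value of the `:code` well-known storage key.
	if let Some(slot) = spec.pointer_mut("/genesis/raw/top/0x3a636f6465") {
		*slot = json!(hex_code);
		return true;
	}
	false
}
```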
@@ -871,19 +774,7 @@ mod tests { use sp_core::storage::well_known_keys; use sp_keyring::AccountKeyring; - #[derive(Debug, Serialize, Deserialize)] - struct Genesis(BTreeMap); - - impl BuildStorage for Genesis { - fn assimilate_storage(&self, storage: &mut Storage) -> Result<(), String> { - storage.top.extend( - self.0.iter().map(|(a, b)| (a.clone().into_bytes(), b.clone().into_bytes())), - ); - Ok(()) - } - } - - type TestSpec = ChainSpec; + type TestSpec = ChainSpec; #[test] fn should_deserialize_example_chain_spec() { @@ -919,7 +810,7 @@ mod tests { } } - type TestSpec2 = ChainSpec; + type TestSpec2 = ChainSpec; #[test] fn should_deserialize_chain_spec_with_extensions() { @@ -1137,10 +1028,10 @@ mod tests { #[test] fn chain_spec_as_json_fails_with_invalid_config() { - let expected_error_message = - include_str!("../res/chain_spec_as_json_fails_with_invalid_config.err"); - let j = - include_str!("../../../test-utils/runtime/res/default_genesis_config_invalid_2.json"); + let invalid_genesis_config = from_str::(include_str!( + "../../../test-utils/runtime/res/default_genesis_config_invalid_2.json" + )) + .unwrap(); let output = ChainSpec::<()>::builder( substrate_test_runtime::wasm_binary_unwrap().into(), Default::default(), @@ -1148,12 +1039,25 @@ mod tests { .with_name("TestName") .with_id("test_id") .with_chain_type(ChainType::Local) - .with_genesis_config(from_str(j).unwrap()) + .with_genesis_config(invalid_genesis_config.clone()) .build(); - let result = output.as_json(true); + let result = output.as_json(true).unwrap_err(); + let mut result = result.lines(); - assert_eq!(result.err().unwrap(), expected_error_message); + let result_header = result.next().unwrap(); + let result_body = result.collect::>().join("\n"); + let result_body: Value = serde_json::from_str(&result_body).unwrap(); + + let re = regex::Regex::new(concat!( + r"^Invalid JSON blob: unknown field `babex`, expected one of `system`, `babe`, ", + r"`substrateTest`, `balances` at line \d+ column \d+ for blob:$" + )) + .unwrap(); + + assert_eq!(json!({"a":1,"b":2}), json!({"b":2,"a":1})); + assert!(re.is_match(result_header)); + assert_eq!(invalid_genesis_config, result_body); } #[test] @@ -1278,35 +1182,4 @@ mod tests { &|v| { *v == "0x000102040506" } )); } - - #[test] - fn generate_from_genesis_is_still_supported() { - #[allow(deprecated)] - let chain_spec: ChainSpec = ChainSpec::from_genesis( - "TestName", - "test", - ChainType::Local, - || Default::default(), - Vec::new(), - None, - None, - None, - None, - Default::default(), - &vec![0, 1, 2, 4, 5, 6], - ); - - let chain_spec_json = from_str::(&chain_spec.as_json(false).unwrap()).unwrap(); - assert!(json_eval_value_at_key( - &chain_spec_json, - &mut json_path!["genesis", "runtimeAndCode", "code"], - &|v| { *v == "0x000102040506" } - )); - let chain_spec_json = from_str::(&chain_spec.as_json(true).unwrap()).unwrap(); - assert!(json_eval_value_at_key( - &chain_spec_json, - &mut json_path!["genesis", "raw", "top", "0x3a636f6465"], - &|v| { *v == "0x000102040506" } - )); - } } diff --git a/substrate/client/chain-spec/src/lib.rs b/substrate/client/chain-spec/src/lib.rs index abe01dafd92..066a0ab9e2a 100644 --- a/substrate/client/chain-spec/src/lib.rs +++ b/substrate/client/chain-spec/src/lib.rs @@ -257,7 +257,7 @@ //! pub known_blocks: HashMap, //! } //! -//! pub type MyChainSpec = GenericChainSpec; +//! pub type MyChainSpec = GenericChainSpec; //! ``` //! Some parameters may require different values depending on the current blockchain height (a.k.a. //! forks). 
You can use the [`ChainSpecGroup`](macro@ChainSpecGroup) macro and the provided [`Forks`] @@ -286,10 +286,10 @@ //! pub type BlockNumber = u64; //! //! /// A chain spec supporting forkable `ClientParams`. -//! pub type MyChainSpec1 = GenericChainSpec>; +//! pub type MyChainSpec1 = GenericChainSpec>; //! //! /// A chain spec supporting forkable `Extension`. -//! pub type MyChainSpec2 = GenericChainSpec>; +//! pub type MyChainSpec2 = GenericChainSpec>; //! ``` //! It's also possible to have a set of parameters that are allowed to change with block numbers //! (i.e., they are forkable), and another set that is not subject to changes. This can also be @@ -316,7 +316,7 @@ //! pub pool: Forks, //! } //! -//! pub type MyChainSpec = GenericChainSpec; +//! pub type MyChainSpec = GenericChainSpec; //! ``` //! The chain spec can be extended with other fields that are opaque to the default chain spec. //! Specific node implementations will need to be able to deserialize these extensions. @@ -344,7 +344,6 @@ pub use sc_chain_spec_derive::{ChainSpecExtension, ChainSpecGroup}; use sc_network::config::MultiaddrWithPeerId; use sc_telemetry::TelemetryEndpoints; -use serde::{de::DeserializeOwned, Serialize}; use sp_core::storage::Storage; use sp_runtime::BuildStorage; @@ -373,10 +372,6 @@ impl Default for ChainType { /// Arbitrary properties defined in chain spec as a JSON object pub type Properties = serde_json::map::Map; -/// A set of traits for the runtime genesis config. -pub trait RuntimeGenesis: Serialize + DeserializeOwned + BuildStorage {} -impl RuntimeGenesis for T {} - /// Common interface of a chain specification. pub trait ChainSpec: BuildStorage + Send + Sync { /// Spec name. diff --git a/substrate/client/cli/src/commands/insert_key.rs b/substrate/client/cli/src/commands/insert_key.rs index 3d89610b28b..66dbec79486 100644 --- a/substrate/client/cli/src/commands/insert_key.rs +++ b/substrate/client/cli/src/commands/insert_key.rs @@ -126,8 +126,10 @@ mod tests { } fn load_spec(&self, _: &str) -> std::result::Result, String> { + let builder = + GenericChainSpec::::builder(Default::default(), NoExtension::None); Ok(Box::new( - GenericChainSpec::<()>::builder(Default::default(), NoExtension::None) + builder .with_name("test") .with_id("test_id") .with_chain_type(ChainType::Development) diff --git a/substrate/client/cli/src/runner.rs b/substrate/client/cli/src/runner.rs index 3bf27680784..6d986e38d2f 100644 --- a/substrate/client/cli/src/runner.rs +++ b/substrate/client/cli/src/runner.rs @@ -252,12 +252,15 @@ mod tests { state_pruning: None, blocks_pruning: sc_client_db::BlocksPruning::KeepAll, chain_spec: Box::new( - GenericChainSpec::<()>::builder(Default::default(), NoExtension::None) - .with_name("test") - .with_id("test_id") - .with_chain_type(ChainType::Development) - .with_genesis_config_patch(Default::default()) - .build(), + GenericChainSpec::::builder( + Default::default(), + NoExtension::None, + ) + .with_name("test") + .with_id("test_id") + .with_chain_type(ChainType::Development) + .with_genesis_config_patch(Default::default()) + .build(), ), wasm_method: Default::default(), wasm_runtime_overrides: None, diff --git a/substrate/client/consensus/grandpa/src/communication/tests.rs b/substrate/client/consensus/grandpa/src/communication/tests.rs index bc3023fc028..d7153a79ce0 100644 --- a/substrate/client/consensus/grandpa/src/communication/tests.rs +++ b/substrate/client/consensus/grandpa/src/communication/tests.rs @@ -706,25 +706,12 @@ fn peer_with_higher_view_leads_to_catch_up_request() { } 
fn local_chain_spec() -> Box { - use sc_chain_spec::{ChainSpec, GenericChainSpec}; - use serde::{Deserialize, Serialize}; - use sp_runtime::{BuildStorage, Storage}; - - #[derive(Debug, Serialize, Deserialize)] - struct Genesis(std::collections::BTreeMap); - impl BuildStorage for Genesis { - fn assimilate_storage(&self, storage: &mut Storage) -> Result<(), String> { - storage.top.extend( - self.0.iter().map(|(a, b)| (a.clone().into_bytes(), b.clone().into_bytes())), - ); - Ok(()) - } - } - let chain_spec = GenericChainSpec::::from_json_bytes( - &include_bytes!("../../../../chain-spec/res/chain_spec.json")[..], - ) - .unwrap(); - chain_spec.cloned_box() + let chain_spec = + sc_chain_spec::GenericChainSpec::::from_json_bytes( + &include_bytes!("../../../../chain-spec/res/chain_spec.json")[..], + ) + .unwrap(); + sc_chain_spec::ChainSpec::cloned_box(&chain_spec) } #[test] diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index d251fd2b58f..a51bb4012d5 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -76,7 +76,7 @@ pub use config::{ }; pub use sc_chain_spec::{ ChainSpec, ChainType, Extension as ChainSpecExtension, GenericChainSpec, NoExtension, - Properties, RuntimeGenesis, + Properties, }; pub use sc_consensus::ImportQueue; diff --git a/substrate/client/service/test/src/lib.rs b/substrate/client/service/test/src/lib.rs index f19b5a19739..e60bd9410c6 100644 --- a/substrate/client/service/test/src/lib.rs +++ b/substrate/client/service/test/src/lib.rs @@ -31,7 +31,7 @@ use sc_service::{ client::Client, config::{BasePath, DatabaseSource, KeystoreConfig, RpcBatchRequestConfig}, BlocksPruning, ChainSpecExtension, Configuration, Error, GenericChainSpec, Role, - RuntimeGenesis, SpawnTaskHandle, TaskManager, + SpawnTaskHandle, TaskManager, }; use sc_transaction_pool_api::TransactionPool; use sp_blockchain::HeaderBackend; @@ -46,16 +46,16 @@ mod client; /// Maximum duration of single wait call. const MAX_WAIT_TIME: Duration = Duration::from_secs(60 * 3); -struct TestNet { +struct TestNet { runtime: Runtime, authority_nodes: Vec<(usize, F, U, MultiaddrWithPeerId)>, full_nodes: Vec<(usize, F, U, MultiaddrWithPeerId)>, - chain_spec: GenericChainSpec, + chain_spec: GenericChainSpec, base_port: u16, nodes: usize, } -impl Drop for TestNet { +impl Drop for TestNet { fn drop(&mut self) { // Drop the nodes before dropping the runtime, as the runtime otherwise waits for all // futures to be ended and we run into a dead lock. 
@@ -162,7 +162,7 @@ where } } -impl TestNet +impl TestNet where F: Clone + Send + 'static, U: Clone + Send + 'static, @@ -193,12 +193,9 @@ where } } -fn node_config< - G: RuntimeGenesis + 'static, - E: ChainSpecExtension + Clone + 'static + Send + Sync, ->( +fn node_config( index: usize, - spec: &GenericChainSpec, + spec: &GenericChainSpec, role: Role, tokio_handle: tokio::runtime::Handle, key_seed: Option, @@ -272,19 +269,18 @@ fn node_config< } } -impl TestNet +impl TestNet where F: TestNetNode, E: ChainSpecExtension + Clone + 'static + Send + Sync, - G: RuntimeGenesis + 'static, { fn new( temp: &TempDir, - spec: GenericChainSpec, + spec: GenericChainSpec, full: impl Iterator Result<(F, U), Error>>, authorities: impl Iterator Result<(F, U), Error>)>, base_port: u16, - ) -> TestNet { + ) -> TestNet { sp_tracing::try_init_simple(); fdlimit::raise_fd_limit().unwrap(); let runtime = Runtime::new().expect("Error creating tokio runtime"); @@ -365,10 +361,9 @@ fn tempdir_with_prefix(prefix: &str) -> TempDir { .expect("Error creating test dir") } -pub fn connectivity(spec: GenericChainSpec, full_builder: Fb) +pub fn connectivity(spec: GenericChainSpec, full_builder: Fb) where E: ChainSpecExtension + Clone + 'static + Send + Sync, - G: RuntimeGenesis + 'static, Fb: Fn(Configuration) -> Result, F: TestNetNode, { @@ -442,8 +437,8 @@ where } } -pub fn sync( - spec: GenericChainSpec, +pub fn sync( + spec: GenericChainSpec, full_builder: Fb, mut make_block_and_import: B, mut extrinsic_factory: ExF, @@ -454,7 +449,6 @@ pub fn sync( ExF: FnMut(&F, &U) -> ::Extrinsic, U: Clone + Send + 'static, E: ChainSpecExtension + Clone + 'static + Send + Sync, - G: RuntimeGenesis + 'static, { const NUM_FULL_NODES: usize = 10; const NUM_BLOCKS: usize = 512; @@ -513,15 +507,14 @@ pub fn sync( network.run_until_all_full(|_index, service| service.transaction_pool().ready().count() == 1); } -pub fn consensus( - spec: GenericChainSpec, +pub fn consensus( + spec: GenericChainSpec, full_builder: Fb, authorities: impl IntoIterator, ) where Fb: Fn(Configuration) -> Result, F: TestNetNode, E: ChainSpecExtension + Clone + 'static + Send + Sync, - G: RuntimeGenesis + 'static, { const NUM_FULL_NODES: usize = 10; const NUM_BLOCKS: usize = 10; // 10 * 2 sec block production time = ~20 seconds diff --git a/templates/minimal/node/src/chain_spec.rs b/templates/minimal/node/src/chain_spec.rs index 6b721deb6d1..7a3475bb167 100644 --- a/templates/minimal/node/src/chain_spec.rs +++ b/templates/minimal/node/src/chain_spec.rs @@ -21,7 +21,7 @@ use serde_json::{json, Value}; use sp_keyring::AccountKeyring; /// This is a specialization of the general Substrate ChainSpec type. -pub type ChainSpec = sc_service::GenericChainSpec<()>; +pub type ChainSpec = sc_service::GenericChainSpec; fn props() -> Properties { let mut properties = Properties::new(); diff --git a/templates/parachain/node/src/chain_spec.rs b/templates/parachain/node/src/chain_spec.rs index 51710f1199c..3fa91c02616 100644 --- a/templates/parachain/node/src/chain_spec.rs +++ b/templates/parachain/node/src/chain_spec.rs @@ -8,7 +8,7 @@ use sp_core::{sr25519, Pair, Public}; use sp_runtime::traits::{IdentifyAccount, Verify}; /// Specialized `ChainSpec` for the normal parachain runtime. -pub type ChainSpec = sc_service::GenericChainSpec<(), Extensions>; +pub type ChainSpec = sc_service::GenericChainSpec; /// The default XCM version to set in genesis config. 
const SAFE_XCM_VERSION: u32 = xcm::prelude::XCM_VERSION; diff --git a/templates/solochain/node/src/chain_spec.rs b/templates/solochain/node/src/chain_spec.rs index be49f2c1fc7..651025e68de 100644 --- a/templates/solochain/node/src/chain_spec.rs +++ b/templates/solochain/node/src/chain_spec.rs @@ -1,5 +1,5 @@ use sc_service::ChainType; -use solochain_template_runtime::{AccountId, RuntimeGenesisConfig, Signature, WASM_BINARY}; +use solochain_template_runtime::{AccountId, Signature, WASM_BINARY}; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_core::{sr25519, Pair, Public}; @@ -9,7 +9,7 @@ use sp_runtime::traits::{IdentifyAccount, Verify}; // const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; /// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. -pub type ChainSpec = sc_service::GenericChainSpec; +pub type ChainSpec = sc_service::GenericChainSpec; /// Generate a crypto pair from seed. pub fn get_from_seed(seed: &str) -> ::Public { -- GitLab From 09f07d548bf73633cd691e8346c630026444e073 Mon Sep 17 00:00:00 2001 From: Przemek Rzad Date: Tue, 28 May 2024 09:37:26 +0200 Subject: [PATCH 074/106] Remove workspace lints from templates (#4598) This detaches the templates from monorepo's workspace lints, so the lints for the templates can evolve separately as needed. Currently the templates [re-use the monorepo's lints](https://github.com/paritytech/polkadot-sdk-minimal-template/blob/bd8afe66ec566d61f36b0e3d731145741a9e9e19/Cargo.toml#L16-L43) which looks weird. cc @kianenigma @gupnik --- .github/workflows/misc-sync-templates.yml | 2 -- templates/minimal/Cargo.toml | 3 --- templates/minimal/node/Cargo.toml | 3 --- templates/minimal/node/src/cli.rs | 2 +- templates/minimal/node/src/service.rs | 2 +- templates/minimal/pallets/template/Cargo.toml | 3 --- templates/minimal/runtime/Cargo.toml | 3 --- templates/parachain/node/Cargo.toml | 3 --- templates/parachain/node/src/cli.rs | 1 + templates/parachain/node/src/command.rs | 10 ++++------ templates/parachain/node/src/service.rs | 1 + templates/parachain/pallets/template/Cargo.toml | 3 --- .../parachain/pallets/template/src/benchmarking.rs | 2 +- templates/parachain/runtime/Cargo.toml | 3 --- templates/solochain/node/Cargo.toml | 3 --- templates/solochain/node/src/command.rs | 2 +- templates/solochain/pallets/template/Cargo.toml | 3 --- .../solochain/pallets/template/src/benchmarking.rs | 2 +- templates/solochain/runtime/Cargo.toml | 3 --- 19 files changed, 11 insertions(+), 43 deletions(-) diff --git a/.github/workflows/misc-sync-templates.yml b/.github/workflows/misc-sync-templates.yml index 2699ff0fed3..b040c2fc89b 100644 --- a/.github/workflows/misc-sync-templates.yml +++ b/.github/workflows/misc-sync-templates.yml @@ -105,8 +105,6 @@ jobs: toml set templates/${{ matrix.template }}/Cargo.toml 'workspace.package.edition' "$(toml get --raw Cargo.toml 'workspace.package.edition')" > Cargo.temp mv Cargo.temp ./templates/${{ matrix.template }}/Cargo.toml - toml get Cargo.toml 'workspace.lints' --output-toml >> ./templates/${{ matrix.template }}/Cargo.toml - toml get Cargo.toml 'workspace.dependencies' --output-toml >> ./templates/${{ matrix.template }}/Cargo.toml working-directory: polkadot-sdk - name: Print the result Cargo.tomls for debugging diff --git a/templates/minimal/Cargo.toml b/templates/minimal/Cargo.toml index 6cd28c5a493..95656ff92d2 100644 --- a/templates/minimal/Cargo.toml +++ b/templates/minimal/Cargo.toml @@ 
-9,9 +9,6 @@ repository.workspace = true edition.workspace = true publish = false -[lints] -workspace = true - [dependencies] minimal-template-node = { path = "./node" } minimal-template-runtime = { path = "./runtime" } diff --git a/templates/minimal/node/Cargo.toml b/templates/minimal/node/Cargo.toml index 606fd058035..f732eff445c 100644 --- a/templates/minimal/node/Cargo.toml +++ b/templates/minimal/node/Cargo.toml @@ -10,9 +10,6 @@ edition.workspace = true publish = false build = "build.rs" -[lints] -workspace = true - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/templates/minimal/node/src/cli.rs b/templates/minimal/node/src/cli.rs index e464fa7d6ca..22726b7eb9a 100644 --- a/templates/minimal/node/src/cli.rs +++ b/templates/minimal/node/src/cli.rs @@ -32,7 +32,7 @@ impl std::str::FromStr for Consensus { } else if let Some(block_time) = s.strip_prefix("manual-seal-") { Consensus::ManualSeal(block_time.parse().map_err(|_| "invalid block time")?) } else { - return Err("incorrect consensus identifier".into()) + return Err("incorrect consensus identifier".into()); }) } } diff --git a/templates/minimal/node/src/service.rs b/templates/minimal/node/src/service.rs index d84df95dc19..5a92627621b 100644 --- a/templates/minimal/node/src/service.rs +++ b/templates/minimal/node/src/service.rs @@ -61,7 +61,7 @@ pub fn new_partial(config: &Configuration) -> Result { }) .transpose()?; - let executor = sc_service::new_wasm_executor(&config); + let executor = sc_service::new_wasm_executor(config); let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( diff --git a/templates/minimal/pallets/template/Cargo.toml b/templates/minimal/pallets/template/Cargo.toml index e6fe43abc09..30962664481 100644 --- a/templates/minimal/pallets/template/Cargo.toml +++ b/templates/minimal/pallets/template/Cargo.toml @@ -9,9 +9,6 @@ repository.workspace = true edition.workspace = true publish = false -[lints] -workspace = true - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/templates/minimal/runtime/Cargo.toml b/templates/minimal/runtime/Cargo.toml index 99559308e5b..3581ca7c851 100644 --- a/templates/minimal/runtime/Cargo.toml +++ b/templates/minimal/runtime/Cargo.toml @@ -9,9 +9,6 @@ repository.workspace = true edition.workspace = true publish = false -[lints] -workspace = true - [dependencies] parity-scale-codec = { version = "3.6.12", default-features = false } scale-info = { version = "2.6.0", default-features = false } diff --git a/templates/parachain/node/Cargo.toml b/templates/parachain/node/Cargo.toml index 6f715082982..4fe228f71fe 100644 --- a/templates/parachain/node/Cargo.toml +++ b/templates/parachain/node/Cargo.toml @@ -10,9 +10,6 @@ edition.workspace = true publish = false build = "build.rs" -[lints] -workspace = true - # [[bin]] # name = "parachain-template-node" diff --git a/templates/parachain/node/src/cli.rs b/templates/parachain/node/src/cli.rs index cffbfbc1db2..f008e856d99 100644 --- a/templates/parachain/node/src/cli.rs +++ b/templates/parachain/node/src/cli.rs @@ -1,6 +1,7 @@ use std::path::PathBuf; /// Sub-commands supported by the collator. +#[allow(clippy::large_enum_variant)] #[derive(Debug, clap::Subcommand)] pub enum Subcommand { /// Build a chain specification. 
diff --git a/templates/parachain/node/src/command.rs b/templates/parachain/node/src/command.rs index 56ae022cad2..eba7fdcdae7 100644 --- a/templates/parachain/node/src/command.rs +++ b/templates/parachain/node/src/command.rs @@ -194,13 +194,11 @@ pub fn run() -> Result<()> { cmd.run(partials.client) }), #[cfg(not(feature = "runtime-benchmarks"))] - BenchmarkCmd::Storage(_) => - return Err(sc_cli::Error::Input( - "Compile with --features=runtime-benchmarks \ + BenchmarkCmd::Storage(_) => Err(sc_cli::Error::Input( + "Compile with --features=runtime-benchmarks \ to enable storage benchmarks." - .into(), - ) - .into()), + .into(), + )), #[cfg(feature = "runtime-benchmarks")] BenchmarkCmd::Storage(cmd) => runner.sync_run(|config| { let partials = new_partial(&config)?; diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs index ad4689c6e55..ce630891587 100644 --- a/templates/parachain/node/src/service.rs +++ b/templates/parachain/node/src/service.rs @@ -160,6 +160,7 @@ fn build_import_queue( ) } +#[allow(clippy::too_many_arguments)] fn start_consensus( client: Arc, backend: Arc, diff --git a/templates/parachain/pallets/template/Cargo.toml b/templates/parachain/pallets/template/Cargo.toml index c5334e871fa..f5411c02821 100644 --- a/templates/parachain/pallets/template/Cargo.toml +++ b/templates/parachain/pallets/template/Cargo.toml @@ -9,9 +9,6 @@ repository.workspace = true edition.workspace = true publish = false -[lints] -workspace = true - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/templates/parachain/pallets/template/src/benchmarking.rs b/templates/parachain/pallets/template/src/benchmarking.rs index 5a262417629..d1a9554aed6 100644 --- a/templates/parachain/pallets/template/src/benchmarking.rs +++ b/templates/parachain/pallets/template/src/benchmarking.rs @@ -13,7 +13,7 @@ mod benchmarks { #[benchmark] fn do_something() { - let value = 100u32.into(); + let value = 100u32; let caller: T::AccountId = whitelisted_caller(); #[extrinsic_call] do_something(RawOrigin::Signed(caller), value); diff --git a/templates/parachain/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml index a74c6a541f4..e88284bedb6 100644 --- a/templates/parachain/runtime/Cargo.toml +++ b/templates/parachain/runtime/Cargo.toml @@ -9,9 +9,6 @@ repository.workspace = true edition.workspace = true publish = false -[lints] -workspace = true - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/templates/solochain/node/Cargo.toml b/templates/solochain/node/Cargo.toml index 9332da3a654..515f85e5418 100644 --- a/templates/solochain/node/Cargo.toml +++ b/templates/solochain/node/Cargo.toml @@ -11,9 +11,6 @@ publish = false build = "build.rs" -[lints] -workspace = true - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/templates/solochain/node/src/command.rs b/templates/solochain/node/src/command.rs index e46fedc91f0..624ace1bf35 100644 --- a/templates/solochain/node/src/command.rs +++ b/templates/solochain/node/src/command.rs @@ -114,7 +114,7 @@ pub fn run() -> sc_cli::Result<()> { "Runtime benchmarking wasn't enabled when building the node. \ You can enable it with `--features runtime-benchmarks`." 
.into(), - ) + ); } cmd.run_with_spec::, ()>(Some( diff --git a/templates/solochain/pallets/template/Cargo.toml b/templates/solochain/pallets/template/Cargo.toml index 1a122bd82d4..8c6f26d8e5d 100644 --- a/templates/solochain/pallets/template/Cargo.toml +++ b/templates/solochain/pallets/template/Cargo.toml @@ -9,9 +9,6 @@ repository.workspace = true edition.workspace = true publish = false -[lints] -workspace = true - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/templates/solochain/pallets/template/src/benchmarking.rs b/templates/solochain/pallets/template/src/benchmarking.rs index 5a262417629..d1a9554aed6 100644 --- a/templates/solochain/pallets/template/src/benchmarking.rs +++ b/templates/solochain/pallets/template/src/benchmarking.rs @@ -13,7 +13,7 @@ mod benchmarks { #[benchmark] fn do_something() { - let value = 100u32.into(); + let value = 100u32; let caller: T::AccountId = whitelisted_caller(); #[extrinsic_call] do_something(RawOrigin::Signed(caller), value); diff --git a/templates/solochain/runtime/Cargo.toml b/templates/solochain/runtime/Cargo.toml index b4a543826e7..8aeb1a6a16e 100644 --- a/templates/solochain/runtime/Cargo.toml +++ b/templates/solochain/runtime/Cargo.toml @@ -9,9 +9,6 @@ repository.workspace = true edition.workspace = true publish = false -[lints] -workspace = true - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] -- GitLab From 523e62560eb5d9a36ea75851f2fb15b9d7993f01 Mon Sep 17 00:00:00 2001 From: Alin Dima Date: Tue, 28 May 2024 11:15:50 +0300 Subject: [PATCH 075/106] Add availability-recovery from systematic chunks (#1644) **Don't look at the commit history, it's confusing, as this branch is based on another branch that was merged** Fixes #598 Also implements [RFC #47](https://github.com/polkadot-fellows/RFCs/pull/47) ## Description - Availability-recovery now first attempts to request the systematic chunks for large POVs (the first ~n/3 chunks, from which the full data can be recovered without the costly Reed-Solomon decoding process). This has a fallback of recovering from all chunks, if for some reason the process fails. Additionally, backers are also used as a backup for requesting the systematic chunks if the assigned validator is not offering the chunk (each backer is only used for one systematic chunk, to not overload them). - Quite obviously, recovering from systematic chunks is much faster than recovering from regular chunks (4000% faster as measured on my Apple M2 Pro). - Introduces a `ValidatorIndex` -> `ChunkIndex` mapping which is different for every core, in order to avoid only querying the first n/3 validators over and over again in the same session. The mapping is the one described in RFC 47. - The mapping is feature-gated by the [NodeFeatures runtime API](https://github.com/paritytech/polkadot-sdk/pull/2177) so that it can only be enabled via a governance call once a sufficient majority of validators have upgraded their client. If the feature is not enabled, the mapping will be the identity mapping and backwards-compatibility will be preserved. - Adds a new chunk request protocol version (v2), which adds the ChunkIndex to the response. This may or may not be checked against the expected chunk index: it is checked for av-distribution and systematic recovery, but not for regular recovery. This is backwards compatible. First, a v2 request is attempted. If that fails during protocol negotiation, v1 is used.
- Systematic recovery is only attempted during approval-voting, where we have easy access to the core_index. For disputes and collator pov_recovery, regular chunk requests are used, just as before. ## Performance results Some results from subsystem-bench: with regular chunk recovery: CPU usage per block 39.82s with recovery from backers: CPU usage per block 16.03s with systematic recovery: CPU usage per block 19.07s End-to-end results here: https://github.com/paritytech/polkadot-sdk/issues/598#issuecomment-1792007099 #### TODO: - [x] [RFC #47](https://github.com/polkadot-fellows/RFCs/pull/47) - [x] merge https://github.com/paritytech/polkadot-sdk/pull/2177 and rebase on top of those changes - [x] merge https://github.com/paritytech/polkadot-sdk/pull/2771 and rebase - [x] add tests - [x] preliminary performance measure on Versi: see https://github.com/paritytech/polkadot-sdk/issues/598#issuecomment-1792007099 - [x] Rewrite the implementer's guide documentation - [x] https://github.com/paritytech/polkadot-sdk/pull/3065 - [x] https://github.com/paritytech/zombienet/issues/1705 and fix zombienet tests - [x] security audit - [x] final versi test and performance measure --------- Signed-off-by: alindima Co-authored-by: Javier Viola --- .gitlab/pipeline/zombienet.yml | 2 +- .gitlab/pipeline/zombienet/polkadot.yml | 16 + Cargo.lock | 8 +- .../src/active_candidate_recovery.rs | 1 + .../relay-chain-minimal-node/src/lib.rs | 3 + cumulus/test/service/src/lib.rs | 5 +- polkadot/erasure-coding/Cargo.toml | 1 + polkadot/erasure-coding/benches/README.md | 6 +- .../benches/scaling_with_validators.rs | 36 +- polkadot/erasure-coding/src/lib.rs | 93 + polkadot/node/core/approval-voting/src/lib.rs | 16 +- .../node/core/approval-voting/src/tests.rs | 2 +- polkadot/node/core/av-store/src/lib.rs | 76 +- polkadot/node/core/av-store/src/tests.rs | 380 ++- polkadot/node/core/backing/src/lib.rs | 28 +- polkadot/node/core/backing/src/tests/mod.rs | 44 +- .../src/tests/prospective_parachains.rs | 8 +- .../node/core/bitfield-signing/src/lib.rs | 53 +- .../node/core/bitfield-signing/src/tests.rs | 4 +- .../src/participation/mod.rs | 1 + .../src/participation/tests.rs | 12 +- polkadot/node/jaeger/src/spans.rs | 8 +- .../availability-distribution/Cargo.toml | 2 + .../availability-distribution/src/error.rs | 8 +- .../availability-distribution/src/lib.rs | 41 +- .../src/requester/fetch_task/mod.rs | 131 +- .../src/requester/fetch_task/tests.rs | 291 ++- .../src/requester/mod.rs | 127 +- .../src/requester/session_cache.rs | 63 +- .../src/requester/tests.rs | 36 +- .../src/responder.rs | 124 +- .../src/tests/mock.rs | 26 +- .../src/tests/mod.rs | 121 +- .../src/tests/state.rs | 196 +- .../network/availability-recovery/Cargo.toml | 3 +- .../availability-recovery-regression-bench.rs | 4 +- .../availability-recovery/src/error.rs | 58 +- .../network/availability-recovery/src/lib.rs | 562 +++-- .../availability-recovery/src/metrics.rs | 242 +- .../network/availability-recovery/src/task.rs | 861 ------- .../availability-recovery/src/task/mod.rs | 197 ++ .../src/task/strategy/chunks.rs | 335 +++ .../src/task/strategy/full.rs | 174 ++ .../src/task/strategy/mod.rs | 1558 ++++++++++++ .../src/task/strategy/systematic.rs | 343 +++ .../availability-recovery/src/tests.rs | 2140 ++++++++++++++--- polkadot/node/network/bridge/src/tx/mod.rs | 10 +- .../protocol/src/request_response/mod.rs | 12 +- .../protocol/src/request_response/outgoing.rs | 36 +- .../protocol/src/request_response/v1.rs | 14 +- .../protocol/src/request_response/v2.rs | 62 
+- polkadot/node/overseer/src/tests.rs | 1 + polkadot/node/primitives/src/lib.rs | 7 +- polkadot/node/service/src/lib.rs | 8 +- polkadot/node/service/src/overseer.rs | 24 +- polkadot/node/subsystem-bench/Cargo.toml | 1 + .../examples/availability_read.yaml | 8 +- .../src/lib/availability/mod.rs | 124 +- .../src/lib/availability/test_state.rs | 41 +- .../subsystem-bench/src/lib/mock/av_store.rs | 111 +- .../src/lib/mock/network_bridge.rs | 2 +- .../src/lib/mock/runtime_api.rs | 29 +- .../node/subsystem-bench/src/lib/network.rs | 8 +- .../node/subsystem-test-helpers/src/lib.rs | 4 +- polkadot/node/subsystem-types/Cargo.toml | 1 + polkadot/node/subsystem-types/src/errors.rs | 26 +- polkadot/node/subsystem-types/src/messages.rs | 11 +- polkadot/node/subsystem-util/Cargo.toml | 1 + .../subsystem-util/src/availability_chunks.rs | 227 ++ polkadot/node/subsystem-util/src/lib.rs | 26 +- .../node/subsystem-util/src/runtime/error.rs | 2 +- .../node/subsystem-util/src/runtime/mod.rs | 11 +- polkadot/primitives/src/lib.rs | 40 +- polkadot/primitives/src/v7/mod.rs | 45 +- .../src/node/approval/approval-voting.md | 2 +- .../availability/availability-recovery.md | 249 +- .../src/types/overseer-protocol.md | 3 + .../functional/0013-enable-node-feature.js | 35 + .../0013-systematic-chunk-recovery.toml | 46 + .../0013-systematic-chunk-recovery.zndsl | 43 + ...-chunk-fetching-network-compatibility.toml | 48 + ...chunk-fetching-network-compatibility.zndsl | 53 + prdoc/pr_1644.prdoc | 59 + substrate/client/network/src/service.rs | 2 +- 84 files changed, 7540 insertions(+), 2338 deletions(-) delete mode 100644 polkadot/node/network/availability-recovery/src/task.rs create mode 100644 polkadot/node/network/availability-recovery/src/task/mod.rs create mode 100644 polkadot/node/network/availability-recovery/src/task/strategy/chunks.rs create mode 100644 polkadot/node/network/availability-recovery/src/task/strategy/full.rs create mode 100644 polkadot/node/network/availability-recovery/src/task/strategy/mod.rs create mode 100644 polkadot/node/network/availability-recovery/src/task/strategy/systematic.rs create mode 100644 polkadot/node/subsystem-util/src/availability_chunks.rs create mode 100644 polkadot/zombienet_tests/functional/0013-enable-node-feature.js create mode 100644 polkadot/zombienet_tests/functional/0013-systematic-chunk-recovery.toml create mode 100644 polkadot/zombienet_tests/functional/0013-systematic-chunk-recovery.zndsl create mode 100644 polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.toml create mode 100644 polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.zndsl create mode 100644 prdoc/pr_1644.prdoc diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml index 404b57b07c5..7897e55e291 100644 --- a/.gitlab/pipeline/zombienet.yml +++ b/.gitlab/pipeline/zombienet.yml @@ -1,7 +1,7 @@ .zombienet-refs: extends: .build-refs variables: - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.104" + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.105" PUSHGATEWAY_URL: "http://zombienet-prometheus-pushgateway.managed-monitoring:9091/metrics/job/zombie-metrics" DEBUG: "zombie,zombie::network-node,zombie::kube::client::logs" diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index a9f0eb93033..b158cbe0b5a 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -183,6 +183,22 @@ 
zombienet-polkadot-functional-0012-spam-statement-distribution-requests: --local-dir="${LOCAL_DIR}/functional" --test="0012-spam-statement-distribution-requests.zndsl" +zombienet-polkadot-functional-0013-systematic-chunk-recovery: + extends: + - .zombienet-polkadot-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0013-systematic-chunk-recovery.zndsl" + +zombienet-polkadot-functional-0014-chunk-fetching-network-compatibility: + extends: + - .zombienet-polkadot-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0014-chunk-fetching-network-compatibility.zndsl" + zombienet-polkadot-smoke-0001-parachains-smoke-test: extends: - .zombienet-polkadot-common diff --git a/Cargo.lock b/Cargo.lock index 3d6cbc9e83f..6240d9db2ea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12625,6 +12625,7 @@ dependencies = [ "polkadot-primitives-test-helpers", "polkadot-subsystem-bench", "rand 0.8.5", + "rstest", "sc-network", "schnellru", "sp-core", @@ -12641,7 +12642,6 @@ version = "7.0.0" dependencies = [ "assert_matches", "async-trait", - "env_logger 0.11.3", "fatality", "futures", "futures-timer", @@ -12657,11 +12657,13 @@ dependencies = [ "polkadot-primitives-test-helpers", "polkadot-subsystem-bench", "rand 0.8.5", + "rstest", "sc-network", "schnellru", "sp-application-crypto", "sp-core", "sp-keyring", + "sp-tracing 16.0.0", "thiserror", "tokio", "tracing-gum", @@ -12789,6 +12791,7 @@ dependencies = [ "parity-scale-codec", "polkadot-node-primitives", "polkadot-primitives", + "quickcheck", "reed-solomon-novelpoly", "sp-core", "sp-trie", @@ -13435,6 +13438,7 @@ dependencies = [ "async-trait", "bitvec", "derive_more", + "fatality", "futures", "orchestra", "polkadot-node-jaeger", @@ -13477,6 +13481,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", "pin-project", + "polkadot-erasure-coding", "polkadot-node-jaeger", "polkadot-node-metrics", "polkadot-node-network-protocol", @@ -14564,6 +14569,7 @@ dependencies = [ "sp-keystore", "sp-runtime", "sp-timestamp", + "strum 0.24.1", "substrate-prometheus-endpoint", "tokio", "tracing-gum", diff --git a/cumulus/client/pov-recovery/src/active_candidate_recovery.rs b/cumulus/client/pov-recovery/src/active_candidate_recovery.rs index 2c635320ff4..c41c543f04d 100644 --- a/cumulus/client/pov-recovery/src/active_candidate_recovery.rs +++ b/cumulus/client/pov-recovery/src/active_candidate_recovery.rs @@ -56,6 +56,7 @@ impl ActiveCandidateRecovery { candidate.receipt.clone(), candidate.session_index, None, + None, tx, ), "ActiveCandidateRecovery", diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs index b84427c3a75..699393e2d48 100644 --- a/cumulus/client/relay-chain-minimal-node/src/lib.rs +++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs @@ -285,5 +285,8 @@ fn build_request_response_protocol_receivers< let cfg = Protocol::ChunkFetchingV1.get_outbound_only_config::<_, Network>(request_protocol_names); config.add_request_response_protocol(cfg); + let cfg = + Protocol::ChunkFetchingV2.get_outbound_only_config::<_, Network>(request_protocol_names); + config.add_request_response_protocol(cfg); (collation_req_v1_receiver, collation_req_v2_receiver, available_data_req_receiver) } diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index f2a61280386..6f8b9d19bb2 100644 --- a/cumulus/test/service/src/lib.rs 
+++ b/cumulus/test/service/src/lib.rs @@ -152,7 +152,7 @@ impl RecoveryHandle for FailingRecoveryHandle { message: AvailabilityRecoveryMessage, origin: &'static str, ) { - let AvailabilityRecoveryMessage::RecoverAvailableData(ref receipt, _, _, _) = message; + let AvailabilityRecoveryMessage::RecoverAvailableData(ref receipt, _, _, _, _) = message; let candidate_hash = receipt.hash(); // For every 3rd block we immediately signal unavailability to trigger @@ -160,7 +160,8 @@ impl RecoveryHandle for FailingRecoveryHandle { if self.counter % 3 == 0 && self.failed_hashes.insert(candidate_hash) { tracing::info!(target: LOG_TARGET, ?candidate_hash, "Failing pov recovery."); - let AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, back_sender) = message; + let AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, _, back_sender) = + message; back_sender .send(Err(RecoveryError::Unavailable)) .expect("Return channel should work here."); diff --git a/polkadot/erasure-coding/Cargo.toml b/polkadot/erasure-coding/Cargo.toml index b230631f72b..bf152e03be7 100644 --- a/polkadot/erasure-coding/Cargo.toml +++ b/polkadot/erasure-coding/Cargo.toml @@ -19,6 +19,7 @@ sp-trie = { path = "../../substrate/primitives/trie" } thiserror = { workspace = true } [dev-dependencies] +quickcheck = { version = "1.0.3", default-features = false } criterion = { version = "0.5.1", default-features = false, features = ["cargo_bench_support"] } [[bench]] diff --git a/polkadot/erasure-coding/benches/README.md b/polkadot/erasure-coding/benches/README.md index 94fca5400c6..20f79827d28 100644 --- a/polkadot/erasure-coding/benches/README.md +++ b/polkadot/erasure-coding/benches/README.md @@ -7,7 +7,8 @@ cargo bench ## `scaling_with_validators` This benchmark evaluates the performance of constructing the chunks and the erasure root from PoV and -reconstructing the PoV from chunks. You can see the results of running this bench on 5950x below. +reconstructing the PoV from chunks (either from systematic chunks or regular chunks). +You can see the results of running this bench on 5950x below (only including recovery from regular chunks). Interestingly, with `10_000` chunks (validators) its slower than with `50_000` for both construction and reconstruction. ``` @@ -37,3 +38,6 @@ reconstruct/10000 time: [496.35 ms 505.17 ms 515.42 ms] reconstruct/50000 time: [276.56 ms 277.53 ms 278.58 ms] thrpt: [17.948 MiB/s 18.016 MiB/s 18.079 MiB/s] ``` + +Results from running on an Apple M2 Pro, systematic recovery is generally 40 times faster than +regular recovery, achieving 1 Gib/s. 
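To connect the numbers above to the new API, the sketch below shows the round trip that the `reconstruct_systematic` benchmark group measures, using the public functions added to this crate later in this patch; error handling is elided for brevity:

```rust
use polkadot_erasure_coding::{
	obtain_chunks_v1, reconstruct_from_systematic_v1, systematic_recovery_threshold,
};
use polkadot_node_primitives::AvailableData;

fn systematic_round_trip(n_validators: usize, data: &AvailableData) {
	// All chunks, in chunk-index order; the first `k` are the systematic
	// ones and together contain the unencoded payload.
	let chunks = obtain_chunks_v1(n_validators, data).unwrap();
	let k = systematic_recovery_threshold(n_validators).unwrap();
	// No Reed-Solomon decoding happens on this path, which is why it is
	// so much cheaper than recovery from arbitrary chunks.
	let recovered =
		reconstruct_from_systematic_v1(n_validators, chunks.into_iter().take(k).collect())
			.unwrap();
	assert_eq!(&recovered, data);
}
```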
diff --git a/polkadot/erasure-coding/benches/scaling_with_validators.rs b/polkadot/erasure-coding/benches/scaling_with_validators.rs index 759385bbdef..3d743faa416 100644 --- a/polkadot/erasure-coding/benches/scaling_with_validators.rs +++ b/polkadot/erasure-coding/benches/scaling_with_validators.rs @@ -53,12 +53,16 @@ fn construct_and_reconstruct_5mb_pov(c: &mut Criterion) { } group.finish(); - let mut group = c.benchmark_group("reconstruct"); + let mut group = c.benchmark_group("reconstruct_regular"); for n_validators in N_VALIDATORS { let all_chunks = chunks(n_validators, &pov); - let mut c: Vec<_> = all_chunks.iter().enumerate().map(|(i, c)| (&c[..], i)).collect(); - let last_chunks = c.split_off((c.len() - 1) * 2 / 3); + let chunks: Vec<_> = all_chunks + .iter() + .enumerate() + .take(polkadot_erasure_coding::recovery_threshold(n_validators).unwrap()) + .map(|(i, c)| (&c[..], i)) + .collect(); group.throughput(Throughput::Bytes(pov.len() as u64)); group.bench_with_input( @@ -67,7 +71,31 @@ fn construct_and_reconstruct_5mb_pov(c: &mut Criterion) { |b, &n| { b.iter(|| { let _pov: Vec = - polkadot_erasure_coding::reconstruct(n, last_chunks.clone()).unwrap(); + polkadot_erasure_coding::reconstruct(n, chunks.clone()).unwrap(); + }); + }, + ); + } + group.finish(); + + let mut group = c.benchmark_group("reconstruct_systematic"); + for n_validators in N_VALIDATORS { + let all_chunks = chunks(n_validators, &pov); + + let chunks = all_chunks + .into_iter() + .take(polkadot_erasure_coding::systematic_recovery_threshold(n_validators).unwrap()) + .collect::>(); + + group.throughput(Throughput::Bytes(pov.len() as u64)); + group.bench_with_input( + BenchmarkId::from_parameter(n_validators), + &n_validators, + |b, &n| { + b.iter(|| { + let _pov: Vec = + polkadot_erasure_coding::reconstruct_from_systematic(n, chunks.clone()) + .unwrap(); }); }, ); diff --git a/polkadot/erasure-coding/src/lib.rs b/polkadot/erasure-coding/src/lib.rs index e5155df4beb..b354c3dac64 100644 --- a/polkadot/erasure-coding/src/lib.rs +++ b/polkadot/erasure-coding/src/lib.rs @@ -69,6 +69,9 @@ pub enum Error { /// Bad payload in reconstructed bytes. #[error("Reconstructed payload invalid")] BadPayload, + /// Unable to decode reconstructed bytes. + #[error("Unable to decode reconstructed payload: {0}")] + Decode(#[source] parity_scale_codec::Error), /// Invalid branch proof. #[error("Invalid branch proof")] InvalidBranchProof, @@ -110,6 +113,14 @@ pub const fn recovery_threshold(n_validators: usize) -> Result { Ok(needed + 1) } +/// Obtain the threshold of systematic chunks that should be enough to recover the data. +/// +/// If the regular `recovery_threshold` is a power of two, then it returns the same value. +/// Otherwise, it returns the next lower power of two. +pub fn systematic_recovery_threshold(n_validators: usize) -> Result { + code_params(n_validators).map(|params| params.k()) +} + fn code_params(n_validators: usize) -> Result { // we need to be able to reconstruct from 1/3 - eps @@ -127,6 +138,41 @@ fn code_params(n_validators: usize) -> Result { }) } +/// Reconstruct the v1 available data from the set of systematic chunks. +/// +/// Provide a vector containing chunk data. If too few chunks are provided, recovery is not +/// possible. +pub fn reconstruct_from_systematic_v1( + n_validators: usize, + chunks: Vec>, +) -> Result { + reconstruct_from_systematic(n_validators, chunks) +} + +/// Reconstruct the available data from the set of systematic chunks. 
+/// +/// Provide a vector containing the first k chunks in order. If too few chunks are provided, +/// recovery is not possible. +pub fn reconstruct_from_systematic( + n_validators: usize, + chunks: Vec>, +) -> Result { + let code_params = code_params(n_validators)?; + let k = code_params.k(); + + for chunk_data in chunks.iter().take(k) { + if chunk_data.len() % 2 != 0 { + return Err(Error::UnevenLength) + } + } + + let bytes = code_params.make_encoder().reconstruct_from_systematic( + chunks.into_iter().take(k).map(|data| WrappedShard::new(data)).collect(), + )?; + + Decode::decode(&mut &bytes[..]).map_err(|err| Error::Decode(err)) +} + /// Obtain erasure-coded chunks for v1 `AvailableData`, one for each validator. /// /// Works only up to 65536 validators, and `n_validators` must be non-zero. @@ -285,13 +331,41 @@ pub fn branch_hash(root: &H256, branch_nodes: &Proof, index: usize) -> Result
<H256, Error> {
#[cfg(test)] mod tests { use super::*; + use quickcheck::{Arbitrary, Gen, QuickCheck}; + #[derive(Clone, Debug)] + struct ArbitraryAvailableData(AvailableData); + + impl Arbitrary for ArbitraryAvailableData { + fn arbitrary(g: &mut Gen) ->
Self { + // Limit the POV len to 1 mib, otherwise the test will take forever + let pov_len = (u32::arbitrary(g) % (1024 * 1024)).max(2); + + let pov = (0..pov_len).map(|_| u8::arbitrary(g)).collect(); + + let pvd = PersistedValidationData { + parent_head: HeadData((0..u16::arbitrary(g)).map(|_| u8::arbitrary(g)).collect()), + relay_parent_number: u32::arbitrary(g), + relay_parent_storage_root: [u8::arbitrary(g); 32].into(), + max_pov_size: u32::arbitrary(g), + }; + + ArbitraryAvailableData(AvailableData { + pov: Arc::new(PoV { block_data: BlockData(pov) }), + validation_data: pvd, + }) + } + } + #[test] fn field_order_is_right_size() { assert_eq!(MAX_VALIDATORS, 65536); @@ -318,6 +392,25 @@ mod tests { assert_eq!(reconstructed, available_data); } + #[test] + fn round_trip_systematic_works() { + fn property(available_data: ArbitraryAvailableData, n_validators: u16) { + let n_validators = n_validators.max(2); + let kpow2 = systematic_recovery_threshold(n_validators as usize).unwrap(); + let chunks = obtain_chunks(n_validators as usize, &available_data.0).unwrap(); + assert_eq!( + reconstruct_from_systematic_v1( + n_validators as usize, + chunks.into_iter().take(kpow2).collect() + ) + .unwrap(), + available_data.0 + ); + } + + QuickCheck::new().quickcheck(property as fn(ArbitraryAvailableData, u16)) + } + #[test] fn reconstruct_does_not_panic_on_low_validator_count() { let reconstructed = reconstruct_v1(1, [].iter().cloned()); diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index b5ed92fa39c..c667aee7361 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -914,6 +914,7 @@ enum Action { candidate: CandidateReceipt, backing_group: GroupIndex, distribute_assignment: bool, + core_index: Option, }, NoteApprovedInChainSelection(Hash), IssueApproval(CandidateHash, ApprovalVoteRequest), @@ -1174,6 +1175,7 @@ async fn handle_actions( candidate, backing_group, distribute_assignment, + core_index, } => { // Don't launch approval work if the node is syncing. if let Mode::Syncing(_) = *mode { @@ -1230,6 +1232,7 @@ async fn handle_actions( block_hash, backing_group, executor_params, + core_index, &launch_approval_span, ) .await @@ -1467,6 +1470,7 @@ async fn distribution_messages_for_activation( candidate: candidate_entry.candidate_receipt().clone(), backing_group: approval_entry.backing_group(), distribute_assignment: false, + core_index: Some(*core_index), }); } }, @@ -3050,6 +3054,11 @@ async fn process_wakeup( "Launching approval work.", ); + let candidate_core_index = block_entry + .candidates() + .iter() + .find_map(|(core_index, h)| (h == &candidate_hash).then_some(*core_index)); + if let Some(claimed_core_indices) = get_assignment_core_indices(&indirect_cert.cert.kind, &candidate_hash, &block_entry) { @@ -3062,7 +3071,6 @@ async fn process_wakeup( true }; db.write_block_entry(block_entry.clone()); - actions.push(Action::LaunchApproval { claimed_candidate_indices, candidate_hash, @@ -3074,10 +3082,12 @@ async fn process_wakeup( candidate: candidate_receipt, backing_group, distribute_assignment, + core_index: candidate_core_index, }); }, Err(err) => { - // Never happens, it should only happen if no cores are claimed, which is a bug. + // Never happens, it should only happen if no cores are claimed, which is a + // bug. 
gum::warn!( target: LOG_TARGET, block_hash = ?relay_block, @@ -3133,6 +3143,7 @@ async fn launch_approval( block_hash: Hash, backing_group: GroupIndex, executor_params: ExecutorParams, + core_index: Option, span: &jaeger::Span, ) -> SubsystemResult> { let (a_tx, a_rx) = oneshot::channel(); @@ -3179,6 +3190,7 @@ async fn launch_approval( candidate.clone(), session_index, Some(backing_group), + core_index, a_tx, )) .await; diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index 312d805bbef..c3709de59e8 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -3330,7 +3330,7 @@ async fn recover_available_data(virtual_overseer: &mut VirtualOverseer) { assert_matches!( virtual_overseer.recv().await, AllMessages::AvailabilityRecovery( - AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx) + AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, _, tx) ) => { tx.send(Ok(available_data)).unwrap(); }, diff --git a/polkadot/node/core/av-store/src/lib.rs b/polkadot/node/core/av-store/src/lib.rs index 68db4686a97..59a35a6a45a 100644 --- a/polkadot/node/core/av-store/src/lib.rs +++ b/polkadot/node/core/av-store/src/lib.rs @@ -48,8 +48,10 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_util as util; use polkadot_primitives::{ - BlockNumber, CandidateEvent, CandidateHash, CandidateReceipt, Hash, Header, ValidatorIndex, + BlockNumber, CandidateEvent, CandidateHash, CandidateReceipt, ChunkIndex, CoreIndex, Hash, + Header, NodeFeatures, ValidatorIndex, }; +use util::availability_chunks::availability_chunk_indices; mod metrics; pub use self::metrics::*; @@ -208,9 +210,9 @@ fn load_chunk( db: &Arc, config: &Config, candidate_hash: &CandidateHash, - chunk_index: ValidatorIndex, + validator_index: ValidatorIndex, ) -> Result, Error> { - let key = (CHUNK_PREFIX, candidate_hash, chunk_index).encode(); + let key = (CHUNK_PREFIX, candidate_hash, validator_index).encode(); query_inner(db, config.col_data, &key) } @@ -219,10 +221,10 @@ fn write_chunk( tx: &mut DBTransaction, config: &Config, candidate_hash: &CandidateHash, - chunk_index: ValidatorIndex, + validator_index: ValidatorIndex, erasure_chunk: &ErasureChunk, ) { - let key = (CHUNK_PREFIX, candidate_hash, chunk_index).encode(); + let key = (CHUNK_PREFIX, candidate_hash, validator_index).encode(); tx.put_vec(config.col_data, &key, erasure_chunk.encode()); } @@ -231,9 +233,9 @@ fn delete_chunk( tx: &mut DBTransaction, config: &Config, candidate_hash: &CandidateHash, - chunk_index: ValidatorIndex, + validator_index: ValidatorIndex, ) { - let key = (CHUNK_PREFIX, candidate_hash, chunk_index).encode(); + let key = (CHUNK_PREFIX, candidate_hash, validator_index).encode(); tx.delete(config.col_data, &key[..]); } @@ -1139,20 +1141,23 @@ fn process_message( Some(meta) => { let mut chunks = Vec::new(); - for (index, _) in meta.chunks_stored.iter().enumerate().filter(|(_, b)| **b) { + for (validator_index, _) in + meta.chunks_stored.iter().enumerate().filter(|(_, b)| **b) + { + let validator_index = ValidatorIndex(validator_index as _); let _timer = subsystem.metrics.time_get_chunk(); match load_chunk( &subsystem.db, &subsystem.config, &candidate, - ValidatorIndex(index as _), + validator_index, )? 
{ - Some(c) => chunks.push(c), + Some(c) => chunks.push((validator_index, c)), None => { gum::warn!( target: LOG_TARGET, ?candidate, - index, + ?validator_index, "No chunk found for set bit in meta" ); }, @@ -1169,11 +1174,17 @@ fn process_message( }); let _ = tx.send(a); }, - AvailabilityStoreMessage::StoreChunk { candidate_hash, chunk, tx } => { + AvailabilityStoreMessage::StoreChunk { candidate_hash, validator_index, chunk, tx } => { subsystem.metrics.on_chunks_received(1); let _timer = subsystem.metrics.time_store_chunk(); - match store_chunk(&subsystem.db, &subsystem.config, candidate_hash, chunk) { + match store_chunk( + &subsystem.db, + &subsystem.config, + candidate_hash, + validator_index, + chunk, + ) { Ok(true) => { let _ = tx.send(Ok(())); }, @@ -1191,6 +1202,8 @@ fn process_message( n_validators, available_data, expected_erasure_root, + core_index, + node_features, tx, } => { subsystem.metrics.on_chunks_received(n_validators as _); @@ -1203,6 +1216,8 @@ fn process_message( n_validators as _, available_data, expected_erasure_root, + core_index, + node_features, ); match res { @@ -1233,6 +1248,7 @@ fn store_chunk( db: &Arc, config: &Config, candidate_hash: CandidateHash, + validator_index: ValidatorIndex, chunk: ErasureChunk, ) -> Result { let mut tx = DBTransaction::new(); @@ -1242,12 +1258,12 @@ fn store_chunk( None => return Ok(false), // we weren't informed of this candidate by import events. }; - match meta.chunks_stored.get(chunk.index.0 as usize).map(|b| *b) { + match meta.chunks_stored.get(validator_index.0 as usize).map(|b| *b) { Some(true) => return Ok(true), // already stored. Some(false) => { - meta.chunks_stored.set(chunk.index.0 as usize, true); + meta.chunks_stored.set(validator_index.0 as usize, true); - write_chunk(&mut tx, config, &candidate_hash, chunk.index, &chunk); + write_chunk(&mut tx, config, &candidate_hash, validator_index, &chunk); write_meta(&mut tx, config, &candidate_hash, &meta); }, None => return Ok(false), // out of bounds. @@ -1257,6 +1273,7 @@ fn store_chunk( target: LOG_TARGET, ?candidate_hash, chunk_index = %chunk.index.0, + validator_index = %validator_index.0, "Stored chunk index for candidate.", ); @@ -1264,13 +1281,14 @@ fn store_chunk( Ok(true) } -// Ok(true) on success, Ok(false) on failure, and Err on internal error. 
fn store_available_data( subsystem: &AvailabilityStoreSubsystem, candidate_hash: CandidateHash, n_validators: usize, available_data: AvailableData, expected_erasure_root: Hash, + core_index: CoreIndex, + node_features: NodeFeatures, ) -> Result<(), Error> { let mut tx = DBTransaction::new(); @@ -1312,16 +1330,26 @@ fn store_available_data( drop(erasure_span); - let erasure_chunks = chunks.iter().zip(branches.map(|(proof, _)| proof)).enumerate().map( - |(index, (chunk, proof))| ErasureChunk { + let erasure_chunks: Vec<_> = chunks + .iter() + .zip(branches.map(|(proof, _)| proof)) + .enumerate() + .map(|(index, (chunk, proof))| ErasureChunk { chunk: chunk.clone(), proof, - index: ValidatorIndex(index as u32), - }, - ); + index: ChunkIndex(index as u32), + }) + .collect(); - for chunk in erasure_chunks { - write_chunk(&mut tx, &subsystem.config, &candidate_hash, chunk.index, &chunk); + let chunk_indices = availability_chunk_indices(Some(&node_features), n_validators, core_index)?; + for (validator_index, chunk_index) in chunk_indices.into_iter().enumerate() { + write_chunk( + &mut tx, + &subsystem.config, + &candidate_hash, + ValidatorIndex(validator_index as u32), + &erasure_chunks[chunk_index.0 as usize], + ); } meta.data_available = true; diff --git a/polkadot/node/core/av-store/src/tests.rs b/polkadot/node/core/av-store/src/tests.rs index 652bf2a3fda..e87f7cc3b8d 100644 --- a/polkadot/node/core/av-store/src/tests.rs +++ b/polkadot/node/core/av-store/src/tests.rs @@ -18,6 +18,7 @@ use super::*; use assert_matches::assert_matches; use futures::{channel::oneshot, executor, future, Future}; +use util::availability_chunks::availability_chunk_index; use self::test_helpers::mock::new_leaf; use ::test_helpers::TestCandidateBuilder; @@ -31,7 +32,7 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::{database::Database, TimeoutExt}; use polkadot_primitives::{ - CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, HeadData, Header, + node_features, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, HeadData, Header, PersistedValidationData, ValidatorId, }; use sp_keyring::Sr25519Keyring; @@ -272,8 +273,7 @@ fn runtime_api_error_does_not_stop_the_subsystem() { // but that's fine, we're still alive let (tx, rx) = oneshot::channel(); let candidate_hash = CandidateHash(Hash::repeat_byte(33)); - let validator_index = ValidatorIndex(5); - let query_chunk = AvailabilityStoreMessage::QueryChunk(candidate_hash, validator_index, tx); + let query_chunk = AvailabilityStoreMessage::QueryChunk(candidate_hash, 5.into(), tx); overseer_send(&mut virtual_overseer, query_chunk.into()).await; @@ -288,12 +288,13 @@ fn store_chunk_works() { test_harness(TestState::default(), store.clone(), |mut virtual_overseer| async move { let candidate_hash = CandidateHash(Hash::repeat_byte(33)); - let validator_index = ValidatorIndex(5); + let chunk_index = ChunkIndex(5); + let validator_index = ValidatorIndex(2); let n_validators = 10; let chunk = ErasureChunk { chunk: vec![1, 2, 3], - index: validator_index, + index: chunk_index, proof: Proof::try_from(vec![vec![3, 4, 5]]).unwrap(), }; @@ -314,8 +315,12 @@ fn store_chunk_works() { let (tx, rx) = oneshot::channel(); - let chunk_msg = - AvailabilityStoreMessage::StoreChunk { candidate_hash, chunk: chunk.clone(), tx }; + let chunk_msg = AvailabilityStoreMessage::StoreChunk { + candidate_hash, + validator_index, + chunk: chunk.clone(), + tx, + }; overseer_send(&mut virtual_overseer, chunk_msg).await; 
assert_eq!(rx.await.unwrap(), Ok(())); @@ -336,18 +341,23 @@ fn store_chunk_does_nothing_if_no_entry_already() { test_harness(TestState::default(), store.clone(), |mut virtual_overseer| async move { let candidate_hash = CandidateHash(Hash::repeat_byte(33)); - let validator_index = ValidatorIndex(5); + let chunk_index = ChunkIndex(5); + let validator_index = ValidatorIndex(2); let chunk = ErasureChunk { chunk: vec![1, 2, 3], - index: validator_index, + index: chunk_index, proof: Proof::try_from(vec![vec![3, 4, 5]]).unwrap(), }; let (tx, rx) = oneshot::channel(); - let chunk_msg = - AvailabilityStoreMessage::StoreChunk { candidate_hash, chunk: chunk.clone(), tx }; + let chunk_msg = AvailabilityStoreMessage::StoreChunk { + candidate_hash, + validator_index, + chunk: chunk.clone(), + tx, + }; overseer_send(&mut virtual_overseer, chunk_msg).await; assert_eq!(rx.await.unwrap(), Err(())); @@ -418,6 +428,8 @@ fn store_available_data_erasure_mismatch() { let candidate_hash = CandidateHash(Hash::repeat_byte(1)); let validator_index = ValidatorIndex(5); let n_validators = 10; + let core_index = CoreIndex(8); + let node_features = NodeFeatures::EMPTY; let pov = PoV { block_data: BlockData(vec![4, 5, 6]) }; @@ -431,6 +443,8 @@ fn store_available_data_erasure_mismatch() { candidate_hash, n_validators, available_data: available_data.clone(), + core_index, + node_features, tx, // A dummy erasure root should lead to failure. expected_erasure_root: Hash::default(), @@ -450,97 +464,183 @@ fn store_available_data_erasure_mismatch() { } #[test] -fn store_block_works() { - let store = test_store(); - let test_state = TestState::default(); - test_harness(test_state.clone(), store.clone(), |mut virtual_overseer| async move { - let candidate_hash = CandidateHash(Hash::repeat_byte(1)); - let validator_index = ValidatorIndex(5); - let n_validators = 10; - - let pov = PoV { block_data: BlockData(vec![4, 5, 6]) }; - - let available_data = AvailableData { - pov: Arc::new(pov), - validation_data: test_state.persisted_validation_data.clone(), - }; - let (tx, rx) = oneshot::channel(); - - let chunks = erasure::obtain_chunks_v1(10, &available_data).unwrap(); - let mut branches = erasure::branches(chunks.as_ref()); - - let block_msg = AvailabilityStoreMessage::StoreAvailableData { - candidate_hash, - n_validators, - available_data: available_data.clone(), - tx, - expected_erasure_root: branches.root(), - }; - - virtual_overseer.send(FromOrchestra::Communication { msg: block_msg }).await; - assert_eq!(rx.await.unwrap(), Ok(())); - - let pov = query_available_data(&mut virtual_overseer, candidate_hash).await.unwrap(); - assert_eq!(pov, available_data); - - let chunk = query_chunk(&mut virtual_overseer, candidate_hash, validator_index) - .await - .unwrap(); - - let branch = branches.nth(5).unwrap(); - let expected_chunk = ErasureChunk { - chunk: branch.1.to_vec(), - index: ValidatorIndex(5), - proof: Proof::try_from(branch.0).unwrap(), - }; - - assert_eq!(chunk, expected_chunk); - virtual_overseer - }); -} - -#[test] -fn store_pov_and_query_chunk_works() { - let store = test_store(); - let test_state = TestState::default(); - - test_harness(test_state.clone(), store.clone(), |mut virtual_overseer| async move { - let candidate_hash = CandidateHash(Hash::repeat_byte(1)); - let n_validators = 10; - - let pov = PoV { block_data: BlockData(vec![4, 5, 6]) }; - - let available_data = AvailableData { - pov: Arc::new(pov), - validation_data: test_state.persisted_validation_data.clone(), - }; - - let chunks_expected = - 
erasure::obtain_chunks_v1(n_validators as _, &available_data).unwrap(); - let branches = erasure::branches(chunks_expected.as_ref()); - - let (tx, rx) = oneshot::channel(); - let block_msg = AvailabilityStoreMessage::StoreAvailableData { - candidate_hash, - n_validators, - available_data, - tx, - expected_erasure_root: branches.root(), - }; - - virtual_overseer.send(FromOrchestra::Communication { msg: block_msg }).await; +fn store_pov_and_queries_work() { + // If the AvailabilityChunkMapping feature is not enabled, + // ValidatorIndex->ChunkIndex mapping should be 1:1 for all core indices. + { + let n_cores = 4; + for core_index in 0..n_cores { + let store = test_store(); + let test_state = TestState::default(); + let core_index = CoreIndex(core_index); + + test_harness(test_state.clone(), store.clone(), |mut virtual_overseer| async move { + let node_features = NodeFeatures::EMPTY; + let candidate_hash = CandidateHash(Hash::repeat_byte(1)); + let n_validators = 10; + + let pov = PoV { block_data: BlockData(vec![4, 5, 6]) }; + let available_data = AvailableData { + pov: Arc::new(pov), + validation_data: test_state.persisted_validation_data.clone(), + }; + + let chunks = erasure::obtain_chunks_v1(n_validators as _, &available_data).unwrap(); + + let branches = erasure::branches(chunks.as_ref()); + + let (tx, rx) = oneshot::channel(); + let block_msg = AvailabilityStoreMessage::StoreAvailableData { + candidate_hash, + n_validators, + available_data: available_data.clone(), + tx, + core_index, + expected_erasure_root: branches.root(), + node_features: node_features.clone(), + }; + + virtual_overseer.send(FromOrchestra::Communication { msg: block_msg }).await; + assert_eq!(rx.await.unwrap(), Ok(())); + + let pov: AvailableData = + query_available_data(&mut virtual_overseer, candidate_hash).await.unwrap(); + assert_eq!(pov, available_data); + + let query_all_chunks_res = query_all_chunks( + &mut virtual_overseer, + availability_chunk_indices( + Some(&node_features), + n_validators as usize, + core_index, + ) + .unwrap(), + candidate_hash, + ) + .await; + assert_eq!(query_all_chunks_res.len(), chunks.len()); + + let branches: Vec<_> = branches.collect(); + + for validator_index in 0..n_validators { + let chunk = query_chunk( + &mut virtual_overseer, + candidate_hash, + ValidatorIndex(validator_index as _), + ) + .await + .unwrap(); + let branch = &branches[validator_index as usize]; + let expected_chunk = ErasureChunk { + chunk: branch.1.to_vec(), + index: validator_index.into(), + proof: Proof::try_from(branch.0.clone()).unwrap(), + }; + assert_eq!(chunk, expected_chunk); + assert_eq!(chunk, query_all_chunks_res[validator_index as usize]); + } - assert_eq!(rx.await.unwrap(), Ok(())); + virtual_overseer + }); + } + } - for i in 0..n_validators { - let chunk = query_chunk(&mut virtual_overseer, candidate_hash, ValidatorIndex(i as _)) - .await - .unwrap(); + // If the AvailabilityChunkMapping feature is enabled, let's also test the + // ValidatorIndex -> ChunkIndex mapping. 
+ { + let n_cores = 4; + for core_index in 0..n_cores { + let store = test_store(); + let test_state = TestState::default(); + + test_harness(test_state.clone(), store.clone(), |mut virtual_overseer| async move { + let mut node_features = NodeFeatures::EMPTY; + let feature_bit = node_features::FeatureIndex::AvailabilityChunkMapping; + node_features.resize((feature_bit as u8 + 1) as usize, false); + node_features.set(feature_bit as u8 as usize, true); + + let candidate_hash = CandidateHash(Hash::repeat_byte(1)); + let n_validators = 10; + + let pov = PoV { block_data: BlockData(vec![4, 5, 6]) }; + let available_data = AvailableData { + pov: Arc::new(pov), + validation_data: test_state.persisted_validation_data.clone(), + }; + + let chunks = erasure::obtain_chunks_v1(n_validators as _, &available_data).unwrap(); + + let branches = erasure::branches(chunks.as_ref()); + let core_index = CoreIndex(core_index); + + let (tx, rx) = oneshot::channel(); + let block_msg = AvailabilityStoreMessage::StoreAvailableData { + candidate_hash, + n_validators, + available_data: available_data.clone(), + tx, + core_index, + expected_erasure_root: branches.root(), + node_features: node_features.clone(), + }; + + virtual_overseer.send(FromOrchestra::Communication { msg: block_msg }).await; + assert_eq!(rx.await.unwrap(), Ok(())); + + let pov: AvailableData = + query_available_data(&mut virtual_overseer, candidate_hash).await.unwrap(); + assert_eq!(pov, available_data); + + let query_all_chunks_res = query_all_chunks( + &mut virtual_overseer, + availability_chunk_indices( + Some(&node_features), + n_validators as usize, + core_index, + ) + .unwrap(), + candidate_hash, + ) + .await; + assert_eq!(query_all_chunks_res.len(), chunks.len()); + + let branches: Vec<_> = branches.collect(); + + for validator_index in 0..n_validators { + let chunk = query_chunk( + &mut virtual_overseer, + candidate_hash, + ValidatorIndex(validator_index as _), + ) + .await + .unwrap(); + let expected_chunk_index = availability_chunk_index( + Some(&node_features), + n_validators as usize, + core_index, + ValidatorIndex(validator_index), + ) + .unwrap(); + let branch = &branches[expected_chunk_index.0 as usize]; + let expected_chunk = ErasureChunk { + chunk: branch.1.to_vec(), + index: expected_chunk_index, + proof: Proof::try_from(branch.0.clone()).unwrap(), + }; + assert_eq!(chunk, expected_chunk); + assert_eq!( + &chunk, + query_all_chunks_res + .iter() + .find(|c| c.index == expected_chunk_index) + .unwrap() + ); + } - assert_eq!(chunk.chunk, chunks_expected[i as usize]); + virtual_overseer + }); } - virtual_overseer - }); + } } #[test] @@ -575,6 +675,8 @@ fn query_all_chunks_works() { n_validators, available_data, tx, + core_index: CoreIndex(1), + node_features: NodeFeatures::EMPTY, expected_erasure_root: branches.root(), }; @@ -598,7 +700,7 @@ fn query_all_chunks_works() { let chunk = ErasureChunk { chunk: vec![1, 2, 3], - index: ValidatorIndex(1), + index: ChunkIndex(1), proof: Proof::try_from(vec![vec![3, 4, 5]]).unwrap(), }; @@ -606,6 +708,7 @@ fn query_all_chunks_works() { let store_chunk_msg = AvailabilityStoreMessage::StoreChunk { candidate_hash: candidate_hash_2, chunk, + validator_index: ValidatorIndex(1), tx, }; @@ -615,29 +718,29 @@ fn query_all_chunks_works() { assert_eq!(rx.await.unwrap(), Ok(())); } - { - let (tx, rx) = oneshot::channel(); + let chunk_indices = + availability_chunk_indices(None, n_validators as usize, CoreIndex(0)).unwrap(); - let msg = AvailabilityStoreMessage::QueryAllChunks(candidate_hash_1, tx); - 
virtual_overseer.send(FromOrchestra::Communication { msg }).await;
- assert_eq!(rx.await.unwrap().len(), n_validators as usize);
- }
-
- {
- let (tx, rx) = oneshot::channel();
-
- let msg = AvailabilityStoreMessage::QueryAllChunks(candidate_hash_2, tx);
- virtual_overseer.send(FromOrchestra::Communication { msg }).await;
- assert_eq!(rx.await.unwrap().len(), 1);
- }
+ assert_eq!(
+ query_all_chunks(&mut virtual_overseer, chunk_indices.clone(), candidate_hash_1)
+ .await
+ .len(),
+ n_validators as usize
+ );
- {
- let (tx, rx) = oneshot::channel();
+ assert_eq!(
+ query_all_chunks(&mut virtual_overseer, chunk_indices.clone(), candidate_hash_2)
+ .await
+ .len(),
+ 1
+ );
+ assert_eq!(
+ query_all_chunks(&mut virtual_overseer, chunk_indices.clone(), candidate_hash_3)
+ .await
+ .len(),
+ 0
+ );
- let msg = AvailabilityStoreMessage::QueryAllChunks(candidate_hash_3, tx);
- virtual_overseer.send(FromOrchestra::Communication { msg }).await;
- assert_eq!(rx.await.unwrap().len(), 0);
- }
 virtual_overseer
 });
}
@@ -667,6 +770,8 @@ fn stored_but_not_included_data_is_pruned() {
 n_validators,
 available_data: available_data.clone(),
 tx,
+ node_features: NodeFeatures::EMPTY,
+ core_index: CoreIndex(1),
 expected_erasure_root: branches.root(),
 };
@@ -723,6 +828,8 @@ fn stored_data_kept_until_finalized() {
 n_validators,
 available_data: available_data.clone(),
 tx,
+ node_features: NodeFeatures::EMPTY,
+ core_index: CoreIndex(1),
 expected_erasure_root: branches.root(),
 };
@@ -998,6 +1105,8 @@ fn forkfullness_works() {
 n_validators,
 available_data: available_data_1.clone(),
 tx,
+ node_features: NodeFeatures::EMPTY,
+ core_index: CoreIndex(1),
 expected_erasure_root: branches.root(),
 };
@@ -1014,6 +1123,8 @@ fn forkfullness_works() {
 n_validators,
 available_data: available_data_2.clone(),
 tx,
+ node_features: NodeFeatures::EMPTY,
+ core_index: CoreIndex(1),
 expected_erasure_root: branches.root(),
 };
@@ -1126,6 +1237,25 @@ async fn query_chunk(
 rx.await.unwrap()
}
+async fn query_all_chunks(
+ virtual_overseer: &mut VirtualOverseer,
+ chunk_mapping: Vec<ChunkIndex>,
+ candidate_hash: CandidateHash,
+) -> Vec<ErasureChunk> {
+ let (tx, rx) = oneshot::channel();
+
+ let msg = AvailabilityStoreMessage::QueryAllChunks(candidate_hash, tx);
+ virtual_overseer.send(FromOrchestra::Communication { msg }).await;
+
+ let resp = rx.await.unwrap();
+ resp.into_iter()
+ .map(|(val_idx, chunk)| {
+ assert_eq!(chunk.index, chunk_mapping[val_idx.0 as usize]);
+ chunk
+ })
+ .collect()
+}
+
 async fn has_all_chunks(
 virtual_overseer: &mut VirtualOverseer,
 candidate_hash: CandidateHash,
@@ -1206,12 +1336,12 @@ fn query_chunk_size_works() {
 test_harness(TestState::default(), store.clone(), |mut virtual_overseer| async move {
 let candidate_hash = CandidateHash(Hash::repeat_byte(33));
- let validator_index = ValidatorIndex(5);
+ let chunk_index = ChunkIndex(5);
 let n_validators = 10;
 let chunk = ErasureChunk {
 chunk: vec![1, 2, 3],
- index: validator_index,
+ index: chunk_index,
 proof: Proof::try_from(vec![vec![3, 4, 5]]).unwrap(),
 };
@@ -1232,8 +1362,12 @@
 let (tx, rx) = oneshot::channel();
- let chunk_msg =
- AvailabilityStoreMessage::StoreChunk { candidate_hash, chunk: chunk.clone(), tx };
+ let chunk_msg = AvailabilityStoreMessage::StoreChunk {
+ candidate_hash,
+ chunk: chunk.clone(),
+ tx,
+ validator_index: chunk_index.into(),
+ };
 overseer_send(&mut virtual_overseer, chunk_msg).await;
 assert_eq!(rx.await.unwrap(), Ok(()));
diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs
index 
a45edcbef52..2fa8ad29efe 100644
--- a/polkadot/node/core/backing/src/lib.rs
+++ b/polkadot/node/core/backing/src/lib.rs
@@ -210,6 +210,8 @@ struct PerRelayParentState {
 prospective_parachains_mode: ProspectiveParachainsMode,
 /// The hash of the relay parent on top of which this job is doing its work.
 parent: Hash,
+ /// Session index.
+ session_index: SessionIndex,
 /// The `ParaId` assigned to the local validator at this relay parent.
 assigned_para: Option<ParaId>,
 /// The `CoreIndex` assigned to the local validator at this relay parent.
@@ -534,6 +536,8 @@ async fn store_available_data(
 candidate_hash: CandidateHash,
 available_data: AvailableData,
 expected_erasure_root: Hash,
+ core_index: CoreIndex,
+ node_features: NodeFeatures,
) -> Result<(), Error> {
 let (tx, rx) = oneshot::channel();
 // Important: the `av-store` subsystem will check if the erasure root of the `available_data`
@@ -546,6 +550,8 @@ async fn store_available_data(
 n_validators,
 available_data,
 expected_erasure_root,
+ core_index,
+ node_features,
 tx,
 })
 .await;
@@ -569,6 +575,8 @@ async fn make_pov_available(
 candidate_hash: CandidateHash,
 validation_data: PersistedValidationData,
 expected_erasure_root: Hash,
+ core_index: CoreIndex,
+ node_features: NodeFeatures,
) -> Result<(), Error> {
 store_available_data(
 sender,
@@ -576,6 +584,8 @@ async fn make_pov_available(
 candidate_hash,
 AvailableData { pov, validation_data },
 expected_erasure_root,
+ core_index,
+ node_features,
 )
 .await
}
@@ -646,6 +656,7 @@ struct BackgroundValidationParams {
 tx_command: mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
 candidate: CandidateReceipt,
 relay_parent: Hash,
+ session_index: SessionIndex,
 persisted_validation_data: PersistedValidationData,
 pov: PoVData,
 n_validators: usize,
@@ -657,12 +668,14 @@ async fn validate_and_make_available(
 impl overseer::CandidateBackingSenderTrait,
 impl Fn(BackgroundValidationResult) -> ValidatedCandidateCommand + Sync,
 >,
+ core_index: CoreIndex,
) -> Result<(), Error> {
 let BackgroundValidationParams {
 mut sender,
 mut tx_command,
 candidate,
 relay_parent,
+ session_index,
 persisted_validation_data,
 pov,
 n_validators,
@@ -692,6 +705,10 @@ async fn validate_and_make_available(
 Err(e) => return Err(Error::UtilError(e)),
 };
+ let node_features = request_node_features(relay_parent, session_index, &mut sender)
+ .await?
+ .unwrap_or(NodeFeatures::EMPTY);
+
 let pov = match pov {
 PoVData::Ready(pov) => pov,
 PoVData::FetchFromValidator { from_validator, candidate_hash, pov_hash } =>
@@ -747,6 +764,8 @@ async fn validate_and_make_available(
 candidate.hash(),
 validation_data.clone(),
 candidate.descriptor.erasure_root,
+ core_index,
+ node_features,
 )
 .await;
@@ -1191,6 +1210,7 @@ async fn construct_per_relay_parent_state(
 Ok(Some(PerRelayParentState {
 prospective_parachains_mode: mode,
 parent,
+ session_index,
 assigned_core,
 assigned_para,
 backed: HashSet::new(),
@@ -1788,10 +1808,11 @@ async fn background_validate_and_make_available(
 >,
) -> Result<(), Error> {
 let candidate_hash = params.candidate.hash();
+ let Some(core_index) = rp_state.assigned_core else { return Ok(()) };
 if rp_state.awaiting_validation.insert(candidate_hash) {
 // spawn background task. 
let bg = async move { - if let Err(error) = validate_and_make_available(params).await { + if let Err(error) = validate_and_make_available(params, core_index).await { if let Error::BackgroundValidationMpsc(error) = error { gum::debug!( target: LOG_TARGET, @@ -1866,6 +1887,7 @@ async fn kick_off_validation_work( tx_command: background_validation_tx.clone(), candidate: attesting.candidate, relay_parent: rp_state.parent, + session_index: rp_state.session_index, persisted_validation_data, pov, n_validators: rp_state.table_context.validators.len(), @@ -2019,6 +2041,7 @@ async fn validate_and_second( tx_command: background_validation_tx.clone(), candidate: candidate.clone(), relay_parent: rp_state.parent, + session_index: rp_state.session_index, persisted_validation_data, pov: PoVData::Ready(pov), n_validators: rp_state.table_context.validators.len(), @@ -2084,8 +2107,7 @@ async fn handle_second_message( collation = ?candidate.descriptor().para_id, "Subsystem asked to second for para outside of our assignment", ); - - return Ok(()) + return Ok(()); } gum::debug!( diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index d1969e656db..00f9e4cd8ff 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -367,6 +367,15 @@ async fn assert_validation_requests( tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); } ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(sess_idx, tx)) + ) if sess_idx == 1 => { + tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); } async fn assert_validate_from_exhaustive( @@ -2084,7 +2093,7 @@ fn retry_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; // Not deterministic which message comes first: - for _ in 0u32..5 { + for _ in 0u32..6 { match virtual_overseer.recv().await { AllMessages::Provisioner(ProvisionerMessage::ProvisionableData( _, @@ -2115,6 +2124,12 @@ fn retry_works() { )) => { tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::NodeFeatures(1, tx), + )) => { + tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + }, msg => { assert!(false, "Unexpected message: {:?}", msg); }, @@ -2662,32 +2677,7 @@ fn validator_ignores_statements_from_disabled_validators() { virtual_overseer.send(FromOrchestra::Communication { msg: statement_3 }).await; - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) - ) if hash == validation_code.hash() => { - tx.send(Ok(Some(validation_code.clone()))).unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(tx)) - ) => { - tx.send(Ok(1u32.into())).unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionExecutorParams(sess_idx, tx)) - ) if sess_idx == 1 => { - tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); - } - ); + assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; // Sending a `Statement::Seconded` for our assignment will start // validation process. The first thing requested is the PoV. 
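Everything above funnels through a single question: which erasure chunk should a given validator hold for a given core? Below is a minimal sketch of that mapping's semantics. The identity branch matches what the first half of `store_pov_and_queries_work` asserts; the per-core offset in the feature-enabled branch is an assumption standing in for whatever shift the real `availability_chunk_index` helper applies, which this diff does not show.

```rust
use polkadot_primitives::{node_features, ChunkIndex, CoreIndex, NodeFeatures, ValidatorIndex};

// Sketch only, not the upstream implementation.
fn chunk_index_sketch(
	features: Option<&NodeFeatures>,
	n_validators: usize,
	core_index: CoreIndex,
	validator_index: ValidatorIndex,
) -> ChunkIndex {
	let mapping_enabled = features
		.and_then(|f| f.get(node_features::FeatureIndex::AvailabilityChunkMapping as u8 as usize))
		.map_or(false, |bit| *bit);

	if mapping_enabled {
		// Feature on: rotate the identity mapping by a per-core offset so that
		// different cores hand out their chunks to different validators. The
		// concrete offset used upstream is an assumption here.
		ChunkIndex((validator_index.0 + core_index.0) % n_validators as u32)
	} else {
		// Feature off: ValidatorIndex -> ChunkIndex stays 1:1.
		ChunkIndex(validator_index.0)
	}
}
```

`availability_chunk_indices` (the plural form used by `store_available_data`) is then just this mapping evaluated for every validator index in `0..n_validators`, producing the per-candidate `ValidatorIndex -> ChunkIndex` table that av-store files chunks under.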
diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index c93cf21ef7d..5ef3a3b1528 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -1435,7 +1435,13 @@ fn concurrent_dependent_candidates() { )) => { tx.send(Ok(test_state.validator_groups.clone())).unwrap(); }, - + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::NodeFeatures(sess_idx, tx), + )) => { + assert_eq!(sess_idx, 1); + tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + }, AllMessages::RuntimeApi(RuntimeApiMessage::Request( _parent, RuntimeApiRequest::AvailabilityCores(tx), diff --git a/polkadot/node/core/bitfield-signing/src/lib.rs b/polkadot/node/core/bitfield-signing/src/lib.rs index 89851c4a033..e3effb7949e 100644 --- a/polkadot/node/core/bitfield-signing/src/lib.rs +++ b/polkadot/node/core/bitfield-signing/src/lib.rs @@ -27,15 +27,14 @@ use futures::{ FutureExt, }; use polkadot_node_subsystem::{ - errors::RuntimeApiError, jaeger, - messages::{ - AvailabilityStoreMessage, BitfieldDistributionMessage, RuntimeApiMessage, RuntimeApiRequest, - }, + messages::{AvailabilityStoreMessage, BitfieldDistributionMessage}, overseer, ActivatedLeaf, FromOrchestra, OverseerSignal, PerLeafSpan, SpawnedSubsystem, - SubsystemError, SubsystemResult, SubsystemSender, + SubsystemError, SubsystemResult, +}; +use polkadot_node_subsystem_util::{ + self as util, request_availability_cores, runtime::recv_runtime, Validator, }; -use polkadot_node_subsystem_util::{self as util, Validator}; use polkadot_primitives::{AvailabilityBitfield, CoreState, Hash, ValidatorIndex}; use sp_keystore::{Error as KeystoreError, KeystorePtr}; use std::{collections::HashMap, time::Duration}; @@ -69,7 +68,7 @@ pub enum Error { MpscSend(#[from] mpsc::SendError), #[error(transparent)] - Runtime(#[from] RuntimeApiError), + Runtime(#[from] util::runtime::Error), #[error("Keystore failed: {0:?}")] Keystore(KeystoreError), @@ -79,8 +78,8 @@ pub enum Error { /// for whether we have the availability chunk for our validator index. 
async fn get_core_availability( core: &CoreState, - validator_idx: ValidatorIndex, - sender: &Mutex<&mut impl SubsystemSender>, + validator_index: ValidatorIndex, + sender: &Mutex<&mut impl overseer::BitfieldSigningSenderTrait>, span: &jaeger::Span, ) -> Result { if let CoreState::Occupied(core) = core { @@ -90,14 +89,11 @@ async fn get_core_availability( sender .lock() .await - .send_message( - AvailabilityStoreMessage::QueryChunkAvailability( - core.candidate_hash, - validator_idx, - tx, - ) - .into(), - ) + .send_message(AvailabilityStoreMessage::QueryChunkAvailability( + core.candidate_hash, + validator_index, + tx, + )) .await; let res = rx.await.map_err(Into::into); @@ -116,25 +112,6 @@ async fn get_core_availability( } } -/// delegates to the v1 runtime API -async fn get_availability_cores( - relay_parent: Hash, - sender: &mut impl SubsystemSender, -) -> Result, Error> { - let (tx, rx) = oneshot::channel(); - sender - .send_message( - RuntimeApiMessage::Request(relay_parent, RuntimeApiRequest::AvailabilityCores(tx)) - .into(), - ) - .await; - match rx.await { - Ok(Ok(out)) => Ok(out), - Ok(Err(runtime_err)) => Err(runtime_err.into()), - Err(err) => Err(err.into()), - } -} - /// - get the list of core states from the runtime /// - for each core, concurrently determine chunk availability (see `get_core_availability`) /// - return the bitfield if there were no errors at any point in this process (otherwise, it's @@ -143,12 +120,12 @@ async fn construct_availability_bitfield( relay_parent: Hash, span: &jaeger::Span, validator_idx: ValidatorIndex, - sender: &mut impl SubsystemSender, + sender: &mut impl overseer::BitfieldSigningSenderTrait, ) -> Result { // get the set of availability cores from the runtime let availability_cores = { let _span = span.child("get-availability-cores"); - get_availability_cores(relay_parent, sender).await? + recv_runtime(request_availability_cores(relay_parent, sender).await).await? }; // Wrap the sender in a Mutex to share it between the futures. 
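The bitfield construction itself is untouched by this patch: the subsystem still queries av-store once per occupied core via `QueryChunkAvailability` (now keyed by our `ValidatorIndex` rather than a chunk index, since the two no longer coincide) and folds the answers, in core order, into the payload to sign. A self-contained sketch of that folding step; the helper name and the direct `bitvec` construction are illustrative, not taken from the diff:

```rust
use bitvec::{order::Lsb0, vec::BitVec};
use polkadot_primitives::AvailabilityBitfield;

// Hypothetical helper: one bool per availability core, in core order,
// true when our chunk for the candidate occupying that core is stored.
fn to_bitfield(per_core: &[bool]) -> AvailabilityBitfield {
	let mut bits: BitVec<u8, Lsb0> = BitVec::repeat(false, per_core.len());
	for (core, available) in per_core.iter().enumerate() {
		bits.set(core, *available);
	}
	AvailabilityBitfield(bits)
}
```

What did change is the plumbing around it: the hand-rolled `RuntimeApiRequest::AvailabilityCores` round trip is replaced by the `request_availability_cores` and `recv_runtime` utilities from `polkadot-node-subsystem-util`.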
diff --git a/polkadot/node/core/bitfield-signing/src/tests.rs b/polkadot/node/core/bitfield-signing/src/tests.rs index 106ecc06b15..0e61e6086d2 100644 --- a/polkadot/node/core/bitfield-signing/src/tests.rs +++ b/polkadot/node/core/bitfield-signing/src/tests.rs @@ -16,7 +16,7 @@ use super::*; use futures::{executor::block_on, pin_mut, StreamExt}; -use polkadot_node_subsystem::messages::AllMessages; +use polkadot_node_subsystem::messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest}; use polkadot_primitives::{CandidateHash, OccupiedCore}; use test_helpers::dummy_candidate_descriptor; @@ -64,7 +64,7 @@ fn construct_availability_bitfield_works() { AllMessages::AvailabilityStore( AvailabilityStoreMessage::QueryChunkAvailability(c_hash, vidx, tx), ) => { - assert_eq!(validator_index, vidx); + assert_eq!(validator_index, vidx.into()); tx.send(c_hash == hash_a).unwrap(); }, diff --git a/polkadot/node/core/dispute-coordinator/src/participation/mod.rs b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs index 05ea7323af1..b58ce570f8f 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/mod.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs @@ -305,6 +305,7 @@ async fn participate( req.candidate_receipt().clone(), req.session(), None, + None, recover_available_data_tx, )) .await; diff --git a/polkadot/node/core/dispute-coordinator/src/participation/tests.rs b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs index 367454115f0..1316508e84c 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs @@ -132,7 +132,7 @@ pub async fn participation_missing_availability(ctx_handle: &mut VirtualOverseer assert_matches!( ctx_handle.recv().await, AllMessages::AvailabilityRecovery( - AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx) + AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, _, tx) ) => { tx.send(Err(RecoveryError::Unavailable)).unwrap(); }, @@ -151,7 +151,7 @@ async fn recover_available_data(virtual_overseer: &mut VirtualOverseer) { assert_matches!( virtual_overseer.recv().await, AllMessages::AvailabilityRecovery( - AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx) + AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, _, tx) ) => { tx.send(Ok(available_data)).unwrap(); }, @@ -195,7 +195,7 @@ fn same_req_wont_get_queued_if_participation_is_already_running() { assert_matches!( ctx_handle.recv().await, AllMessages::AvailabilityRecovery( - AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx) + AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, _, tx) ) => { tx.send(Err(RecoveryError::Unavailable)).unwrap(); }, @@ -260,7 +260,7 @@ fn reqs_get_queued_when_out_of_capacity() { { match ctx_handle.recv().await { AllMessages::AvailabilityRecovery( - AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx), + AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, _, tx), ) => { tx.send(Err(RecoveryError::Unavailable)).unwrap(); recover_available_data_msg_count += 1; @@ -346,7 +346,7 @@ fn cannot_participate_if_cannot_recover_available_data() { assert_matches!( ctx_handle.recv().await, AllMessages::AvailabilityRecovery( - AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx) + AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, _, tx) ) => { tx.send(Err(RecoveryError::Unavailable)).unwrap(); }, @@ -412,7 +412,7 @@ fn cast_invalid_vote_if_available_data_is_invalid() 
{ assert_matches!( ctx_handle.recv().await, AllMessages::AvailabilityRecovery( - AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx) + AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, _, tx) ) => { tx.send(Err(RecoveryError::Invalid)).unwrap(); }, diff --git a/polkadot/node/jaeger/src/spans.rs b/polkadot/node/jaeger/src/spans.rs index 68fa57e2ca1..fcee8be9a50 100644 --- a/polkadot/node/jaeger/src/spans.rs +++ b/polkadot/node/jaeger/src/spans.rs @@ -85,7 +85,9 @@ use parity_scale_codec::Encode; use polkadot_node_primitives::PoV; -use polkadot_primitives::{BlakeTwo256, CandidateHash, Hash, HashT, Id as ParaId, ValidatorIndex}; +use polkadot_primitives::{ + BlakeTwo256, CandidateHash, ChunkIndex, Hash, HashT, Id as ParaId, ValidatorIndex, +}; use sc_network_types::PeerId; use std::{fmt, sync::Arc}; @@ -338,8 +340,8 @@ impl Span { } #[inline(always)] - pub fn with_chunk_index(self, chunk_index: u32) -> Self { - self.with_string_tag("chunk-index", chunk_index) + pub fn with_chunk_index(self, chunk_index: ChunkIndex) -> Self { + self.with_string_tag("chunk-index", &chunk_index.0) } #[inline(always)] diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index 39e2985a88c..01b208421d7 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -19,6 +19,7 @@ polkadot-node-network-protocol = { path = "../protocol" } polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-node-primitives = { path = "../../primitives" } +sc-network = { path = "../../../../substrate/client/network" } sp-core = { path = "../../../../substrate/primitives/core", features = ["std"] } sp-keystore = { path = "../../../../substrate/primitives/keystore" } thiserror = { workspace = true } @@ -36,6 +37,7 @@ sc-network = { path = "../../../../substrate/client/network" } futures-timer = "3.0.2" assert_matches = "1.4.0" polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +rstest = "0.18.2" polkadot-subsystem-bench = { path = "../../subsystem-bench" } diff --git a/polkadot/node/network/availability-distribution/src/error.rs b/polkadot/node/network/availability-distribution/src/error.rs index c547a1abbc2..72a809dd114 100644 --- a/polkadot/node/network/availability-distribution/src/error.rs +++ b/polkadot/node/network/availability-distribution/src/error.rs @@ -49,7 +49,7 @@ pub enum Error { #[fatal] #[error("Oneshot for receiving response from Chain API got cancelled")] - ChainApiSenderDropped(#[source] oneshot::Canceled), + ChainApiSenderDropped(#[from] oneshot::Canceled), #[fatal] #[error("Retrieving response from Chain API unexpectedly failed with error: {0}")] @@ -82,6 +82,9 @@ pub enum Error { #[error("Given validator index could not be found in current session")] InvalidValidatorIndex, + + #[error("Erasure coding error: {0}")] + ErasureCoding(#[from] polkadot_erasure_coding::Error), } /// General result abbreviation type alias. @@ -104,7 +107,8 @@ pub fn log_error( JfyiError::InvalidValidatorIndex | JfyiError::NoSuchCachedSession { .. 
} |
 JfyiError::QueryAvailableDataResponseChannel(_) |
- JfyiError::QueryChunkResponseChannel(_) => gum::warn!(target: LOG_TARGET, error = %jfyi, ctx),
+ JfyiError::QueryChunkResponseChannel(_) |
+ JfyiError::ErasureCoding(_) => gum::warn!(target: LOG_TARGET, error = %jfyi, ctx),
 JfyiError::FetchPoV(_) |
 JfyiError::SendResponse |
 JfyiError::NoSuchPoV |
diff --git a/polkadot/node/network/availability-distribution/src/lib.rs b/polkadot/node/network/availability-distribution/src/lib.rs
index c62ce1dd981..ec2c01f99b0 100644
--- a/polkadot/node/network/availability-distribution/src/lib.rs
+++ b/polkadot/node/network/availability-distribution/src/lib.rs
@@ -18,7 +18,9 @@
use futures::{future::Either, FutureExt, StreamExt, TryFutureExt};
use sp_keystore::KeystorePtr;
-use polkadot_node_network_protocol::request_response::{v1, IncomingRequestReceiver};
+use polkadot_node_network_protocol::request_response::{
+ v1, v2, IncomingRequestReceiver, ReqProtocolNames,
+};
use polkadot_node_subsystem::{
 jaeger, messages::AvailabilityDistributionMessage, overseer, FromOrchestra, OverseerSignal,
 SpawnedSubsystem, SubsystemError,
@@ -41,7 +43,7 @@ mod pov_requester;
/// Responding to erasure chunk requests:
mod responder;
-use responder::{run_chunk_receiver, run_pov_receiver};
+use responder::{run_chunk_receivers, run_pov_receiver};
mod metrics;
/// Prometheus `Metrics` for availability distribution.
@@ -58,6 +60,8 @@ pub struct AvailabilityDistributionSubsystem {
 runtime: RuntimeInfo,
 /// Receivers to receive messages from.
 recvs: IncomingRequestReceivers,
+ /// Mapping of the req-response protocols to the full protocol names.
+ req_protocol_names: ReqProtocolNames,
 /// Prometheus metrics.
 metrics: Metrics,
}
@@ -66,8 +70,10 @@ pub struct AvailabilityDistributionSubsystem {
pub struct IncomingRequestReceivers {
 /// Receiver for incoming PoV requests.
 pub pov_req_receiver: IncomingRequestReceiver<v1::PoVFetchingRequest>,
- /// Receiver for incoming availability chunk requests.
- pub chunk_req_receiver: IncomingRequestReceiver<v1::ChunkFetchingRequest>,
+ /// Receiver for incoming v1 availability chunk requests.
+ pub chunk_req_v1_receiver: IncomingRequestReceiver<v1::ChunkFetchingRequest>,
+ /// Receiver for incoming v2 availability chunk requests.
+ pub chunk_req_v2_receiver: IncomingRequestReceiver<v2::ChunkFetchingRequest>,
}
#[overseer::subsystem(AvailabilityDistribution, error=SubsystemError, prefix=self::overseer)]
@@ -85,18 +91,27 @@ impl AvailabilityDistributionSubsystem {
#[overseer::contextbounds(AvailabilityDistribution, prefix = self::overseer)]
impl AvailabilityDistributionSubsystem {
 /// Create a new instance of the availability distribution.
- pub fn new(keystore: KeystorePtr, recvs: IncomingRequestReceivers, metrics: Metrics) -> Self {
+ pub fn new(
+ keystore: KeystorePtr,
+ recvs: IncomingRequestReceivers,
+ req_protocol_names: ReqProtocolNames,
+ metrics: Metrics,
+ ) -> Self {
 let runtime = RuntimeInfo::new(Some(keystore));
- Self { runtime, recvs, metrics }
+ Self { runtime, recvs, req_protocol_names, metrics }
 }
 /// Start processing work as passed on from the Overseer. 
async fn run(self, mut ctx: Context) -> std::result::Result<(), FatalError> { - let Self { mut runtime, recvs, metrics } = self; + let Self { mut runtime, recvs, metrics, req_protocol_names } = self; let mut spans: HashMap = HashMap::new(); - let IncomingRequestReceivers { pov_req_receiver, chunk_req_receiver } = recvs; - let mut requester = Requester::new(metrics.clone()).fuse(); + let IncomingRequestReceivers { + pov_req_receiver, + chunk_req_v1_receiver, + chunk_req_v2_receiver, + } = recvs; + let mut requester = Requester::new(req_protocol_names, metrics.clone()).fuse(); let mut warn_freq = gum::Freq::new(); { @@ -109,7 +124,13 @@ impl AvailabilityDistributionSubsystem { ctx.spawn( "chunk-receiver", - run_chunk_receiver(sender, chunk_req_receiver, metrics.clone()).boxed(), + run_chunk_receivers( + sender, + chunk_req_v1_receiver, + chunk_req_v2_receiver, + metrics.clone(), + ) + .boxed(), ) .map_err(FatalError::SpawnTask)?; } diff --git a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs index f478defcaa9..7bd36709bc5 100644 --- a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs +++ b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs @@ -22,10 +22,12 @@ use futures::{ FutureExt, SinkExt, }; +use parity_scale_codec::Decode; use polkadot_erasure_coding::branch_hash; use polkadot_node_network_protocol::request_response::{ outgoing::{OutgoingRequest, Recipient, RequestError, Requests}, - v1::{ChunkFetchingRequest, ChunkFetchingResponse}, + v1::{self, ChunkResponse}, + v2, }; use polkadot_node_primitives::ErasureChunk; use polkadot_node_subsystem::{ @@ -34,9 +36,10 @@ use polkadot_node_subsystem::{ overseer, }; use polkadot_primitives::{ - AuthorityDiscoveryId, BlakeTwo256, CandidateHash, GroupIndex, Hash, HashT, OccupiedCore, - SessionIndex, + AuthorityDiscoveryId, BlakeTwo256, CandidateHash, ChunkIndex, GroupIndex, Hash, HashT, + OccupiedCore, SessionIndex, }; +use sc_network::ProtocolName; use crate::{ error::{FatalError, Result}, @@ -111,8 +114,8 @@ struct RunningTask { /// This vector gets drained during execution of the task (it will be empty afterwards). group: Vec, - /// The request to send. - request: ChunkFetchingRequest, + /// The request to send. We can store it as either v1 or v2, they have the same payload. + request: v2::ChunkFetchingRequest, /// Root hash, for verifying the chunks validity. erasure_root: Hash, @@ -128,6 +131,16 @@ struct RunningTask { /// Span tracking the fetching of this chunk. span: jaeger::Span, + + /// Expected chunk index. We'll validate that the remote did send us the correct chunk (only + /// important for v2 requests). + chunk_index: ChunkIndex, + + /// Full protocol name for ChunkFetchingV1. + req_v1_protocol_name: ProtocolName, + + /// Full protocol name for ChunkFetchingV2. 
+ req_v2_protocol_name: ProtocolName, } impl FetchTaskConfig { @@ -140,13 +153,17 @@ impl FetchTaskConfig { sender: mpsc::Sender, metrics: Metrics, session_info: &SessionInfo, + chunk_index: ChunkIndex, span: jaeger::Span, + req_v1_protocol_name: ProtocolName, + req_v2_protocol_name: ProtocolName, ) -> Self { let span = span .child("fetch-task-config") .with_trace_id(core.candidate_hash) .with_string_tag("leaf", format!("{:?}", leaf)) .with_validator_index(session_info.our_index) + .with_chunk_index(chunk_index) .with_uint_tag("group-index", core.group_responsible.0 as u64) .with_relay_parent(core.candidate_descriptor.relay_parent) .with_string_tag("pov-hash", format!("{:?}", core.candidate_descriptor.pov_hash)) @@ -165,7 +182,7 @@ impl FetchTaskConfig { group: session_info.validator_groups.get(core.group_responsible.0 as usize) .expect("The responsible group of a candidate should be available in the corresponding session. qed.") .clone(), - request: ChunkFetchingRequest { + request: v2::ChunkFetchingRequest { candidate_hash: core.candidate_hash, index: session_info.our_index, }, @@ -174,6 +191,9 @@ impl FetchTaskConfig { metrics, sender, span, + chunk_index, + req_v1_protocol_name, + req_v2_protocol_name }; FetchTaskConfig { live_in, prepared_running: Some(prepared_running) } } @@ -271,7 +291,8 @@ impl RunningTask { count += 1; let _chunk_fetch_span = span .child("fetch-chunk-request") - .with_chunk_index(self.request.index.0) + .with_validator_index(self.request.index) + .with_chunk_index(self.chunk_index) .with_stage(jaeger::Stage::AvailabilityDistribution); // Send request: let resp = match self @@ -296,11 +317,12 @@ impl RunningTask { drop(_chunk_fetch_span); let _chunk_recombine_span = span .child("recombine-chunk") - .with_chunk_index(self.request.index.0) + .with_validator_index(self.request.index) + .with_chunk_index(self.chunk_index) .with_stage(jaeger::Stage::AvailabilityDistribution); let chunk = match resp { - ChunkFetchingResponse::Chunk(resp) => resp.recombine_into_chunk(&self.request), - ChunkFetchingResponse::NoSuchChunk => { + Some(chunk) => chunk, + None => { gum::debug!( target: LOG_TARGET, validator = ?validator, @@ -320,11 +342,12 @@ impl RunningTask { drop(_chunk_recombine_span); let _chunk_validate_and_store_span = span .child("validate-and-store-chunk") - .with_chunk_index(self.request.index.0) + .with_validator_index(self.request.index) + .with_chunk_index(self.chunk_index) .with_stage(jaeger::Stage::AvailabilityDistribution); // Data genuine? - if !self.validate_chunk(&validator, &chunk) { + if !self.validate_chunk(&validator, &chunk, self.chunk_index) { bad_validators.push(validator); continue } @@ -350,7 +373,7 @@ impl RunningTask { validator: &AuthorityDiscoveryId, network_error_freq: &mut gum::Freq, canceled_freq: &mut gum::Freq, - ) -> std::result::Result { + ) -> std::result::Result, TaskError> { gum::trace!( target: LOG_TARGET, origin = ?validator, @@ -362,9 +385,13 @@ impl RunningTask { "Starting chunk request", ); - let (full_request, response_recv) = - OutgoingRequest::new(Recipient::Authority(validator.clone()), self.request); - let requests = Requests::ChunkFetchingV1(full_request); + let (full_request, response_recv) = OutgoingRequest::new_with_fallback( + Recipient::Authority(validator.clone()), + self.request, + // Fallback to v1, for backwards compatibility. 
+ v1::ChunkFetchingRequest::from(self.request), + ); + let requests = Requests::ChunkFetching(full_request); self.sender .send(FromFetchTask::Message( @@ -378,7 +405,58 @@ impl RunningTask { .map_err(|_| TaskError::ShuttingDown)?; match response_recv.await { - Ok(resp) => Ok(resp), + Ok((bytes, protocol)) => match protocol { + _ if protocol == self.req_v2_protocol_name => + match v2::ChunkFetchingResponse::decode(&mut &bytes[..]) { + Ok(chunk_response) => Ok(Option::::from(chunk_response)), + Err(e) => { + gum::warn!( + target: LOG_TARGET, + origin = ?validator, + relay_parent = ?self.relay_parent, + group_index = ?self.group_index, + session_index = ?self.session_index, + chunk_index = ?self.request.index, + candidate_hash = ?self.request.candidate_hash, + err = ?e, + "Peer sent us invalid erasure chunk data (v2)" + ); + Err(TaskError::PeerError) + }, + }, + _ if protocol == self.req_v1_protocol_name => + match v1::ChunkFetchingResponse::decode(&mut &bytes[..]) { + Ok(chunk_response) => Ok(Option::::from(chunk_response) + .map(|c| c.recombine_into_chunk(&self.request.into()))), + Err(e) => { + gum::warn!( + target: LOG_TARGET, + origin = ?validator, + relay_parent = ?self.relay_parent, + group_index = ?self.group_index, + session_index = ?self.session_index, + chunk_index = ?self.request.index, + candidate_hash = ?self.request.candidate_hash, + err = ?e, + "Peer sent us invalid erasure chunk data" + ); + Err(TaskError::PeerError) + }, + }, + _ => { + gum::warn!( + target: LOG_TARGET, + origin = ?validator, + relay_parent = ?self.relay_parent, + group_index = ?self.group_index, + session_index = ?self.session_index, + chunk_index = ?self.request.index, + candidate_hash = ?self.request.candidate_hash, + "Peer sent us invalid erasure chunk data - unknown protocol" + ); + Err(TaskError::PeerError) + }, + }, Err(RequestError::InvalidResponse(err)) => { gum::warn!( target: LOG_TARGET, @@ -427,7 +505,23 @@ impl RunningTask { } } - fn validate_chunk(&self, validator: &AuthorityDiscoveryId, chunk: &ErasureChunk) -> bool { + fn validate_chunk( + &self, + validator: &AuthorityDiscoveryId, + chunk: &ErasureChunk, + expected_chunk_index: ChunkIndex, + ) -> bool { + if chunk.index != expected_chunk_index { + gum::warn!( + target: LOG_TARGET, + candidate_hash = ?self.request.candidate_hash, + origin = ?validator, + chunk_index = ?chunk.index, + expected_chunk_index = ?expected_chunk_index, + "Validator sent the wrong chunk", + ); + return false + } let anticipated_hash = match branch_hash(&self.erasure_root, chunk.proof(), chunk.index.0 as usize) { Ok(hash) => hash, @@ -459,6 +553,7 @@ impl RunningTask { AvailabilityStoreMessage::StoreChunk { candidate_hash: self.request.candidate_hash, chunk, + validator_index: self.request.index, tx, } .into(), diff --git a/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs b/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs index a5a81082e39..25fae37f725 100644 --- a/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs +++ b/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs @@ -24,21 +24,26 @@ use futures::{ task::{noop_waker, Context, Poll}, Future, FutureExt, StreamExt, }; +use rstest::rstest; use sc_network::{self as network, ProtocolName}; use sp_keyring::Sr25519Keyring; -use polkadot_node_network_protocol::request_response::{v1, Recipient}; +use polkadot_node_network_protocol::request_response::{ + v1::{self, ChunkResponse}, + Protocol, 
Recipient, ReqProtocolNames,
+};
use polkadot_node_primitives::{BlockData, PoV, Proof};
use polkadot_node_subsystem::messages::AllMessages;
-use polkadot_primitives::{CandidateHash, ValidatorIndex};
+use polkadot_primitives::{CandidateHash, ChunkIndex, ValidatorIndex};
use super::*;
use crate::{metrics::Metrics, tests::mock::get_valid_chunk_data};
#[test]
fn task_can_be_canceled() {
- let (task, _rx) = get_test_running_task();
+ let req_protocol_names = ReqProtocolNames::new(&Hash::repeat_byte(0xff), None);
+ let (task, _rx) = get_test_running_task(&req_protocol_names, 0.into(), 0.into());
 let (handle, kill) = oneshot::channel();
 std::mem::drop(handle);
 let running_task = task.run(kill);
@@ -49,96 +54,130 @@ fn task_can_be_canceled() {
}
/// Make sure the task won't accept a chunk that is invalid.
-#[test]
-fn task_does_not_accept_invalid_chunk() {
- let (mut task, rx) = get_test_running_task();
+#[rstest]
+#[case(Protocol::ChunkFetchingV1)]
+#[case(Protocol::ChunkFetchingV2)]
+fn task_does_not_accept_invalid_chunk(#[case] protocol: Protocol) {
+ let req_protocol_names = ReqProtocolNames::new(&Hash::repeat_byte(0xff), None);
+ let chunk_index = ChunkIndex(1);
+ let validator_index = ValidatorIndex(0);
+ let (mut task, rx) = get_test_running_task(&req_protocol_names, validator_index, chunk_index);
 let validators = vec![Sr25519Keyring::Alice.public().into()];
 task.group = validators;
+ let protocol_name = req_protocol_names.get_name(protocol);
 let test = TestRun {
 chunk_responses: {
- let mut m = HashMap::new();
- m.insert(
+ [(
 Recipient::Authority(Sr25519Keyring::Alice.public().into()),
- ChunkFetchingResponse::Chunk(v1::ChunkResponse {
- chunk: vec![1, 2, 3],
- proof: Proof::try_from(vec![vec![9, 8, 2], vec![2, 3, 4]]).unwrap(),
- }),
- );
- m
+ get_response(
+ protocol,
+ protocol_name.clone(),
+ Some((
+ vec![1, 2, 3],
+ Proof::try_from(vec![vec![9, 8, 2], vec![2, 3, 4]]).unwrap(),
+ chunk_index,
+ )),
+ ),
+ )]
+ .into_iter()
+ .collect()
 },
 valid_chunks: HashSet::new(),
+ req_protocol_names,
 };
 test.run(task, rx);
}
-#[test]
-fn task_stores_valid_chunk() {
- let (mut task, rx) = get_test_running_task();
+#[rstest]
+#[case(Protocol::ChunkFetchingV1)]
+#[case(Protocol::ChunkFetchingV2)]
+fn task_stores_valid_chunk(#[case] protocol: Protocol) {
+ let req_protocol_names = ReqProtocolNames::new(&Hash::repeat_byte(0xff), None);
+ // In order for protocol version 1 to work, the chunk index needs to be equal to the validator
+ // index. 
+ let chunk_index = ChunkIndex(0); + let validator_index = + if protocol == Protocol::ChunkFetchingV1 { ValidatorIndex(0) } else { ValidatorIndex(1) }; + let (mut task, rx) = get_test_running_task(&req_protocol_names, validator_index, chunk_index); + let validators = vec![Sr25519Keyring::Alice.public().into()]; let pov = PoV { block_data: BlockData(vec![45, 46, 47]) }; - let (root_hash, chunk) = get_valid_chunk_data(pov); + let (root_hash, chunk) = get_valid_chunk_data(pov, 10, chunk_index); task.erasure_root = root_hash; - task.request.index = chunk.index; - - let validators = vec![Sr25519Keyring::Alice.public().into()]; task.group = validators; + let protocol_name = req_protocol_names.get_name(protocol); let test = TestRun { chunk_responses: { - let mut m = HashMap::new(); - m.insert( + [( Recipient::Authority(Sr25519Keyring::Alice.public().into()), - ChunkFetchingResponse::Chunk(v1::ChunkResponse { - chunk: chunk.chunk.clone(), - proof: chunk.proof, - }), - ); - m - }, - valid_chunks: { - let mut s = HashSet::new(); - s.insert(chunk.chunk); - s + get_response( + protocol, + protocol_name.clone(), + Some((chunk.chunk.clone(), chunk.proof, chunk_index)), + ), + )] + .into_iter() + .collect() }, + valid_chunks: [(chunk.chunk)].into_iter().collect(), + req_protocol_names, }; test.run(task, rx); } -#[test] -fn task_does_not_accept_wrongly_indexed_chunk() { - let (mut task, rx) = get_test_running_task(); - let pov = PoV { block_data: BlockData(vec![45, 46, 47]) }; - let (root_hash, chunk) = get_valid_chunk_data(pov); - task.erasure_root = root_hash; - task.request.index = ValidatorIndex(chunk.index.0 + 1); +#[rstest] +#[case(Protocol::ChunkFetchingV1)] +#[case(Protocol::ChunkFetchingV2)] +fn task_does_not_accept_wrongly_indexed_chunk(#[case] protocol: Protocol) { + let req_protocol_names = ReqProtocolNames::new(&Hash::repeat_byte(0xff), None); + // In order for protocol version 1 to work, the chunk index needs to be equal to the validator + // index. + let chunk_index = ChunkIndex(0); + let validator_index = + if protocol == Protocol::ChunkFetchingV1 { ValidatorIndex(0) } else { ValidatorIndex(1) }; + let (mut task, rx) = get_test_running_task(&req_protocol_names, validator_index, chunk_index); let validators = vec![Sr25519Keyring::Alice.public().into()]; + let pov = PoV { block_data: BlockData(vec![45, 46, 47]) }; + let (_, other_chunk) = get_valid_chunk_data(pov.clone(), 10, ChunkIndex(3)); + let (root_hash, chunk) = get_valid_chunk_data(pov, 10, ChunkIndex(0)); + task.erasure_root = root_hash; + task.request.index = chunk.index.into(); task.group = validators; + let protocol_name = req_protocol_names.get_name(protocol); let test = TestRun { chunk_responses: { - let mut m = HashMap::new(); - m.insert( + [( Recipient::Authority(Sr25519Keyring::Alice.public().into()), - ChunkFetchingResponse::Chunk(v1::ChunkResponse { - chunk: chunk.chunk.clone(), - proof: chunk.proof, - }), - ); - m + get_response( + protocol, + protocol_name.clone(), + Some((other_chunk.chunk.clone(), chunk.proof, other_chunk.index)), + ), + )] + .into_iter() + .collect() }, valid_chunks: HashSet::new(), + req_protocol_names, }; test.run(task, rx); } /// Task stores chunk, if there is at least one validator having a valid chunk. 
-#[test] -fn task_stores_valid_chunk_if_there_is_one() { - let (mut task, rx) = get_test_running_task(); +#[rstest] +#[case(Protocol::ChunkFetchingV1)] +#[case(Protocol::ChunkFetchingV2)] +fn task_stores_valid_chunk_if_there_is_one(#[case] protocol: Protocol) { + let req_protocol_names = ReqProtocolNames::new(&Hash::repeat_byte(0xff), None); + // In order for protocol version 1 to work, the chunk index needs to be equal to the validator + // index. + let chunk_index = ChunkIndex(1); + let validator_index = + if protocol == Protocol::ChunkFetchingV1 { ValidatorIndex(1) } else { ValidatorIndex(2) }; + let (mut task, rx) = get_test_running_task(&req_protocol_names, validator_index, chunk_index); let pov = PoV { block_data: BlockData(vec![45, 46, 47]) }; - let (root_hash, chunk) = get_valid_chunk_data(pov); - task.erasure_root = root_hash; - task.request.index = chunk.index; let validators = [ // Only Alice has valid chunk - should succeed, even though she is tried last. @@ -151,37 +190,45 @@ fn task_stores_valid_chunk_if_there_is_one() { .iter() .map(|v| v.public().into()) .collect::>(); + + let (root_hash, chunk) = get_valid_chunk_data(pov, 10, chunk_index); + task.erasure_root = root_hash; task.group = validators; + let protocol_name = req_protocol_names.get_name(protocol); let test = TestRun { chunk_responses: { - let mut m = HashMap::new(); - m.insert( - Recipient::Authority(Sr25519Keyring::Alice.public().into()), - ChunkFetchingResponse::Chunk(v1::ChunkResponse { - chunk: chunk.chunk.clone(), - proof: chunk.proof, - }), - ); - m.insert( - Recipient::Authority(Sr25519Keyring::Bob.public().into()), - ChunkFetchingResponse::NoSuchChunk, - ); - m.insert( - Recipient::Authority(Sr25519Keyring::Charlie.public().into()), - ChunkFetchingResponse::Chunk(v1::ChunkResponse { - chunk: vec![1, 2, 3], - proof: Proof::try_from(vec![vec![9, 8, 2], vec![2, 3, 4]]).unwrap(), - }), - ); - - m - }, - valid_chunks: { - let mut s = HashSet::new(); - s.insert(chunk.chunk); - s + [ + ( + Recipient::Authority(Sr25519Keyring::Alice.public().into()), + get_response( + protocol, + protocol_name.clone(), + Some((chunk.chunk.clone(), chunk.proof, chunk_index)), + ), + ), + ( + Recipient::Authority(Sr25519Keyring::Bob.public().into()), + get_response(protocol, protocol_name.clone(), None), + ), + ( + Recipient::Authority(Sr25519Keyring::Charlie.public().into()), + get_response( + protocol, + protocol_name.clone(), + Some(( + vec![1, 2, 3], + Proof::try_from(vec![vec![9, 8, 2], vec![2, 3, 4]]).unwrap(), + chunk_index, + )), + ), + ), + ] + .into_iter() + .collect() }, + valid_chunks: [(chunk.chunk)].into_iter().collect(), + req_protocol_names, }; test.run(task, rx); } @@ -189,14 +236,16 @@ fn task_stores_valid_chunk_if_there_is_one() { struct TestRun { /// Response to deliver for a given validator index. /// None means, answer with `NetworkError`. 
- chunk_responses: HashMap, + chunk_responses: HashMap, ProtocolName)>, /// Set of chunks that should be considered valid: valid_chunks: HashSet>, + /// Request protocol names + req_protocol_names: ReqProtocolNames, } impl TestRun { fn run(self, task: RunningTask, rx: mpsc::Receiver) { - sp_tracing::try_init_simple(); + sp_tracing::init_for_tests(); let mut rx = rx.fuse(); let task = task.run_inner().fuse(); futures::pin_mut!(task); @@ -240,20 +289,41 @@ impl TestRun { let mut valid_responses = 0; for req in reqs { let req = match req { - Requests::ChunkFetchingV1(req) => req, + Requests::ChunkFetching(req) => req, _ => panic!("Unexpected request"), }; let response = self.chunk_responses.get(&req.peer).ok_or(network::RequestFailure::Refused); - if let Ok(ChunkFetchingResponse::Chunk(resp)) = &response { - if self.valid_chunks.contains(&resp.chunk) { - valid_responses += 1; + if let Ok((resp, protocol)) = response { + let chunk = if protocol == + &self.req_protocol_names.get_name(Protocol::ChunkFetchingV1) + { + Into::>::into( + v1::ChunkFetchingResponse::decode(&mut &resp[..]).unwrap(), + ) + .map(|c| c.chunk) + } else if protocol == + &self.req_protocol_names.get_name(Protocol::ChunkFetchingV2) + { + Into::>::into( + v2::ChunkFetchingResponse::decode(&mut &resp[..]).unwrap(), + ) + .map(|c| c.chunk) + } else { + unreachable!() + }; + + if let Some(chunk) = chunk { + if self.valid_chunks.contains(&chunk) { + valid_responses += 1; + } } + + req.pending_response + .send(response.cloned()) + .expect("Sending response should succeed"); } - req.pending_response - .send(response.map(|r| (r.encode(), ProtocolName::from("")))) - .expect("Sending response should succeed"); } return (valid_responses == 0) && self.valid_chunks.is_empty() }, @@ -274,8 +344,12 @@ impl TestRun { } } -/// Get a `RunningTask` filled with dummy values. -fn get_test_running_task() -> (RunningTask, mpsc::Receiver) { +/// Get a `RunningTask` filled with (mostly) dummy values. +fn get_test_running_task( + req_protocol_names: &ReqProtocolNames, + validator_index: ValidatorIndex, + chunk_index: ChunkIndex, +) -> (RunningTask, mpsc::Receiver) { let (tx, rx) = mpsc::channel(0); ( @@ -283,16 +357,45 @@ fn get_test_running_task() -> (RunningTask, mpsc::Receiver) { session_index: 0, group_index: GroupIndex(0), group: Vec::new(), - request: ChunkFetchingRequest { + request: v2::ChunkFetchingRequest { candidate_hash: CandidateHash([43u8; 32].into()), - index: ValidatorIndex(0), + index: validator_index, }, erasure_root: Hash::repeat_byte(99), relay_parent: Hash::repeat_byte(71), sender: tx, metrics: Metrics::new_dummy(), span: jaeger::Span::Disabled, + req_v1_protocol_name: req_protocol_names.get_name(Protocol::ChunkFetchingV1), + req_v2_protocol_name: req_protocol_names.get_name(Protocol::ChunkFetchingV2), + chunk_index, }, rx, ) } + +/// Make a versioned ChunkFetchingResponse. 
+fn get_response(
+ protocol: Protocol,
+ protocol_name: ProtocolName,
+ chunk: Option<(Vec<u8>, Proof, ChunkIndex)>,
+) -> (Vec<u8>, ProtocolName) {
+ (
+ match protocol {
+ Protocol::ChunkFetchingV1 => if let Some((chunk, proof, _)) = chunk {
+ v1::ChunkFetchingResponse::Chunk(ChunkResponse { chunk, proof })
+ } else {
+ v1::ChunkFetchingResponse::NoSuchChunk
+ }
+ .encode(),
+ Protocol::ChunkFetchingV2 => if let Some((chunk, proof, index)) = chunk {
+ v2::ChunkFetchingResponse::Chunk(ErasureChunk { chunk, index, proof })
+ } else {
+ v2::ChunkFetchingResponse::NoSuchChunk
+ }
+ .encode(),
+ _ => unreachable!(),
+ },
+ protocol_name,
+ )
+}
diff --git a/polkadot/node/network/availability-distribution/src/requester/mod.rs b/polkadot/node/network/availability-distribution/src/requester/mod.rs
index 97e80d696e7..efbdceb43bd 100644
--- a/polkadot/node/network/availability-distribution/src/requester/mod.rs
+++ b/polkadot/node/network/availability-distribution/src/requester/mod.rs
@@ -18,10 +18,7 @@
//! availability.
use std::{
- collections::{
- hash_map::{Entry, HashMap},
- hash_set::HashSet,
- },
+ collections::{hash_map::HashMap, hash_set::HashSet},
 iter::IntoIterator,
 pin::Pin,
};
@@ -32,13 +29,17 @@ use futures::{
 Stream,
};
+use polkadot_node_network_protocol::request_response::{v1, v2, IsRequest, ReqProtocolNames};
use polkadot_node_subsystem::{
 jaeger,
 messages::{ChainApiMessage, RuntimeApiMessage},
 overseer, ActivatedLeaf, ActiveLeavesUpdate,
};
-use polkadot_node_subsystem_util::runtime::{get_occupied_cores, RuntimeInfo};
-use polkadot_primitives::{CandidateHash, Hash, OccupiedCore, SessionIndex};
+use polkadot_node_subsystem_util::{
+ availability_chunks::availability_chunk_index,
+ runtime::{get_occupied_cores, RuntimeInfo},
+};
+use polkadot_primitives::{CandidateHash, CoreIndex, Hash, OccupiedCore, SessionIndex};
use super::{FatalError, Metrics, Result, LOG_TARGET};
@@ -77,6 +78,9 @@ pub struct Requester {
 /// Prometheus Metrics
 metrics: Metrics,
+
+ /// Mapping of the req-response protocols to the full protocol names.
+ req_protocol_names: ReqProtocolNames,
}
#[overseer::contextbounds(AvailabilityDistribution, prefix = self::overseer)]
@@ -88,9 +92,16 @@ impl Requester {
 ///
 /// You must feed it with `ActiveLeavesUpdate` via `update_fetching_heads` and make it progress
 /// by advancing the stream.
- pub fn new(metrics: Metrics) -> Self {
+ pub fn new(req_protocol_names: ReqProtocolNames, metrics: Metrics) -> Self {
 let (tx, rx) = mpsc::channel(1);
- Requester { fetches: HashMap::new(), session_cache: SessionCache::new(), tx, rx, metrics }
+ Requester {
+ fetches: HashMap::new(),
+ session_cache: SessionCache::new(),
+ tx,
+ rx,
+ metrics,
+ req_protocol_names,
+ }
 }
 /// Update heads that need availability distribution. 
@@ -197,56 +208,76 @@ impl Requester { runtime: &mut RuntimeInfo, leaf: Hash, leaf_session_index: SessionIndex, - cores: impl IntoIterator, + cores: impl IntoIterator, span: jaeger::Span, ) -> Result<()> { - for core in cores { + for (core_index, core) in cores { let mut span = span .child("check-fetch-candidate") .with_trace_id(core.candidate_hash) .with_string_tag("leaf", format!("{:?}", leaf)) .with_candidate(core.candidate_hash) .with_stage(jaeger::Stage::AvailabilityDistribution); - match self.fetches.entry(core.candidate_hash) { - Entry::Occupied(mut e) => + + if let Some(e) = self.fetches.get_mut(&core.candidate_hash) { // Just book keeping - we are already requesting that chunk: - { - span.add_string_tag("already-requested-chunk", "true"); - e.get_mut().add_leaf(leaf); - }, - Entry::Vacant(e) => { - span.add_string_tag("already-requested-chunk", "false"); - let tx = self.tx.clone(); - let metrics = self.metrics.clone(); - - let task_cfg = self - .session_cache - .with_session_info( - context, - runtime, - // We use leaf here, the relay_parent must be in the same session as - // the leaf. This is guaranteed by runtime which ensures that cores are - // cleared at session boundaries. At the same time, only leaves are - // guaranteed to be fetchable by the state trie. - leaf, - leaf_session_index, - |info| FetchTaskConfig::new(leaf, &core, tx, metrics, info, span), - ) - .await - .map_err(|err| { - gum::warn!( - target: LOG_TARGET, - error = ?err, - "Failed to spawn a fetch task" - ); - err + span.add_string_tag("already-requested-chunk", "true"); + e.add_leaf(leaf); + } else { + span.add_string_tag("already-requested-chunk", "false"); + let tx = self.tx.clone(); + let metrics = self.metrics.clone(); + + let session_info = self + .session_cache + .get_session_info( + context, + runtime, + // We use leaf here, the relay_parent must be in the same session as + // the leaf. This is guaranteed by runtime which ensures that cores are + // cleared at session boundaries. At the same time, only leaves are + // guaranteed to be fetchable by the state trie. + leaf, + leaf_session_index, + ) + .await + .map_err(|err| { + gum::warn!( + target: LOG_TARGET, + error = ?err, + "Failed to spawn a fetch task" + ); + err + })?; + + if let Some(session_info) = session_info { + let n_validators = + session_info.validator_groups.iter().fold(0usize, |mut acc, group| { + acc = acc.saturating_add(group.len()); + acc }); - - if let Ok(Some(task_cfg)) = task_cfg { - e.insert(FetchTask::start(task_cfg, context).await?); - } - // Not a validator, nothing to do. 
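Note: at this point the requester knows `n_validators` and, new in this patch, the core index; the next hunk derives which chunk this validator must fetch via `availability_chunk_index`. The real permutation lives in `polkadot_node_subsystem_util::availability_chunks` and is not reproduced in this diff; the sketch below uses a made-up per-core rotation purely to illustrate the shape of the interface.

```rust
// Illustrative stand-in for the chunk-mapping computation. The rotation
// below is NOT the real mapping; it only shows the interface shape.

fn availability_chunk_index_sketch(
    mapping_enabled: bool,
    n_validators: usize,
    core_index: u32,
    validator_index: u32,
) -> u32 {
    if !mapping_enabled {
        // Legacy behaviour: chunk index == validator index.
        return validator_index;
    }
    // Hypothetical per-core rotation, so different cores spread the
    // systematic chunks across different validators.
    ((validator_index as usize + core_index as usize) % n_validators) as u32
}

fn main() {
    let n = 10;
    // With the feature off, the mapping is the identity.
    assert_eq!(availability_chunk_index_sketch(false, n, 3, 4), 4);
    // With it on, the same validator holds a different chunk per core.
    assert_ne!(
        availability_chunk_index_sketch(true, n, 1, 4),
        availability_chunk_index_sketch(true, n, 2, 4),
    );
}
```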
- }, + let chunk_index = availability_chunk_index( + session_info.node_features.as_ref(), + n_validators, + core_index, + session_info.our_index, + )?; + + let task_cfg = FetchTaskConfig::new( + leaf, + &core, + tx, + metrics, + session_info, + chunk_index, + span, + self.req_protocol_names.get_name(v1::ChunkFetchingRequest::PROTOCOL), + self.req_protocol_names.get_name(v2::ChunkFetchingRequest::PROTOCOL), + ); + + self.fetches + .insert(core.candidate_hash, FetchTask::start(task_cfg, context).await?); + } } } Ok(()) diff --git a/polkadot/node/network/availability-distribution/src/requester/session_cache.rs b/polkadot/node/network/availability-distribution/src/requester/session_cache.rs index 8a48e19c282..a762c262dba 100644 --- a/polkadot/node/network/availability-distribution/src/requester/session_cache.rs +++ b/polkadot/node/network/availability-distribution/src/requester/session_cache.rs @@ -20,8 +20,10 @@ use rand::{seq::SliceRandom, thread_rng}; use schnellru::{ByLength, LruMap}; use polkadot_node_subsystem::overseer; -use polkadot_node_subsystem_util::runtime::RuntimeInfo; -use polkadot_primitives::{AuthorityDiscoveryId, GroupIndex, Hash, SessionIndex, ValidatorIndex}; +use polkadot_node_subsystem_util::runtime::{request_node_features, RuntimeInfo}; +use polkadot_primitives::{ + AuthorityDiscoveryId, GroupIndex, Hash, NodeFeatures, SessionIndex, ValidatorIndex, +}; use crate::{ error::{Error, Result}, @@ -62,6 +64,9 @@ pub struct SessionInfo { /// /// `None`, if we are not in fact part of any group. pub our_group: Option, + + /// Node features. + pub node_features: Option, } /// Report of bad validators. @@ -87,39 +92,29 @@ impl SessionCache { } } - /// Tries to retrieve `SessionInfo` and calls `with_info` if successful. - /// + /// Tries to retrieve `SessionInfo`. /// If this node is not a validator, the function will return `None`. - /// - /// Use this function over any `fetch_session_info` if all you need is a reference to - /// `SessionInfo`, as it avoids an expensive clone. - pub async fn with_session_info( - &mut self, + pub async fn get_session_info<'a, Context>( + &'a mut self, ctx: &mut Context, runtime: &mut RuntimeInfo, parent: Hash, session_index: SessionIndex, - with_info: F, - ) -> Result> - where - F: FnOnce(&SessionInfo) -> R, - { - if let Some(o_info) = self.session_info_cache.get(&session_index) { - gum::trace!(target: LOG_TARGET, session_index, "Got session from lru"); - return Ok(Some(with_info(o_info))) + ) -> Result> { + gum::trace!(target: LOG_TARGET, session_index, "Calling `get_session_info`"); + + if self.session_info_cache.get(&session_index).is_none() { + if let Some(info) = + Self::query_info_from_runtime(ctx, runtime, parent, session_index).await? + { + gum::trace!(target: LOG_TARGET, session_index, "Storing session info in lru!"); + self.session_info_cache.insert(session_index, info); + } else { + return Ok(None) + } } - if let Some(info) = - self.query_info_from_runtime(ctx, runtime, parent, session_index).await? - { - gum::trace!(target: LOG_TARGET, session_index, "Calling `with_info`"); - let r = with_info(&info); - gum::trace!(target: LOG_TARGET, session_index, "Storing session info in lru!"); - self.session_info_cache.insert(session_index, info); - Ok(Some(r)) - } else { - Ok(None) - } + Ok(self.session_info_cache.get(&session_index).map(|i| &*i)) } /// Variant of `report_bad` that never fails, but just logs errors. @@ -171,7 +166,6 @@ impl SessionCache { /// /// Returns: `None` if not a validator. 
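Note: `get_session_info` above replaces the closure-taking `with_session_info`: populate the cache if the entry is missing, then hand out a plain reference. A std-only sketch of that two-step pattern follows (a `HashMap` stands in for the `LruMap`); the split into probe-then-get sidesteps the borrow-checker problems of returning a reference from inside the probing branch.

```rust
use std::collections::HashMap;

struct SessionInfo { session_index: u32 }

struct SessionCache { cache: HashMap<u32, SessionInfo> }

impl SessionCache {
    /// Get-or-populate, returning `None` when the backend has no entry
    /// (e.g. we are not a validator in that session).
    fn get_session_info(&mut self, session_index: u32) -> Option<&SessionInfo> {
        // Probe and populate in a first step...
        if !self.cache.contains_key(&session_index) {
            if let Some(info) = Self::query_backend(session_index) {
                self.cache.insert(session_index, info);
            } else {
                return None;
            }
        }
        // ...then take the reference in a second, separate borrow.
        self.cache.get(&session_index)
    }

    // Stand-in for the runtime query.
    fn query_backend(session_index: u32) -> Option<SessionInfo> {
        Some(SessionInfo { session_index })
    }
}

fn main() {
    let mut cache = SessionCache { cache: HashMap::new() };
    assert!(cache.get_session_info(7).is_some()); // populated on first access
    assert!(cache.get_session_info(7).is_some()); // served from cache
}
```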
async fn query_info_from_runtime( - &self, ctx: &mut Context, runtime: &mut RuntimeInfo, relay_parent: Hash, @@ -181,6 +175,9 @@ impl SessionCache { .get_session_info_by_index(ctx.sender(), relay_parent, session_index) .await?; + let node_features = + request_node_features(relay_parent, session_index, ctx.sender()).await?; + let discovery_keys = info.session_info.discovery_keys.clone(); let mut validator_groups = info.session_info.validator_groups.clone(); @@ -208,7 +205,13 @@ impl SessionCache { }) .collect(); - let info = SessionInfo { validator_groups, our_index, session_index, our_group }; + let info = SessionInfo { + validator_groups, + our_index, + session_index, + our_group, + node_features, + }; return Ok(Some(info)) } return Ok(None) diff --git a/polkadot/node/network/availability-distribution/src/requester/tests.rs b/polkadot/node/network/availability-distribution/src/requester/tests.rs index 0dedd4f091a..09567a8f87d 100644 --- a/polkadot/node/network/availability-distribution/src/requester/tests.rs +++ b/polkadot/node/network/availability-distribution/src/requester/tests.rs @@ -14,21 +14,17 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use std::collections::HashMap; - -use std::future::Future; - use futures::FutureExt; +use std::{collections::HashMap, future::Future}; -use polkadot_node_network_protocol::jaeger; +use polkadot_node_network_protocol::{jaeger, request_response::ReqProtocolNames}; use polkadot_node_primitives::{BlockData, ErasureChunk, PoV}; -use polkadot_node_subsystem_test_helpers::mock::new_leaf; use polkadot_node_subsystem_util::runtime::RuntimeInfo; use polkadot_primitives::{ - BlockNumber, CoreState, ExecutorParams, GroupIndex, Hash, Id as ParaId, NodeFeatures, + BlockNumber, ChunkIndex, CoreState, ExecutorParams, GroupIndex, Hash, Id as ParaId, ScheduledCore, SessionIndex, SessionInfo, }; -use sp_core::traits::SpawnNamed; +use sp_core::{testing::TaskExecutor, traits::SpawnNamed}; use polkadot_node_subsystem::{ messages::{ @@ -38,19 +34,21 @@ use polkadot_node_subsystem::{ ActiveLeavesUpdate, SpawnGlue, }; use polkadot_node_subsystem_test_helpers::{ - make_subsystem_context, mock::make_ferdie_keystore, TestSubsystemContext, - TestSubsystemContextHandle, + make_subsystem_context, + mock::{make_ferdie_keystore, new_leaf}, + TestSubsystemContext, TestSubsystemContextHandle, }; -use sp_core::testing::TaskExecutor; - -use crate::tests::mock::{get_valid_chunk_data, make_session_info, OccupiedCoreBuilder}; +use crate::tests::{ + mock::{get_valid_chunk_data, make_session_info, OccupiedCoreBuilder}, + node_features_with_mapping_enabled, +}; use super::Requester; fn get_erasure_chunk() -> ErasureChunk { let pov = PoV { block_data: BlockData(vec![45, 46, 47]) }; - get_valid_chunk_data(pov).1 + get_valid_chunk_data(pov, 10, ChunkIndex(0)).1 } #[derive(Clone)] @@ -126,7 +124,7 @@ fn spawn_virtual_overseer( .expect("Receiver should be alive."); }, RuntimeApiRequest::NodeFeatures(_, tx) => { - tx.send(Ok(NodeFeatures::EMPTY)) + tx.send(Ok(node_features_with_mapping_enabled())) .expect("Receiver should be alive."); }, RuntimeApiRequest::AvailabilityCores(tx) => { @@ -146,6 +144,8 @@ fn spawn_virtual_overseer( group_responsible: GroupIndex(1), para_id, relay_parent: hash, + n_validators: 10, + chunk_index: ChunkIndex(0), } .build() .0, @@ -201,7 +201,8 @@ fn test_harness>( #[test] fn check_ancestry_lookup_in_same_session() { let test_state = TestState::new(); - let mut requester = 
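Note: the virtual overseer in the tests above answers `NodeFeatures` runtime requests with the chunk-mapping feature enabled. A minimal sketch of the oneshot request/answer pattern these mocks are built on, assuming the `futures` crate; the request enum and the feature-bit index are stand-ins.

```rust
use futures::{channel::oneshot, executor::block_on};

// Stand-in for a runtime-API request carrying its reply channel.
enum RuntimeRequest {
    NodeFeatures(oneshot::Sender<Vec<bool>>),
}

// The mock "overseer" side: answer whatever request comes in.
fn answer(req: RuntimeRequest) {
    match req {
        RuntimeRequest::NodeFeatures(tx) => {
            // Bit 2 is a hypothetical stand-in for the
            // availability-chunk-mapping feature index.
            let mut features = vec![false; 3];
            features[2] = true;
            tx.send(features).expect("receiver should be alive");
        },
    }
}

fn main() {
    let (tx, rx) = oneshot::channel();
    answer(RuntimeRequest::NodeFeatures(tx));
    let features = block_on(rx).expect("answer was sent");
    assert!(features[2]);
}
```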
Requester::new(Default::default()); + let mut requester = + Requester::new(ReqProtocolNames::new(&Hash::repeat_byte(0xff), None), Default::default()); let keystore = make_ferdie_keystore(); let mut runtime = RuntimeInfo::new(Some(keystore)); @@ -268,7 +269,8 @@ fn check_ancestry_lookup_in_same_session() { #[test] fn check_ancestry_lookup_in_different_sessions() { let mut test_state = TestState::new(); - let mut requester = Requester::new(Default::default()); + let mut requester = + Requester::new(ReqProtocolNames::new(&Hash::repeat_byte(0xff), None), Default::default()); let keystore = make_ferdie_keystore(); let mut runtime = RuntimeInfo::new(Some(keystore)); diff --git a/polkadot/node/network/availability-distribution/src/responder.rs b/polkadot/node/network/availability-distribution/src/responder.rs index 54b188f7f01..2c1885d2772 100644 --- a/polkadot/node/network/availability-distribution/src/responder.rs +++ b/polkadot/node/network/availability-distribution/src/responder.rs @@ -18,11 +18,12 @@ use std::sync::Arc; -use futures::channel::oneshot; +use futures::{channel::oneshot, select, FutureExt}; use fatality::Nested; +use parity_scale_codec::{Decode, Encode}; use polkadot_node_network_protocol::{ - request_response::{v1, IncomingRequest, IncomingRequestReceiver}, + request_response::{v1, v2, IncomingRequest, IncomingRequestReceiver, IsRequest}, UnifiedReputationChange as Rep, }; use polkadot_node_primitives::{AvailableData, ErasureChunk}; @@ -66,33 +67,66 @@ pub async fn run_pov_receiver( } /// Receiver task to be forked as a separate task to handle chunk requests. -pub async fn run_chunk_receiver( +pub async fn run_chunk_receivers( mut sender: Sender, - mut receiver: IncomingRequestReceiver, + mut receiver_v1: IncomingRequestReceiver, + mut receiver_v2: IncomingRequestReceiver, metrics: Metrics, ) where Sender: SubsystemSender, { + let make_resp_v1 = |chunk: Option| match chunk { + None => v1::ChunkFetchingResponse::NoSuchChunk, + Some(chunk) => v1::ChunkFetchingResponse::Chunk(chunk.into()), + }; + + let make_resp_v2 = |chunk: Option| match chunk { + None => v2::ChunkFetchingResponse::NoSuchChunk, + Some(chunk) => v2::ChunkFetchingResponse::Chunk(chunk.into()), + }; + loop { - match receiver.recv(|| vec![COST_INVALID_REQUEST]).await.into_nested() { - Ok(Ok(msg)) => { - answer_chunk_request_log(&mut sender, msg, &metrics).await; - }, - Err(fatal) => { - gum::debug!( - target: LOG_TARGET, - error = ?fatal, - "Shutting down chunk receiver." - ); - return - }, - Ok(Err(jfyi)) => { - gum::debug!( - target: LOG_TARGET, - error = ?jfyi, - "Error decoding incoming chunk request." - ); + select! { + res = receiver_v1.recv(|| vec![COST_INVALID_REQUEST]).fuse() => match res.into_nested() { + Ok(Ok(msg)) => { + answer_chunk_request_log(&mut sender, msg, make_resp_v1, &metrics).await; + }, + Err(fatal) => { + gum::debug!( + target: LOG_TARGET, + error = ?fatal, + "Shutting down chunk receiver." + ); + return + }, + Ok(Err(jfyi)) => { + gum::debug!( + target: LOG_TARGET, + error = ?jfyi, + "Error decoding incoming chunk request." + ); + } }, + res = receiver_v2.recv(|| vec![COST_INVALID_REQUEST]).fuse() => match res.into_nested() { + Ok(Ok(msg)) => { + answer_chunk_request_log(&mut sender, msg.into(), make_resp_v2, &metrics).await; + }, + Err(fatal) => { + gum::debug!( + target: LOG_TARGET, + error = ?fatal, + "Shutting down chunk receiver." + ); + return + }, + Ok(Err(jfyi)) => { + gum::debug!( + target: LOG_TARGET, + error = ?jfyi, + "Error decoding incoming chunk request." 
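Note: `run_chunk_receivers` above multiplexes the v1 and v2 inbound streams in a single task with `futures::select!`, pairing each arm with its own response constructor. A self-contained sketch of that loop shape follows, assuming `futures = "0.3"`; the request types are stand-ins.

```rust
use futures::{channel::mpsc, select, SinkExt, StreamExt};

// Hypothetical stand-ins for the two request versions.
struct V1Request(u32);
struct V2Request(u32);

// One task serves both protocol versions; each arm would use its own
// response constructor, mirroring `make_resp_v1` / `make_resp_v2` above.
async fn run_receivers(
    mut rx_v1: mpsc::Receiver<V1Request>,
    mut rx_v2: mpsc::Receiver<V2Request>,
) {
    loop {
        select! {
            req = rx_v1.next() => match req {
                Some(V1Request(id)) => println!("served v1 request {id}"),
                None => return, // channel closed: shut down
            },
            req = rx_v2.next() => match req {
                Some(V2Request(id)) => println!("served v2 request {id}"),
                None => return,
            },
        }
    }
}

fn main() {
    futures::executor::block_on(async {
        let (mut tx1, rx1) = mpsc::channel(4);
        let (mut tx2, rx2) = mpsc::channel(4);
        tx1.send(V1Request(1)).await.unwrap();
        tx2.send(V2Request(2)).await.unwrap();
        drop((tx1, tx2)); // close both channels so the loop exits
        run_receivers(rx1, rx2).await;
    });
}
```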
+ ); + } + } } } } @@ -124,15 +158,18 @@ pub async fn answer_pov_request_log( /// Variant of `answer_chunk_request` that does Prometheus metric and logging on errors. /// /// Any errors of `answer_request` will simply be logged. -pub async fn answer_chunk_request_log( +pub async fn answer_chunk_request_log( sender: &mut Sender, - req: IncomingRequest, + req: IncomingRequest, + make_response: MakeResp, metrics: &Metrics, -) -> () -where +) where + Req: IsRequest + Decode + Encode + Into, + Req::Response: Encode, Sender: SubsystemSender, + MakeResp: Fn(Option) -> Req::Response, { - let res = answer_chunk_request(sender, req).await; + let res = answer_chunk_request(sender, req, make_response).await; match res { Ok(result) => metrics.on_served_chunk(if result { SUCCEEDED } else { NOT_FOUND }), Err(err) => { @@ -177,39 +214,46 @@ where /// Answer an incoming chunk request by querying the av store. /// /// Returns: `Ok(true)` if chunk was found and served. -pub async fn answer_chunk_request( +pub async fn answer_chunk_request( sender: &mut Sender, - req: IncomingRequest, + req: IncomingRequest, + make_response: MakeResp, ) -> Result where Sender: SubsystemSender, + Req: IsRequest + Decode + Encode + Into, + Req::Response: Encode, + MakeResp: Fn(Option) -> Req::Response, { - let span = jaeger::Span::new(req.payload.candidate_hash, "answer-chunk-request"); + // V1 and V2 requests have the same payload, so decoding into either one will work. It's the + // responses that differ, hence the `MakeResp` generic. + let payload: v1::ChunkFetchingRequest = req.payload.into(); + let span = jaeger::Span::new(payload.candidate_hash, "answer-chunk-request"); let _child_span = span .child("answer-chunk-request") - .with_trace_id(req.payload.candidate_hash) - .with_chunk_index(req.payload.index.0); + .with_trace_id(payload.candidate_hash) + .with_validator_index(payload.index); - let chunk = query_chunk(sender, req.payload.candidate_hash, req.payload.index).await?; + let chunk = query_chunk(sender, payload.candidate_hash, payload.index).await?; let result = chunk.is_some(); gum::trace!( target: LOG_TARGET, - hash = ?req.payload.candidate_hash, - index = ?req.payload.index, + hash = ?payload.candidate_hash, + index = ?payload.index, peer = ?req.peer, has_data = ?chunk.is_some(), "Serving chunk", ); - let response = match chunk { - None => v1::ChunkFetchingResponse::NoSuchChunk, - Some(chunk) => v1::ChunkFetchingResponse::Chunk(chunk.into()), - }; + let response = make_response(chunk); + + req.pending_response + .send_response(response) + .map_err(|_| JfyiError::SendResponse)?; - req.send_response(response).map_err(|_| JfyiError::SendResponse)?; Ok(result) } diff --git a/polkadot/node/network/availability-distribution/src/tests/mock.rs b/polkadot/node/network/availability-distribution/src/tests/mock.rs index 3df662fe546..b41c493a107 100644 --- a/polkadot/node/network/availability-distribution/src/tests/mock.rs +++ b/polkadot/node/network/availability-distribution/src/tests/mock.rs @@ -23,9 +23,9 @@ use sp_keyring::Sr25519Keyring; use polkadot_erasure_coding::{branches, obtain_chunks_v1 as obtain_chunks}; use polkadot_node_primitives::{AvailableData, BlockData, ErasureChunk, PoV, Proof}; use polkadot_primitives::{ - CandidateCommitments, CandidateDescriptor, CandidateHash, CommittedCandidateReceipt, - GroupIndex, Hash, HeadData, Id as ParaId, IndexedVec, OccupiedCore, PersistedValidationData, - SessionInfo, ValidatorIndex, + CandidateCommitments, CandidateDescriptor, CandidateHash, ChunkIndex, + 
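Note: the handler above is now generic: any request version that converts into the common v1 payload can be served, with a `MakeResp` closure producing the version-specific response. A sketch of that signature with illustrative local types:

```rust
// Generic request answering: both request versions share one payload, only
// the response constructor differs. Types are illustrative stand-ins.

struct CommonPayload { candidate: u64, index: u32 }

struct V1Request { candidate: u64, index: u32 }
struct V2Request { candidate: u64, index: u32 }

impl From<V1Request> for CommonPayload {
    fn from(r: V1Request) -> Self { CommonPayload { candidate: r.candidate, index: r.index } }
}
impl From<V2Request> for CommonPayload {
    fn from(r: V2Request) -> Self { CommonPayload { candidate: r.candidate, index: r.index } }
}

fn answer_chunk_request<Req, Resp, MakeResp>(req: Req, make_response: MakeResp) -> Resp
where
    Req: Into<CommonPayload>,
    MakeResp: Fn(Option<Vec<u8>>) -> Resp,
{
    let payload: CommonPayload = req.into();
    // Stand-in for the av-store lookup.
    let chunk = lookup_chunk(payload.candidate, payload.index);
    make_response(chunk)
}

fn lookup_chunk(_candidate: u64, index: u32) -> Option<Vec<u8>> {
    (index == 0).then(|| vec![1, 2, 3])
}

fn main() {
    // v1 answers without an index, v2 answers with one.
    let v1 = answer_chunk_request(V1Request { candidate: 1, index: 0 }, |c| c);
    let v2 = answer_chunk_request(V2Request { candidate: 1, index: 0 }, |c| c.map(|d| (d, 0u32)));
    assert!(v1.is_some() && v2.is_some());
}
```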
CommittedCandidateReceipt, GroupIndex, Hash, HeadData, Id as ParaId, IndexedVec, OccupiedCore, + PersistedValidationData, SessionInfo, ValidatorIndex, }; use polkadot_primitives_test_helpers::{ dummy_collator, dummy_collator_signature, dummy_hash, dummy_validation_code, @@ -75,13 +75,16 @@ pub struct OccupiedCoreBuilder { pub group_responsible: GroupIndex, pub para_id: ParaId, pub relay_parent: Hash, + pub n_validators: usize, + pub chunk_index: ChunkIndex, } impl OccupiedCoreBuilder { pub fn build(self) -> (OccupiedCore, (CandidateHash, ErasureChunk)) { let pov = PoV { block_data: BlockData(vec![45, 46, 47]) }; let pov_hash = pov.hash(); - let (erasure_root, chunk) = get_valid_chunk_data(pov.clone()); + let (erasure_root, chunk) = + get_valid_chunk_data(pov.clone(), self.n_validators, self.chunk_index); let candidate_receipt = TestCandidateBuilder { para_id: self.para_id, pov_hash, @@ -133,8 +136,11 @@ impl TestCandidateBuilder { } // Get chunk for index 0 -pub fn get_valid_chunk_data(pov: PoV) -> (Hash, ErasureChunk) { - let fake_validator_count = 10; +pub fn get_valid_chunk_data( + pov: PoV, + n_validators: usize, + chunk_index: ChunkIndex, +) -> (Hash, ErasureChunk) { let persisted = PersistedValidationData { parent_head: HeadData(vec![7, 8, 9]), relay_parent_number: Default::default(), @@ -142,17 +148,17 @@ pub fn get_valid_chunk_data(pov: PoV) -> (Hash, ErasureChunk) { relay_parent_storage_root: Default::default(), }; let available_data = AvailableData { validation_data: persisted, pov: Arc::new(pov) }; - let chunks = obtain_chunks(fake_validator_count, &available_data).unwrap(); + let chunks = obtain_chunks(n_validators, &available_data).unwrap(); let branches = branches(chunks.as_ref()); let root = branches.root(); let chunk = branches .enumerate() .map(|(index, (proof, chunk))| ErasureChunk { chunk: chunk.to_vec(), - index: ValidatorIndex(index as _), + index: ChunkIndex(index as _), proof: Proof::try_from(proof).unwrap(), }) - .next() - .expect("There really should be 10 chunks."); + .nth(chunk_index.0 as usize) + .expect("There really should be enough chunks."); (root, chunk) } diff --git a/polkadot/node/network/availability-distribution/src/tests/mod.rs b/polkadot/node/network/availability-distribution/src/tests/mod.rs index 214498979fb..b30e11a293c 100644 --- a/polkadot/node/network/availability-distribution/src/tests/mod.rs +++ b/polkadot/node/network/availability-distribution/src/tests/mod.rs @@ -17,9 +17,12 @@ use std::collections::HashSet; use futures::{executor, future, Future}; +use rstest::rstest; -use polkadot_node_network_protocol::request_response::{IncomingRequest, ReqProtocolNames}; -use polkadot_primitives::{Block, CoreState, Hash}; +use polkadot_node_network_protocol::request_response::{ + IncomingRequest, Protocol, ReqProtocolNames, +}; +use polkadot_primitives::{node_features, Block, CoreState, Hash, NodeFeatures}; use sp_keystore::KeystorePtr; use polkadot_node_subsystem_test_helpers as test_helpers; @@ -35,67 +38,129 @@ pub(crate) mod mock; fn test_harness>( keystore: KeystorePtr, + req_protocol_names: ReqProtocolNames, test_fx: impl FnOnce(TestHarness) -> T, -) { - sp_tracing::try_init_simple(); +) -> std::result::Result<(), FatalError> { + sp_tracing::init_for_tests(); let pool = sp_core::testing::TaskExecutor::new(); let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone()); - let genesis_hash = Hash::repeat_byte(0xff); - let req_protocol_names = ReqProtocolNames::new(&genesis_hash, None); let (pov_req_receiver, pov_req_cfg) = 
IncomingRequest::get_config_receiver::< Block, sc_network::NetworkWorker, >(&req_protocol_names); - let (chunk_req_receiver, chunk_req_cfg) = IncomingRequest::get_config_receiver::< + let (chunk_req_v1_receiver, chunk_req_v1_cfg) = IncomingRequest::get_config_receiver::< + Block, + sc_network::NetworkWorker, + >(&req_protocol_names); + let (chunk_req_v2_receiver, chunk_req_v2_cfg) = IncomingRequest::get_config_receiver::< Block, sc_network::NetworkWorker, >(&req_protocol_names); let subsystem = AvailabilityDistributionSubsystem::new( keystore, - IncomingRequestReceivers { pov_req_receiver, chunk_req_receiver }, + IncomingRequestReceivers { pov_req_receiver, chunk_req_v1_receiver, chunk_req_v2_receiver }, + req_protocol_names, Default::default(), ); let subsystem = subsystem.run(context); - let test_fut = test_fx(TestHarness { virtual_overseer, pov_req_cfg, chunk_req_cfg, pool }); + let test_fut = test_fx(TestHarness { + virtual_overseer, + pov_req_cfg, + chunk_req_v1_cfg, + chunk_req_v2_cfg, + pool, + }); futures::pin_mut!(test_fut); futures::pin_mut!(subsystem); - executor::block_on(future::join(test_fut, subsystem)).1.unwrap(); + executor::block_on(future::join(test_fut, subsystem)).1 +} + +pub fn node_features_with_mapping_enabled() -> NodeFeatures { + let mut node_features = NodeFeatures::new(); + node_features.resize(node_features::FeatureIndex::AvailabilityChunkMapping as usize + 1, false); + node_features.set(node_features::FeatureIndex::AvailabilityChunkMapping as u8 as usize, true); + node_features } /// Simple basic check, whether the subsystem works as expected. /// /// Exceptional cases are tested as unit tests in `fetch_task`. -#[test] -fn check_basic() { - let state = TestState::default(); - test_harness(state.keystore.clone(), move |harness| state.run(harness)); +#[rstest] +#[case(NodeFeatures::EMPTY, Protocol::ChunkFetchingV1)] +#[case(NodeFeatures::EMPTY, Protocol::ChunkFetchingV2)] +#[case(node_features_with_mapping_enabled(), Protocol::ChunkFetchingV1)] +#[case(node_features_with_mapping_enabled(), Protocol::ChunkFetchingV2)] +fn check_basic(#[case] node_features: NodeFeatures, #[case] chunk_resp_protocol: Protocol) { + let req_protocol_names = ReqProtocolNames::new(&Hash::repeat_byte(0xff), None); + let state = + TestState::new(node_features.clone(), req_protocol_names.clone(), chunk_resp_protocol); + + if node_features == node_features_with_mapping_enabled() && + chunk_resp_protocol == Protocol::ChunkFetchingV1 + { + // For this specific case, chunk fetching is not possible, because the ValidatorIndex is not + // equal to the ChunkIndex and the peer does not send back the actual ChunkIndex. + let _ = test_harness(state.keystore.clone(), req_protocol_names, move |harness| { + state.run_assert_timeout(harness) + }); + } else { + test_harness(state.keystore.clone(), req_protocol_names, move |harness| state.run(harness)) + .unwrap(); + } } /// Check whether requester tries all validators in group. 
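Note: `check_basic` above fans out over node features and response protocol with `rstest`; each `#[case]` attribute expands into its own test, and the one incompatible combination (mapping enabled, v1 responses) is asserted to time out. A minimal self-contained example of the same parameterization style, assuming `rstest` as a dev-dependency:

```rust
use rstest::rstest;

#[rstest]
#[case(false, 1)] // mapping disabled, v1 responses: fetch succeeds
#[case(false, 2)] // mapping disabled, v2 responses: fetch succeeds
#[case(true, 2)]  // mapping enabled, v2 responses: fetch succeeds
fn chunk_fetch_succeeds(#[case] mapping_enabled: bool, #[case] protocol_version: u8) {
    // v1 responses cannot return the chunk index, so a shuffled mapping is
    // unservable over v1; the real suite asserts a timeout for that case
    // instead of a successful run.
    let compatible = !(mapping_enabled && protocol_version == 1);
    assert!(compatible);
}
```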
-#[test] -fn check_fetch_tries_all() { - let mut state = TestState::default(); +#[rstest] +#[case(NodeFeatures::EMPTY, Protocol::ChunkFetchingV1)] +#[case(NodeFeatures::EMPTY, Protocol::ChunkFetchingV2)] +#[case(node_features_with_mapping_enabled(), Protocol::ChunkFetchingV1)] +#[case(node_features_with_mapping_enabled(), Protocol::ChunkFetchingV2)] +fn check_fetch_tries_all( + #[case] node_features: NodeFeatures, + #[case] chunk_resp_protocol: Protocol, +) { + let req_protocol_names = ReqProtocolNames::new(&Hash::repeat_byte(0xff), None); + let mut state = + TestState::new(node_features.clone(), req_protocol_names.clone(), chunk_resp_protocol); for (_, v) in state.chunks.iter_mut() { // 4 validators in group, so this should still succeed: v.push(None); v.push(None); v.push(None); } - test_harness(state.keystore.clone(), move |harness| state.run(harness)); + + if node_features == node_features_with_mapping_enabled() && + chunk_resp_protocol == Protocol::ChunkFetchingV1 + { + // For this specific case, chunk fetching is not possible, because the ValidatorIndex is not + // equal to the ChunkIndex and the peer does not send back the actual ChunkIndex. + let _ = test_harness(state.keystore.clone(), req_protocol_names, move |harness| { + state.run_assert_timeout(harness) + }); + } else { + test_harness(state.keystore.clone(), req_protocol_names, move |harness| state.run(harness)) + .unwrap(); + } } /// Check whether requester tries all validators in group /// /// Check that requester will retry the fetch on error on the next block still pending /// availability. -#[test] -fn check_fetch_retry() { - let mut state = TestState::default(); +#[rstest] +#[case(NodeFeatures::EMPTY, Protocol::ChunkFetchingV1)] +#[case(NodeFeatures::EMPTY, Protocol::ChunkFetchingV2)] +#[case(node_features_with_mapping_enabled(), Protocol::ChunkFetchingV1)] +#[case(node_features_with_mapping_enabled(), Protocol::ChunkFetchingV2)] +fn check_fetch_retry(#[case] node_features: NodeFeatures, #[case] chunk_resp_protocol: Protocol) { + let req_protocol_names = ReqProtocolNames::new(&Hash::repeat_byte(0xff), None); + let mut state = + TestState::new(node_features.clone(), req_protocol_names.clone(), chunk_resp_protocol); state .cores .insert(state.relay_chain[2], state.cores.get(&state.relay_chain[1]).unwrap().clone()); @@ -126,5 +191,17 @@ fn check_fetch_retry() { v.push(None); v.push(None); } - test_harness(state.keystore.clone(), move |harness| state.run(harness)); + + if node_features == node_features_with_mapping_enabled() && + chunk_resp_protocol == Protocol::ChunkFetchingV1 + { + // For this specific case, chunk fetching is not possible, because the ValidatorIndex is not + // equal to the ChunkIndex and the peer does not send back the actual ChunkIndex. 
+ let _ = test_harness(state.keystore.clone(), req_protocol_names, move |harness| { + state.run_assert_timeout(harness) + }); + } else { + test_harness(state.keystore.clone(), req_protocol_names, move |harness| state.run(harness)) + .unwrap(); + } } diff --git a/polkadot/node/network/availability-distribution/src/tests/state.rs b/polkadot/node/network/availability-distribution/src/tests/state.rs index 93411511e76..ecc3eefbf3d 100644 --- a/polkadot/node/network/availability-distribution/src/tests/state.rs +++ b/polkadot/node/network/availability-distribution/src/tests/state.rs @@ -19,9 +19,9 @@ use std::{ time::Duration, }; -use network::ProtocolName; +use network::{request_responses::OutgoingResponse, ProtocolName, RequestFailure}; use polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; -use polkadot_node_subsystem_util::TimeoutExt; +use polkadot_node_subsystem_util::{availability_chunks::availability_chunk_index, TimeoutExt}; use futures::{ channel::{mpsc, oneshot}, @@ -35,7 +35,7 @@ use sp_core::{testing::TaskExecutor, traits::SpawnNamed}; use sp_keystore::KeystorePtr; use polkadot_node_network_protocol::request_response::{ - v1, IncomingRequest, OutgoingRequest, Requests, + v1, v2, IncomingRequest, OutgoingRequest, Protocol, ReqProtocolNames, Requests, }; use polkadot_node_primitives::ErasureChunk; use polkadot_node_subsystem::{ @@ -47,8 +47,8 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - CandidateHash, CoreState, ExecutorParams, GroupIndex, Hash, Id as ParaId, NodeFeatures, - ScheduledCore, SessionInfo, ValidatorIndex, + CandidateHash, ChunkIndex, CoreIndex, CoreState, ExecutorParams, GroupIndex, Hash, + Id as ParaId, NodeFeatures, ScheduledCore, SessionInfo, ValidatorIndex, }; use test_helpers::mock::{make_ferdie_keystore, new_leaf}; @@ -59,7 +59,8 @@ type VirtualOverseer = test_helpers::TestSubsystemContextHandle>, pub keystore: KeystorePtr, + pub node_features: NodeFeatures, + pub chunk_response_protocol: Protocol, + pub req_protocol_names: ReqProtocolNames, + pub our_chunk_index: ChunkIndex, } -impl Default for TestState { - fn default() -> Self { +impl TestState { + /// Initialize a default test state. 
+ pub fn new( + node_features: NodeFeatures, + req_protocol_names: ReqProtocolNames, + chunk_response_protocol: Protocol, + ) -> Self { let relay_chain: Vec<_> = (1u8..10).map(Hash::repeat_byte).collect(); let chain_a = ParaId::from(1); let chain_b = ParaId::from(2); @@ -97,6 +107,14 @@ impl Default for TestState { let session_info = make_session_info(); + let our_chunk_index = availability_chunk_index( + Some(&node_features), + session_info.validators.len(), + CoreIndex(1), + ValidatorIndex(0), + ) + .unwrap(); + let (cores, chunks) = { let mut cores = HashMap::new(); let mut chunks = HashMap::new(); @@ -123,6 +141,8 @@ impl Default for TestState { group_responsible: GroupIndex(i as _), para_id: *para_id, relay_parent: *relay_parent, + n_validators: session_info.validators.len(), + chunk_index: our_chunk_index, } .build(); (CoreState::Occupied(core), chunk) @@ -132,8 +152,8 @@ impl Default for TestState { // Skip chunks for our own group (won't get fetched): let mut chunks_other_groups = p_chunks.into_iter(); chunks_other_groups.next(); - for (validator_index, chunk) in chunks_other_groups { - chunks.insert((validator_index, chunk.index), vec![Some(chunk)]); + for (candidate, chunk) in chunks_other_groups { + chunks.insert((candidate, ValidatorIndex(0)), vec![Some(chunk)]); } } (cores, chunks) @@ -145,18 +165,27 @@ impl Default for TestState { session_info, cores, keystore, + node_features, + chunk_response_protocol, + req_protocol_names, + our_chunk_index, } } -} -impl TestState { /// Run, but fail after some timeout. pub async fn run(self, harness: TestHarness) { // Make sure test won't run forever. - let f = self.run_inner(harness).timeout(Duration::from_secs(10)); + let f = self.run_inner(harness).timeout(Duration::from_secs(5)); assert!(f.await.is_some(), "Test ran into timeout"); } + /// Run, and assert an expected timeout. + pub async fn run_assert_timeout(self, harness: TestHarness) { + // Make sure test won't run forever. + let f = self.run_inner(harness).timeout(Duration::from_secs(5)); + assert!(f.await.is_none(), "Test should have run into timeout"); + } + /// Run tests with the given mock values in `TestState`. /// /// This will simply advance through the simulated chain and examines whether the subsystem @@ -214,15 +243,41 @@ impl TestState { )) => { for req in reqs { // Forward requests: - let in_req = to_incoming_req(&harness.pool, req); - harness - .chunk_req_cfg - .inbound_queue - .as_mut() - .unwrap() - .send(in_req.into_raw()) - .await - .unwrap(); + match self.chunk_response_protocol { + Protocol::ChunkFetchingV1 => { + let in_req = to_incoming_req_v1( + &harness.pool, + req, + self.req_protocol_names.get_name(Protocol::ChunkFetchingV1), + ); + + harness + .chunk_req_v1_cfg + .inbound_queue + .as_mut() + .unwrap() + .send(in_req.into_raw()) + .await + .unwrap(); + }, + Protocol::ChunkFetchingV2 => { + let in_req = to_incoming_req_v2( + &harness.pool, + req, + self.req_protocol_names.get_name(Protocol::ChunkFetchingV2), + ); + + harness + .chunk_req_v2_cfg + .inbound_queue + .as_mut() + .unwrap() + .send(in_req.into_raw()) + .await + .unwrap(); + }, + _ => panic!("Unexpected protocol"), + } } }, AllMessages::AvailabilityStore(AvailabilityStoreMessage::QueryChunk( @@ -240,13 +295,16 @@ impl TestState { AllMessages::AvailabilityStore(AvailabilityStoreMessage::StoreChunk { candidate_hash, chunk, + validator_index, tx, .. 
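Note: the test driver above routes each outgoing request into whichever inbound queue matches the protocol the case is exercising (`chunk_req_v1_cfg` vs `chunk_req_v2_cfg`). A std-only sketch of that dispatch, with `VecDeque`s standing in for the real inbound queues:

```rust
use std::collections::VecDeque;

#[derive(Clone, Copy)]
enum Protocol { ChunkFetchingV1, ChunkFetchingV2 }

struct Harness {
    v1_queue: VecDeque<Vec<u8>>,
    v2_queue: VecDeque<Vec<u8>>,
}

impl Harness {
    /// Route a raw request into whichever inbound queue matches the
    /// protocol version under test.
    fn forward(&mut self, protocol: Protocol, raw_request: Vec<u8>) {
        match protocol {
            Protocol::ChunkFetchingV1 => self.v1_queue.push_back(raw_request),
            Protocol::ChunkFetchingV2 => self.v2_queue.push_back(raw_request),
        }
    }
}

fn main() {
    let mut harness = Harness { v1_queue: VecDeque::new(), v2_queue: VecDeque::new() };
    harness.forward(Protocol::ChunkFetchingV2, vec![0xde, 0xad]);
    assert_eq!(harness.v2_queue.len(), 1);
    assert!(harness.v1_queue.is_empty());
}
```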
}) => { assert!( - self.valid_chunks.contains(&(candidate_hash, chunk.index)), + self.valid_chunks.contains(&(candidate_hash, validator_index)), "Only valid chunks should ever get stored." ); + assert_eq!(self.our_chunk_index, chunk.index); + tx.send(Ok(())).expect("Receiver is expected to be alive"); gum::trace!(target: LOG_TARGET, "'Stored' fetched chunk."); remaining_stores -= 1; @@ -265,14 +323,15 @@ impl TestState { tx.send(Ok(Some(ExecutorParams::default()))) .expect("Receiver should be alive."); }, - RuntimeApiRequest::NodeFeatures(_, si_tx) => { - si_tx.send(Ok(NodeFeatures::EMPTY)).expect("Receiver should be alive."); - }, RuntimeApiRequest::AvailabilityCores(tx) => { gum::trace!(target: LOG_TARGET, cores= ?self.cores[&hash], hash = ?hash, "Sending out cores for hash"); tx.send(Ok(self.cores[&hash].clone())) .expect("Receiver should still be alive"); }, + RuntimeApiRequest::NodeFeatures(_, tx) => { + tx.send(Ok(self.node_features.clone())) + .expect("Receiver should still be alive"); + }, _ => { panic!("Unexpected runtime request: {:?}", req); }, @@ -286,7 +345,10 @@ impl TestState { .unwrap_or_default(); response_channel.send(Ok(ancestors)).expect("Receiver is expected to be alive"); }, - _ => {}, + + _ => { + panic!("Received unexpected message") + }, } } @@ -310,30 +372,47 @@ async fn overseer_recv(rx: &mut mpsc::UnboundedReceiver) -> AllMess rx.next().await.expect("Test subsystem no longer live") } -fn to_incoming_req( +fn to_incoming_req_v1( executor: &TaskExecutor, outgoing: Requests, + protocol_name: ProtocolName, ) -> IncomingRequest { match outgoing { - Requests::ChunkFetchingV1(OutgoingRequest { payload, pending_response, .. }) => { - let (tx, rx): (oneshot::Sender, oneshot::Receiver<_>) = - oneshot::channel(); - executor.spawn( - "message-forwarding", - None, - async { - let response = rx.await; - let payload = response.expect("Unexpected canceled request").result; - pending_response - .send( - payload - .map_err(|_| network::RequestFailure::Refused) - .map(|r| (r, ProtocolName::from(""))), - ) - .expect("Sending response is expected to work"); - } - .boxed(), - ); + Requests::ChunkFetching(OutgoingRequest { + pending_response, + fallback_request: Some((fallback_request, fallback_protocol)), + .. + }) => { + assert_eq!(fallback_protocol, Protocol::ChunkFetchingV1); + + let tx = spawn_message_forwarding(executor, protocol_name, pending_response); + + IncomingRequest::new( + // We don't really care: + network::PeerId::random().into(), + fallback_request, + tx, + ) + }, + _ => panic!("Unexpected request!"), + } +} + +fn to_incoming_req_v2( + executor: &TaskExecutor, + outgoing: Requests, + protocol_name: ProtocolName, +) -> IncomingRequest { + match outgoing { + Requests::ChunkFetching(OutgoingRequest { + payload, + pending_response, + fallback_request: Some((_, fallback_protocol)), + .. 
+ }) => { + assert_eq!(fallback_protocol, Protocol::ChunkFetchingV1); + + let tx = spawn_message_forwarding(executor, protocol_name, pending_response); IncomingRequest::new( // We don't really care: @@ -345,3 +424,26 @@ fn to_incoming_req( _ => panic!("Unexpected request!"), } } + +fn spawn_message_forwarding( + executor: &TaskExecutor, + protocol_name: ProtocolName, + pending_response: oneshot::Sender, ProtocolName), RequestFailure>>, +) -> oneshot::Sender { + let (tx, rx): (oneshot::Sender, oneshot::Receiver<_>) = + oneshot::channel(); + executor.spawn( + "message-forwarding", + None, + async { + let response = rx.await; + let payload = response.expect("Unexpected canceled request").result; + pending_response + .send(payload.map_err(|_| RequestFailure::Refused).map(|r| (r, protocol_name))) + .expect("Sending response is expected to work"); + } + .boxed(), + ); + + tx +} diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml index eb503f502b2..1c2b5f4968a 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ b/polkadot/node/network/availability-recovery/Cargo.toml @@ -30,10 +30,11 @@ sc-network = { path = "../../../../substrate/client/network" } [dev-dependencies] assert_matches = "1.4.0" -env_logger = "0.11" futures-timer = "3.0.2" +rstest = "0.18.2" log = { workspace = true, default-features = true } +sp-tracing = { path = "../../../../substrate/primitives/tracing" } sp-core = { path = "../../../../substrate/primitives/core" } sp-keyring = { path = "../../../../substrate/primitives/keyring" } sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } diff --git a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs index d36b898ea15..e5a8f1eb7c9 100644 --- a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs +++ b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs @@ -23,7 +23,7 @@ use polkadot_subsystem_bench::{ availability::{ - benchmark_availability_read, prepare_test, DataAvailabilityReadOptions, + benchmark_availability_read, prepare_test, DataAvailabilityReadOptions, Strategy, TestDataAvailability, TestState, }, configuration::TestConfiguration, @@ -37,7 +37,7 @@ const BENCH_COUNT: usize = 10; fn main() -> Result<(), String> { let mut messages = vec![]; - let options = DataAvailabilityReadOptions { fetch_from_backers: true }; + let options = DataAvailabilityReadOptions { strategy: Strategy::FullFromBackers }; let mut config = TestConfiguration::default(); config.num_blocks = 3; config.generate_pov_sizes(); diff --git a/polkadot/node/network/availability-recovery/src/error.rs b/polkadot/node/network/availability-recovery/src/error.rs index 47277a521b8..eaec4cbc9d9 100644 --- a/polkadot/node/network/availability-recovery/src/error.rs +++ b/polkadot/node/network/availability-recovery/src/error.rs @@ -16,20 +16,34 @@ //! The `Error` and `Result` types used by the subsystem. +use crate::LOG_TARGET; +use fatality::{fatality, Nested}; use futures::channel::oneshot; -use thiserror::Error; +use polkadot_node_network_protocol::request_response::incoming; +use polkadot_node_subsystem::{RecoveryError, SubsystemError}; +use polkadot_primitives::Hash; /// Error type used by the Availability Recovery subsystem. 
-#[derive(Debug, Error)] +#[fatality(splitable)] pub enum Error { - #[error(transparent)] - Subsystem(#[from] polkadot_node_subsystem::SubsystemError), + #[fatal] + #[error("Spawning subsystem task failed: {0}")] + SpawnTask(#[source] SubsystemError), + + /// Receiving subsystem message from overseer failed. + #[fatal] + #[error("Receiving message from overseer failed: {0}")] + SubsystemReceive(#[source] SubsystemError), + #[fatal] #[error("failed to query full data from store")] CanceledQueryFullData(#[source] oneshot::Canceled), - #[error("failed to query session info")] - CanceledSessionInfo(#[source] oneshot::Canceled), + #[error("`SessionInfo` is `None` at {0}")] + SessionInfoUnavailable(Hash), + + #[error("failed to query node features from runtime")] + RequestNodeFeatures(#[source] polkadot_node_subsystem_util::runtime::Error), #[error("failed to send response")] CanceledResponseSender, @@ -40,8 +54,38 @@ pub enum Error { #[error(transparent)] Erasure(#[from] polkadot_erasure_coding::Error), + #[fatal] #[error(transparent)] - Util(#[from] polkadot_node_subsystem_util::Error), + Oneshot(#[from] oneshot::Canceled), + + #[fatal(forward)] + #[error("Error during recovery: {0}")] + Recovery(#[from] RecoveryError), + + #[fatal(forward)] + #[error("Retrieving next incoming request failed: {0}")] + IncomingRequest(#[from] incoming::Error), } pub type Result = std::result::Result; + +/// Utility for eating top level errors and log them. +/// +/// We basically always want to try and continue on error, unless the error is fatal for the entire +/// subsystem. +pub fn log_error(result: Result<()>) -> std::result::Result<(), FatalError> { + match result.into_nested()? { + Ok(()) => Ok(()), + Err(jfyi) => { + jfyi.log(); + Ok(()) + }, + } +} + +impl JfyiError { + /// Log a `JfyiError`. 
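Note: `log_error` above swallows and logs informational errors while letting fatal ones abort the subsystem. Below is a hand-rolled, std-only sketch of the split that the `fatality` macro generates; the macro's real output differs in detail, this only shows the control flow.

```rust
// Hand-rolled sketch of the fatal/JFYI error split; the real code derives
// this via the `fatality` crate.

#[derive(Debug)]
enum FatalError { SpawnTask }

#[derive(Debug)]
enum JfyiError { SendResponse }

#[derive(Debug)]
enum Error { Fatal(FatalError), Jfyi(JfyiError) }

impl Error {
    // Counterpart of `into_nested()`: fatal errors go to the outer layer,
    // everything else to the inner one.
    fn into_nested(self) -> Result<Result<(), JfyiError>, FatalError> {
        match self {
            Error::Fatal(f) => Err(f),
            Error::Jfyi(j) => Ok(Err(j)),
        }
    }
}

/// Eat and log non-fatal errors, propagate fatal ones.
fn log_error(result: Result<(), Error>) -> Result<(), FatalError> {
    let nested = match result {
        Ok(()) => Ok(Ok(())),
        Err(e) => e.into_nested(),
    };
    match nested? {
        Ok(()) => Ok(()),
        Err(jfyi) => {
            eprintln!("jfyi error: {jfyi:?}"); // log and keep going
            Ok(())
        },
    }
}

fn main() {
    assert!(log_error(Err(Error::Jfyi(JfyiError::SendResponse))).is_ok());
    assert!(log_error(Err(Error::Fatal(FatalError::SpawnTask))).is_err());
}
```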
+ pub fn log(self) { + gum::warn!(target: LOG_TARGET, "{}", self); + } +} diff --git a/polkadot/node/network/availability-recovery/src/lib.rs b/polkadot/node/network/availability-recovery/src/lib.rs index b836870cd8a..167125f987a 100644 --- a/polkadot/node/network/availability-recovery/src/lib.rs +++ b/polkadot/node/network/availability-recovery/src/lib.rs @@ -19,7 +19,7 @@ #![warn(missing_docs)] use std::{ - collections::{HashMap, VecDeque}, + collections::{BTreeMap, VecDeque}, iter::Iterator, num::NonZeroUsize, pin::Pin, @@ -34,31 +34,41 @@ use futures::{ stream::{FuturesUnordered, StreamExt}, task::{Context, Poll}, }; +use sc_network::ProtocolName; use schnellru::{ByLength, LruMap}; -use task::{FetchChunks, FetchChunksParams, FetchFull, FetchFullParams}; +use task::{ + FetchChunks, FetchChunksParams, FetchFull, FetchFullParams, FetchSystematicChunks, + FetchSystematicChunksParams, +}; -use fatality::Nested; use polkadot_erasure_coding::{ - branch_hash, branches, obtain_chunks_v1, recovery_threshold, Error as ErasureEncodingError, + branches, obtain_chunks_v1, recovery_threshold, systematic_recovery_threshold, + Error as ErasureEncodingError, }; use task::{RecoveryParams, RecoveryStrategy, RecoveryTask}; +use error::{log_error, Error, FatalError, Result}; use polkadot_node_network_protocol::{ - request_response::{v1 as request_v1, IncomingRequestReceiver}, + request_response::{ + v1 as request_v1, v2 as request_v2, IncomingRequestReceiver, IsRequest, ReqProtocolNames, + }, UnifiedReputationChange as Rep, }; -use polkadot_node_primitives::{AvailableData, ErasureChunk}; +use polkadot_node_primitives::AvailableData; use polkadot_node_subsystem::{ errors::RecoveryError, jaeger, messages::{AvailabilityRecoveryMessage, AvailabilityStoreMessage}, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, - SubsystemContext, SubsystemError, SubsystemResult, + SubsystemContext, SubsystemError, +}; +use polkadot_node_subsystem_util::{ + availability_chunks::availability_chunk_indices, + runtime::{ExtendedSessionInfo, RuntimeInfo}, }; -use polkadot_node_subsystem_util::request_session_info; use polkadot_primitives::{ - BlakeTwo256, BlockNumber, CandidateHash, CandidateReceipt, GroupIndex, Hash, HashT, - SessionIndex, SessionInfo, ValidatorIndex, + node_features, BlockNumber, CandidateHash, CandidateReceipt, ChunkIndex, CoreIndex, GroupIndex, + Hash, SessionIndex, ValidatorIndex, }; mod error; @@ -70,6 +80,8 @@ pub use metrics::Metrics; #[cfg(test)] mod tests; +type RecoveryResult = std::result::Result; + const LOG_TARGET: &str = "parachain::availability-recovery"; // Size of the LRU cache where we keep recovered data. @@ -85,13 +97,27 @@ pub const FETCH_CHUNKS_THRESHOLD: usize = 4 * 1024 * 1024; #[derive(Clone, PartialEq)] /// The strategy we use to recover the PoV. pub enum RecoveryStrategyKind { - /// We always try the backing group first, then fallback to validator chunks. - BackersFirstAlways, /// We try the backing group first if PoV size is lower than specified, then fallback to /// validator chunks. BackersFirstIfSizeLower(usize), + /// We try the backing group first if PoV size is lower than specified, then fallback to + /// systematic chunks. Regular chunk recovery as a last resort. + BackersFirstIfSizeLowerThenSystematicChunks(usize), + + /// The following variants are only helpful for integration tests. + /// + /// We always try the backing group first, then fallback to validator chunks. 
+ #[allow(dead_code)] + BackersFirstAlways, /// We always recover using validator chunks. + #[allow(dead_code)] ChunksAlways, + /// First try the backing group. Then systematic chunks. + #[allow(dead_code)] + BackersThenSystematicChunks, + /// Always recover using systematic chunks, fall back to regular chunks. + #[allow(dead_code)] + SystematicChunks, } /// The Availability Recovery Subsystem. @@ -109,11 +135,15 @@ pub struct AvailabilityRecoverySubsystem { metrics: Metrics, /// The type of check to perform after available data was recovered. post_recovery_check: PostRecoveryCheck, + /// Full protocol name for ChunkFetchingV1. + req_v1_protocol_name: ProtocolName, + /// Full protocol name for ChunkFetchingV2. + req_v2_protocol_name: ProtocolName, } #[derive(Clone, PartialEq, Debug)] /// The type of check to perform after available data was recovered. -pub enum PostRecoveryCheck { +enum PostRecoveryCheck { /// Reencode the data and check erasure root. For validators. Reencode, /// Only check the pov hash. For collators only. @@ -121,56 +151,18 @@ pub enum PostRecoveryCheck { } /// Expensive erasure coding computations that we want to run on a blocking thread. -pub enum ErasureTask { +enum ErasureTask { /// Reconstructs `AvailableData` from chunks given `n_validators`. Reconstruct( usize, - HashMap, - oneshot::Sender>, + BTreeMap>, + oneshot::Sender>, ), /// Re-encode `AvailableData` into erasure chunks in order to verify the provided root hash of /// the Merkle tree. Reencode(usize, Hash, AvailableData, oneshot::Sender>), } -const fn is_unavailable( - received_chunks: usize, - requesting_chunks: usize, - unrequested_validators: usize, - threshold: usize, -) -> bool { - received_chunks + requesting_chunks + unrequested_validators < threshold -} - -/// Check validity of a chunk. -fn is_chunk_valid(params: &RecoveryParams, chunk: &ErasureChunk) -> bool { - let anticipated_hash = - match branch_hash(¶ms.erasure_root, chunk.proof(), chunk.index.0 as usize) { - Ok(hash) => hash, - Err(e) => { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - validator_index = ?chunk.index, - error = ?e, - "Invalid Merkle proof", - ); - return false - }, - }; - let erasure_chunk_hash = BlakeTwo256::hash(&chunk.chunk); - if anticipated_hash != erasure_chunk_hash { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - validator_index = ?chunk.index, - "Merkle proof mismatch" - ); - return false - } - true -} - /// Re-encode the data into erasure chunks in order to verify /// the root hash of the provided Merkle tree, which is built /// on-top of the encoded chunks. @@ -214,12 +206,12 @@ fn reconstructed_data_matches_root( /// Accumulate all awaiting sides for some particular `AvailableData`. struct RecoveryHandle { candidate_hash: CandidateHash, - remote: RemoteHandle>, - awaiting: Vec>>, + remote: RemoteHandle, + awaiting: Vec>, } impl Future for RecoveryHandle { - type Output = Option<(CandidateHash, Result)>; + type Output = Option<(CandidateHash, RecoveryResult)>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut indices_to_remove = Vec::new(); @@ -273,7 +265,7 @@ enum CachedRecovery { impl CachedRecovery { /// Convert back to `Result` to deliver responses. 
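Note: `CachedRecovery` caches both recovered data and deterministic failures, but deliberately refuses transient ones: `Invalid` is a final verdict worth caching, `Unavailable` may succeed on retry. A runnable sketch of that policy, mirroring `into_result`/`TryFrom` below, with a `HashMap` standing in for the LRU:

```rust
use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
enum RecoveryError { Invalid, Unavailable }

#[derive(Clone)]
enum CachedRecovery {
    Valid(Vec<u8>),
    Invalid, // deterministic failure: safe to cache
}

impl CachedRecovery {
    fn into_result(self) -> Result<Vec<u8>, RecoveryError> {
        match self {
            CachedRecovery::Valid(d) => Ok(d),
            CachedRecovery::Invalid => Err(RecoveryError::Invalid),
        }
    }
}

impl TryFrom<Result<Vec<u8>, RecoveryError>> for CachedRecovery {
    type Error = ();
    fn try_from(o: Result<Vec<u8>, RecoveryError>) -> Result<Self, ()> {
        match o {
            Ok(d) => Ok(Self::Valid(d)),
            Err(RecoveryError::Invalid) => Ok(Self::Invalid),
            // Transient: do NOT cache, a later attempt may succeed.
            Err(RecoveryError::Unavailable) => Err(()),
        }
    }
}

fn main() {
    let mut lru: HashMap<u64, CachedRecovery> = HashMap::new();
    // Unavailable results never enter the cache...
    assert!(CachedRecovery::try_from(Err(RecoveryError::Unavailable)).is_err());
    // ...while Invalid ones do, so repeated requests are answered cheaply.
    if let Ok(c) = CachedRecovery::try_from(Err(RecoveryError::Invalid)) {
        lru.insert(1, c);
    }
    assert_eq!(lru[&1].clone().into_result(), Err(RecoveryError::Invalid));
}
```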
- fn into_result(self) -> Result { + fn into_result(self) -> RecoveryResult { match self { Self::Valid(d) => Ok(d), Self::Invalid => Err(RecoveryError::Invalid), @@ -281,9 +273,9 @@ impl CachedRecovery { } } -impl TryFrom> for CachedRecovery { +impl TryFrom for CachedRecovery { type Error = (); - fn try_from(o: Result) -> Result { + fn try_from(o: RecoveryResult) -> std::result::Result { match o { Ok(d) => Ok(Self::Valid(d)), Err(RecoveryError::Invalid) => Ok(Self::Invalid), @@ -305,6 +297,9 @@ struct State { /// An LRU cache of recently recovered data. availability_lru: LruMap, + + /// Cached runtime info. + runtime_info: RuntimeInfo, } impl Default for State { @@ -313,6 +308,7 @@ impl Default for State { ongoing_recoveries: FuturesUnordered::new(), live_block: (0, Hash::default()), availability_lru: LruMap::new(ByLength::new(LRU_SIZE)), + runtime_info: RuntimeInfo::new(None), } } } @@ -329,9 +325,10 @@ impl AvailabilityRecoverySubsystem { } /// Handles a signal from the overseer. -async fn handle_signal(state: &mut State, signal: OverseerSignal) -> SubsystemResult { +/// Returns true if subsystem receives a deadly signal. +async fn handle_signal(state: &mut State, signal: OverseerSignal) -> bool { match signal { - OverseerSignal::Conclude => Ok(true), + OverseerSignal::Conclude => true, OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { activated, .. }) => { // if activated is non-empty, set state.live_block to the highest block in `activated` if let Some(activated) = activated { @@ -340,9 +337,9 @@ async fn handle_signal(state: &mut State, signal: OverseerSignal) -> SubsystemRe } } - Ok(false) + false }, - OverseerSignal::BlockFinalized(_, _) => Ok(false), + OverseerSignal::BlockFinalized(_, _) => false, } } @@ -351,27 +348,11 @@ async fn handle_signal(state: &mut State, signal: OverseerSignal) -> SubsystemRe async fn launch_recovery_task( state: &mut State, ctx: &mut Context, - session_info: SessionInfo, - receipt: CandidateReceipt, - response_sender: oneshot::Sender>, - metrics: &Metrics, + response_sender: oneshot::Sender, recovery_strategies: VecDeque::Sender>>>, - bypass_availability_store: bool, - post_recovery_check: PostRecoveryCheck, -) -> error::Result<()> { - let candidate_hash = receipt.hash(); - let params = RecoveryParams { - validator_authority_keys: session_info.discovery_keys.clone(), - n_validators: session_info.validators.len(), - threshold: recovery_threshold(session_info.validators.len())?, - candidate_hash, - erasure_root: receipt.descriptor.erasure_root, - metrics: metrics.clone(), - bypass_availability_store, - post_recovery_check, - pov_hash: receipt.descriptor.pov_hash, - }; - + params: RecoveryParams, +) -> Result<()> { + let candidate_hash = params.candidate_hash; let recovery_task = RecoveryTask::new(ctx.sender().clone(), params, recovery_strategies); let (remote, remote_handle) = recovery_task.run().remote_handle(); @@ -382,15 +363,8 @@ async fn launch_recovery_task( awaiting: vec![response_sender], }); - if let Err(e) = ctx.spawn("recovery-task", Box::pin(remote)) { - gum::warn!( - target: LOG_TARGET, - err = ?e, - "Failed to spawn a recovery task", - ); - } - - Ok(()) + ctx.spawn("recovery-task", Box::pin(remote)) + .map_err(|err| Error::SpawnTask(err)) } /// Handles an availability recovery request. 
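Note: `launch_recovery_task` spawns the recovery future once and keeps a handle, so every caller interested in the same candidate is answered from a single run. A small runnable sketch of the `futures::FutureExt::remote_handle` pattern it relies on (assuming `futures = "0.3"`):

```rust
use futures::{executor::LocalPool, future::FutureExt, task::LocalSpawnExt};

fn main() {
    let mut pool = LocalPool::new();
    let spawner = pool.spawner();

    // Split the work from its result: `remote` runs to completion wherever
    // it is spawned, `handle` resolves with the output.
    let (remote, handle) = async { 21 * 2 }.remote_handle();
    spawner.spawn_local(remote).expect("spawning should work");

    let result = pool.run_until(handle);
    assert_eq!(result, 42);
}
```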
@@ -401,13 +375,16 @@ async fn handle_recover( receipt: CandidateReceipt, session_index: SessionIndex, backing_group: Option, - response_sender: oneshot::Sender>, + response_sender: oneshot::Sender, metrics: &Metrics, erasure_task_tx: futures::channel::mpsc::Sender, recovery_strategy_kind: RecoveryStrategyKind, bypass_availability_store: bool, post_recovery_check: PostRecoveryCheck, -) -> error::Result<()> { + maybe_core_index: Option, + req_v1_protocol_name: ProtocolName, + req_v2_protocol_name: ProtocolName, +) -> Result<()> { let candidate_hash = receipt.hash(); let span = jaeger::Span::new(candidate_hash, "availability-recovery") @@ -416,14 +393,7 @@ async fn handle_recover( if let Some(result) = state.availability_lru.get(&candidate_hash).cloned().map(|v| v.into_result()) { - if let Err(e) = response_sender.send(result) { - gum::warn!( - target: LOG_TARGET, - err = ?e, - "Error responding with an availability recovery result", - ); - } - return Ok(()) + return response_sender.send(result).map_err(|_| Error::CanceledResponseSender) } if let Some(i) = @@ -434,100 +404,182 @@ async fn handle_recover( } let _span = span.child("not-cached"); - let session_info = request_session_info(state.live_block.1, session_index, ctx.sender()) - .await - .await - .map_err(error::Error::CanceledSessionInfo)??; + let session_info_res = state + .runtime_info + .get_session_info_by_index(ctx.sender(), state.live_block.1, session_index) + .await; let _span = span.child("session-info-ctx-received"); - match session_info { - Some(session_info) => { + match session_info_res { + Ok(ExtendedSessionInfo { session_info, node_features, .. }) => { + let mut backer_group = None; + let n_validators = session_info.validators.len(); + let systematic_threshold = systematic_recovery_threshold(n_validators)?; let mut recovery_strategies: VecDeque< Box::Sender>>, - > = VecDeque::with_capacity(2); + > = VecDeque::with_capacity(3); if let Some(backing_group) = backing_group { if let Some(backing_validators) = session_info.validator_groups.get(backing_group) { let mut small_pov_size = true; - if let RecoveryStrategyKind::BackersFirstIfSizeLower(fetch_chunks_threshold) = - recovery_strategy_kind - { - // Get our own chunk size to get an estimate of the PoV size. - let chunk_size: Result, error::Error> = - query_chunk_size(ctx, candidate_hash).await; - if let Ok(Some(chunk_size)) = chunk_size { - let pov_size_estimate = - chunk_size.saturating_mul(session_info.validators.len()) / 3; - small_pov_size = pov_size_estimate < fetch_chunks_threshold; - - gum::trace!( - target: LOG_TARGET, - ?candidate_hash, - pov_size_estimate, - fetch_chunks_threshold, - enabled = small_pov_size, - "Prefer fetch from backing group", - ); - } else { - // we have a POV limit but were not able to query the chunk size, so - // don't use the backing group. - small_pov_size = false; - } + match recovery_strategy_kind { + RecoveryStrategyKind::BackersFirstIfSizeLower(fetch_chunks_threshold) | + RecoveryStrategyKind::BackersFirstIfSizeLowerThenSystematicChunks( + fetch_chunks_threshold, + ) => { + // Get our own chunk size to get an estimate of the PoV size. 
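Note: with systematic recovery in the mix, the estimate in the next hunk becomes `chunk_size * systematic_threshold`, since the first `systematic_threshold` chunks alone reconstruct the data. A sketch of the threshold arithmetic follows; the authoritative implementations are `recovery_threshold`/`systematic_recovery_threshold` in `polkadot-erasure-coding`, and the formulas below assume the usual n >= 3f + 1 Byzantine bound.

```rust
// Threshold arithmetic sketch, assuming n >= 3f + 1. Not the authoritative
// implementation; see polkadot-erasure-coding for the real one.

fn recovery_threshold(n_validators: usize) -> usize {
    let faulty = n_validators.saturating_sub(1) / 3;
    faulty + 1
}

// The first `k` chunks of a systematic code are the data itself; under this
// sketch's assumption `k` matches the recovery threshold, so fetching
// exactly those chunks skips the erasure-decoding step entirely.
fn systematic_recovery_threshold(n_validators: usize) -> usize {
    recovery_threshold(n_validators)
}

fn main() {
    let n_validators = 10;
    let threshold = systematic_recovery_threshold(n_validators);

    // PoV size estimate used to pick a strategy: our chunk size times the
    // number of chunks needed for reconstruction.
    let our_chunk_size = 128 * 1024; // hypothetical 128 KiB chunk
    let pov_size_estimate = our_chunk_size * threshold;

    let fetch_chunks_threshold = 1024 * 1024; // hypothetical 1 MiB cutoff
    let prefer_backers = pov_size_estimate < fetch_chunks_threshold;

    assert_eq!(threshold, 4); // f = 3 for n = 10
    assert!(prefer_backers);
}
```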
+ let chunk_size: Result> = + query_chunk_size(ctx, candidate_hash).await; + if let Ok(Some(chunk_size)) = chunk_size { + let pov_size_estimate = chunk_size * systematic_threshold; + small_pov_size = pov_size_estimate < fetch_chunks_threshold; + + if small_pov_size { + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + pov_size_estimate, + fetch_chunks_threshold, + "Prefer fetch from backing group", + ); + } + } else { + // we have a POV limit but were not able to query the chunk size, so + // don't use the backing group. + small_pov_size = false; + } + }, + _ => {}, }; match (&recovery_strategy_kind, small_pov_size) { (RecoveryStrategyKind::BackersFirstAlways, _) | - (RecoveryStrategyKind::BackersFirstIfSizeLower(_), true) => recovery_strategies.push_back( - Box::new(FetchFull::new(FetchFullParams { - validators: backing_validators.to_vec(), - erasure_task_tx: erasure_task_tx.clone(), - })), - ), + (RecoveryStrategyKind::BackersFirstIfSizeLower(_), true) | + ( + RecoveryStrategyKind::BackersFirstIfSizeLowerThenSystematicChunks(_), + true, + ) | + (RecoveryStrategyKind::BackersThenSystematicChunks, _) => + recovery_strategies.push_back(Box::new(FetchFull::new( + FetchFullParams { validators: backing_validators.to_vec() }, + ))), _ => {}, }; + + backer_group = Some(backing_validators); + } + } + + let chunk_mapping_enabled = if let Some(&true) = node_features + .get(usize::from(node_features::FeatureIndex::AvailabilityChunkMapping as u8)) + .as_deref() + { + true + } else { + false + }; + + // We can only attempt systematic recovery if we received the core index of the + // candidate and chunk mapping is enabled. + if let Some(core_index) = maybe_core_index { + if matches!( + recovery_strategy_kind, + RecoveryStrategyKind::BackersThenSystematicChunks | + RecoveryStrategyKind::SystematicChunks | + RecoveryStrategyKind::BackersFirstIfSizeLowerThenSystematicChunks(_) + ) && chunk_mapping_enabled + { + let chunk_indices = + availability_chunk_indices(Some(node_features), n_validators, core_index)?; + + let chunk_indices: VecDeque<_> = chunk_indices + .iter() + .enumerate() + .map(|(v_index, c_index)| { + ( + *c_index, + ValidatorIndex( + u32::try_from(v_index) + .expect("validator count should not exceed u32"), + ), + ) + }) + .collect(); + + // Only get the validators according to the threshold. 
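Note: the enumeration above inverts the per-core mapping into `(chunk_index, validator_index)` pairs, and the next hunk keeps only validators holding systematic chunks (chunk index below the threshold). A std-only sketch of that selection; the per-core rotation again stands in for the real mapping.

```rust
// Selecting the holders of systematic chunks. `chunk_indices[v]` is the
// chunk assigned to validator `v`; the rotation is illustrative only.

fn main() {
    let n_validators: u32 = 7;
    let core_index: u32 = 2;
    let systematic_threshold: u32 = 3;

    // validator index -> chunk index (stand-in rotation, not the real map)
    let chunk_indices: Vec<u32> =
        (0..n_validators).map(|v| (v + core_index) % n_validators).collect();

    // Invert into (chunk_index, validator_index) pairs, then keep only the
    // validators holding systematic chunks (chunk index < threshold).
    let systematic_holders: Vec<(u32, u32)> = chunk_indices
        .iter()
        .enumerate()
        .map(|(v_index, c_index)| (*c_index, v_index as u32))
        .filter(|(c_index, _)| *c_index < systematic_threshold)
        .collect();

    // Exactly `systematic_threshold` validators are selected.
    assert_eq!(systematic_holders.len(), systematic_threshold as usize);
    for (c_index, v_index) in &systematic_holders {
        assert_eq!((*v_index + core_index) % n_validators, *c_index);
    }
}
```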
+ let validators = chunk_indices + .clone() + .into_iter() + .filter(|(c_index, _)| { + usize::try_from(c_index.0) + .expect("usize is at least u32 bytes on all modern targets.") < + systematic_threshold + }) + .collect(); + + recovery_strategies.push_back(Box::new(FetchSystematicChunks::new( + FetchSystematicChunksParams { + validators, + backers: backer_group.map(|v| v.to_vec()).unwrap_or_else(|| vec![]), + }, + ))); } } recovery_strategies.push_back(Box::new(FetchChunks::new(FetchChunksParams { n_validators: session_info.validators.len(), - erasure_task_tx, }))); + let session_info = session_info.clone(); + + let n_validators = session_info.validators.len(); + launch_recovery_task( state, ctx, - session_info, - receipt, response_sender, - metrics, recovery_strategies, - bypass_availability_store, - post_recovery_check, + RecoveryParams { + validator_authority_keys: session_info.discovery_keys.clone(), + n_validators, + threshold: recovery_threshold(n_validators)?, + systematic_threshold, + candidate_hash, + erasure_root: receipt.descriptor.erasure_root, + metrics: metrics.clone(), + bypass_availability_store, + post_recovery_check, + pov_hash: receipt.descriptor.pov_hash, + req_v1_protocol_name, + req_v2_protocol_name, + chunk_mapping_enabled, + erasure_task_tx, + }, ) .await }, - None => { - gum::warn!(target: LOG_TARGET, "SessionInfo is `None` at {:?}", state.live_block); + Err(_) => { response_sender .send(Err(RecoveryError::Unavailable)) - .map_err(|_| error::Error::CanceledResponseSender)?; - Ok(()) + .map_err(|_| Error::CanceledResponseSender)?; + + Err(Error::SessionInfoUnavailable(state.live_block.1)) }, } } -/// Queries a chunk from av-store. +/// Queries the full `AvailableData` from av-store. #[overseer::contextbounds(AvailabilityRecovery, prefix = self::overseer)] async fn query_full_data( ctx: &mut Context, candidate_hash: CandidateHash, -) -> error::Result> { +) -> Result> { let (tx, rx) = oneshot::channel(); ctx.send_message(AvailabilityStoreMessage::QueryAvailableData(candidate_hash, tx)) .await; - rx.await.map_err(error::Error::CanceledQueryFullData) + rx.await.map_err(Error::CanceledQueryFullData) } /// Queries a chunk from av-store. @@ -535,12 +587,12 @@ async fn query_full_data( async fn query_chunk_size( ctx: &mut Context, candidate_hash: CandidateHash, -) -> error::Result> { +) -> Result> { let (tx, rx) = oneshot::channel(); ctx.send_message(AvailabilityStoreMessage::QueryChunkSize(candidate_hash, tx)) .await; - rx.await.map_err(error::Error::CanceledQueryFullData) + rx.await.map_err(Error::CanceledQueryFullData) } #[overseer::contextbounds(AvailabilityRecovery, prefix = self::overseer)] @@ -551,6 +603,7 @@ impl AvailabilityRecoverySubsystem { pub fn for_collator( fetch_chunks_threshold: Option, req_receiver: IncomingRequestReceiver, + req_protocol_names: &ReqProtocolNames, metrics: Metrics, ) -> Self { Self { @@ -561,58 +614,67 @@ impl AvailabilityRecoverySubsystem { post_recovery_check: PostRecoveryCheck::PovHash, req_receiver, metrics, + req_v1_protocol_name: req_protocol_names + .get_name(request_v1::ChunkFetchingRequest::PROTOCOL), + req_v2_protocol_name: req_protocol_names + .get_name(request_v2::ChunkFetchingRequest::PROTOCOL), } } - /// Create a new instance of `AvailabilityRecoverySubsystem` which starts with a fast path to - /// request data from backers. 
-	pub fn with_fast_path(
-		req_receiver: IncomingRequestReceiver,
-		metrics: Metrics,
-	) -> Self {
-		Self {
-			recovery_strategy_kind: RecoveryStrategyKind::BackersFirstAlways,
-			bypass_availability_store: false,
-			post_recovery_check: PostRecoveryCheck::Reencode,
-			req_receiver,
-			metrics,
-		}
-	}
-
-	/// Create a new instance of `AvailabilityRecoverySubsystem` which requests only chunks
-	pub fn with_chunks_only(
+	/// Create a new, optimised instance of `AvailabilityRecoverySubsystem` suitable for validator
+	/// nodes, which:
+	/// - for small PoVs (sized below the `fetch_chunks_threshold` or, if unset, the
+	///   `CONSERVATIVE_FETCH_CHUNKS_THRESHOLD`), attempts full recovery from backers, if a
+	///   backing group was supplied.
+	/// - for large PoVs, attempts systematic recovery, if a core index was supplied and the
+	///   `AvailabilityChunkMapping` node feature is enabled.
+	/// - as a last resort, attempts regular chunk recovery from all validators.
+	pub fn for_validator(
+		fetch_chunks_threshold: Option,
 		req_receiver: IncomingRequestReceiver,
+		req_protocol_names: &ReqProtocolNames,
 		metrics: Metrics,
 	) -> Self {
 		Self {
-			recovery_strategy_kind: RecoveryStrategyKind::ChunksAlways,
+			recovery_strategy_kind:
+				RecoveryStrategyKind::BackersFirstIfSizeLowerThenSystematicChunks(
+					fetch_chunks_threshold.unwrap_or(CONSERVATIVE_FETCH_CHUNKS_THRESHOLD),
+				),
 			bypass_availability_store: false,
 			post_recovery_check: PostRecoveryCheck::Reencode,
 			req_receiver,
 			metrics,
+			req_v1_protocol_name: req_protocol_names
+				.get_name(request_v1::ChunkFetchingRequest::PROTOCOL),
+			req_v2_protocol_name: req_protocol_names
+				.get_name(request_v2::ChunkFetchingRequest::PROTOCOL),
 		}
 	}
 
-	/// Create a new instance of `AvailabilityRecoverySubsystem` which requests chunks if PoV is
-	/// above a threshold.
-	pub fn with_chunks_if_pov_large(
-		fetch_chunks_threshold: Option,
+	/// Customise the recovery strategy kind.
+	/// Currently only useful for tests.
+	#[cfg(any(test, feature = "subsystem-benchmarks"))]
+	pub fn with_recovery_strategy_kind(
 		req_receiver: IncomingRequestReceiver,
+		req_protocol_names: &ReqProtocolNames,
 		metrics: Metrics,
+		recovery_strategy_kind: RecoveryStrategyKind,
 	) -> Self {
 		Self {
-			recovery_strategy_kind: RecoveryStrategyKind::BackersFirstIfSizeLower(
-				fetch_chunks_threshold.unwrap_or(CONSERVATIVE_FETCH_CHUNKS_THRESHOLD),
-			),
+			recovery_strategy_kind,
 			bypass_availability_store: false,
 			post_recovery_check: PostRecoveryCheck::Reencode,
 			req_receiver,
 			metrics,
+			req_v1_protocol_name: req_protocol_names
+				.get_name(request_v1::ChunkFetchingRequest::PROTOCOL),
+			req_v2_protocol_name: req_protocol_names
+				.get_name(request_v2::ChunkFetchingRequest::PROTOCOL),
 		}
 	}
 
 	/// Starts the inner subsystem loop.
-	pub async fn run(self, mut ctx: Context) -> SubsystemResult<()> {
+	pub async fn run(self, mut ctx: Context) -> std::result::Result<(), FatalError> {
 		let mut state = State::default();
 		let Self {
 			mut req_receiver,
@@ -620,6 +682,8 @@ impl AvailabilityRecoverySubsystem {
 			recovery_strategy_kind,
 			bypass_availability_store,
 			post_recovery_check,
+			req_v1_protocol_name,
+			req_v2_protocol_name,
 		} = self;
 
 		let (erasure_task_tx, erasure_task_rx) = futures::channel::mpsc::channel(16);
@@ -655,53 +719,44 @@ impl AvailabilityRecoverySubsystem {
 		loop {
 			let recv_req = req_receiver.recv(|| vec![COST_INVALID_REQUEST]).fuse();
 			pin_mut!(recv_req);
-			futures::select! {
+			let res = futures::select! 
{ erasure_task = erasure_task_rx.next() => { match erasure_task { Some(task) => { - let send_result = to_pool + to_pool .next() .expect("Pool size is `NonZeroUsize`; qed") .send(task) .await - .map_err(|_| RecoveryError::ChannelClosed); - - if let Err(err) = send_result { - gum::warn!( - target: LOG_TARGET, - ?err, - "Failed to send erasure coding task", - ); - } + .map_err(|_| RecoveryError::ChannelClosed) }, None => { - gum::debug!( - target: LOG_TARGET, - "Erasure task channel closed", - ); - - return Err(SubsystemError::with_origin("availability-recovery", RecoveryError::ChannelClosed)) + Err(RecoveryError::ChannelClosed) } - } + }.map_err(Into::into) } - v = ctx.recv().fuse() => { - match v? { - FromOrchestra::Signal(signal) => if handle_signal( - &mut state, - signal, - ).await? { - gum::debug!(target: LOG_TARGET, "subsystem concluded"); - return Ok(()); - } - FromOrchestra::Communication { msg } => { - match msg { - AvailabilityRecoveryMessage::RecoverAvailableData( - receipt, - session_index, - maybe_backing_group, - response_sender, - ) => { - if let Err(e) = handle_recover( + signal = ctx.recv().fuse() => { + match signal { + Ok(signal) => { + match signal { + FromOrchestra::Signal(signal) => if handle_signal( + &mut state, + signal, + ).await { + gum::debug!(target: LOG_TARGET, "subsystem concluded"); + return Ok(()); + } else { + Ok(()) + }, + FromOrchestra::Communication { + msg: AvailabilityRecoveryMessage::RecoverAvailableData( + receipt, + session_index, + maybe_backing_group, + maybe_core_index, + response_sender, + ) + } => handle_recover( &mut state, &mut ctx, receipt, @@ -712,21 +767,18 @@ impl AvailabilityRecoverySubsystem { erasure_task_tx.clone(), recovery_strategy_kind.clone(), bypass_availability_store, - post_recovery_check.clone() - ).await { - gum::warn!( - target: LOG_TARGET, - err = ?e, - "Error handling a recovery request", - ); - } - } + post_recovery_check.clone(), + maybe_core_index, + req_v1_protocol_name.clone(), + req_v2_protocol_name.clone(), + ).await } - } + }, + Err(e) => Err(Error::SubsystemReceive(e)) } } in_req = recv_req => { - match in_req.into_nested().map_err(|fatal| SubsystemError::with_origin("availability-recovery", fatal))? { + match in_req { Ok(req) => { if bypass_availability_store { gum::debug!( @@ -734,40 +786,42 @@ impl AvailabilityRecoverySubsystem { "Skipping request to availability-store.", ); let _ = req.send_response(None.into()); - continue - } - match query_full_data(&mut ctx, req.payload.candidate_hash).await { - Ok(res) => { - let _ = req.send_response(res.into()); - } - Err(e) => { - gum::debug!( - target: LOG_TARGET, - err = ?e, - "Failed to query available data.", - ); - - let _ = req.send_response(None.into()); + Ok(()) + } else { + match query_full_data(&mut ctx, req.payload.candidate_hash).await { + Ok(res) => { + let _ = req.send_response(res.into()); + Ok(()) + } + Err(e) => { + let _ = req.send_response(None.into()); + Err(e) + } } } } - Err(jfyi) => { - gum::debug!( - target: LOG_TARGET, - error = ?jfyi, - "Decoding incoming request failed" - ); - continue - } + Err(e) => Err(Error::IncomingRequest(e)) } } output = state.ongoing_recoveries.select_next_some() => { + let mut res = Ok(()); if let Some((candidate_hash, result)) = output { + if let Err(ref e) = result { + res = Err(Error::Recovery(e.clone())); + } + if let Ok(recovery) = CachedRecovery::try_from(result) { state.availability_lru.insert(candidate_hash, recovery); } } + + res } + }; + + // Only bubble up fatal errors, but log all of them. 
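+			// `log_error` is expected to log the error and only return `Err` for fatal ones.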
+ if let Err(e) = res { + log_error(Err(e))?; } } } @@ -835,7 +889,13 @@ async fn erasure_task_thread( Some(ErasureTask::Reconstruct(n_validators, chunks, sender)) => { let _ = sender.send(polkadot_erasure_coding::reconstruct_v1( n_validators, - chunks.values().map(|c| (&c.chunk[..], c.index.0 as usize)), + chunks.iter().map(|(c_index, chunk)| { + ( + &chunk[..], + usize::try_from(c_index.0) + .expect("usize is at least u32 bytes on all modern targets."), + ) + }), )); }, Some(ErasureTask::Reencode(n_validators, root, available_data, sender)) => { diff --git a/polkadot/node/network/availability-recovery/src/metrics.rs b/polkadot/node/network/availability-recovery/src/metrics.rs index 9f4cddc57e4..4e269df5502 100644 --- a/polkadot/node/network/availability-recovery/src/metrics.rs +++ b/polkadot/node/network/availability-recovery/src/metrics.rs @@ -14,9 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +use polkadot_node_subsystem::prometheus::HistogramVec; use polkadot_node_subsystem_util::metrics::{ self, - prometheus::{self, Counter, CounterVec, Histogram, Opts, PrometheusError, Registry, U64}, + prometheus::{ + self, prometheus::HistogramTimer, Counter, CounterVec, Histogram, Opts, PrometheusError, + Registry, U64, + }, }; /// Availability Distribution metrics. @@ -28,26 +32,61 @@ struct MetricsInner { /// Number of sent chunk requests. /// /// Gets incremented on each sent chunk requests. - chunk_requests_issued: Counter, + /// + /// Split by chunk type: + /// - `regular_chunks` + /// - `systematic_chunks` + chunk_requests_issued: CounterVec, + /// Total number of bytes recovered /// /// Gets incremented on each successful recovery recovered_bytes_total: Counter, + /// A counter for finished chunk requests. /// - /// Split by result: + /// Split by the chunk type (`regular_chunks` or `systematic_chunks`) + /// + /// Also split by result: /// - `no_such_chunk` ... peer did not have the requested chunk /// - `timeout` ... request timed out. - /// - `network_error` ... Some networking issue except timeout + /// - `error` ... Some networking issue except timeout /// - `invalid` ... Chunk was received, but not valid. /// - `success` chunk_requests_finished: CounterVec, + /// A counter for successful chunk requests, split by the network protocol version. + chunk_request_protocols: CounterVec, + + /// Number of sent available data requests. + full_data_requests_issued: Counter, + + /// Counter for finished available data requests. + /// + /// Split by the result type: + /// + /// - `no_such_data` ... peer did not have the requested data + /// - `timeout` ... request timed out. + /// - `error` ... Some networking issue except timeout + /// - `invalid` ... data was received, but not valid. + /// - `success` + full_data_requests_finished: CounterVec, + /// The duration of request to response. - time_chunk_request: Histogram, + /// + /// Split by chunk type (`regular_chunks` or `systematic_chunks`). + time_chunk_request: HistogramVec, /// The duration between the pure recovery and verification. - time_erasure_recovery: Histogram, + /// + /// Split by recovery type (`regular_chunks`, `systematic_chunks` or `full_from_backers`). + time_erasure_recovery: HistogramVec, + + /// How much time it takes to reconstruct the available data from chunks. + /// + /// Split by chunk type (`regular_chunks` or `systematic_chunks`), as the algorithms are + /// different. 
+ time_erasure_reconstruct: HistogramVec, /// How much time it takes to re-encode the data into erasure chunks in order to verify /// the root hash of the provided Merkle tree. See `reconstructed_data_matches_root`. @@ -58,6 +97,10 @@ struct MetricsInner { time_full_recovery: Histogram, /// Number of full recoveries that have been finished one way or the other. + /// + /// Split by recovery `strategy_type` (`full_from_backers, systematic_chunks, regular_chunks, + /// all`). `all` is used for failed recoveries that tried all available strategies. + /// Also split by `result` type. full_recoveries_finished: CounterVec, /// Number of full recoveries that have been started on this subsystem. @@ -73,87 +116,175 @@ impl Metrics { Metrics(None) } - /// Increment counter on fetched labels. - pub fn on_chunk_request_issued(&self) { + /// Increment counter for chunk requests. + pub fn on_chunk_request_issued(&self, chunk_type: &str) { if let Some(metrics) = &self.0 { - metrics.chunk_requests_issued.inc() + metrics.chunk_requests_issued.with_label_values(&[chunk_type]).inc() + } + } + + /// Increment counter for full data requests. + pub fn on_full_request_issued(&self) { + if let Some(metrics) = &self.0 { + metrics.full_data_requests_issued.inc() } } /// A chunk request timed out. - pub fn on_chunk_request_timeout(&self) { + pub fn on_chunk_request_timeout(&self, chunk_type: &str) { + if let Some(metrics) = &self.0 { + metrics + .chunk_requests_finished + .with_label_values(&[chunk_type, "timeout"]) + .inc() + } + } + + /// A full data request timed out. + pub fn on_full_request_timeout(&self) { if let Some(metrics) = &self.0 { - metrics.chunk_requests_finished.with_label_values(&["timeout"]).inc() + metrics.full_data_requests_finished.with_label_values(&["timeout"]).inc() } } /// A chunk request failed because validator did not have its chunk. - pub fn on_chunk_request_no_such_chunk(&self) { + pub fn on_chunk_request_no_such_chunk(&self, chunk_type: &str) { + if let Some(metrics) = &self.0 { + metrics + .chunk_requests_finished + .with_label_values(&[chunk_type, "no_such_chunk"]) + .inc() + } + } + + /// A full data request failed because the validator did not have it. + pub fn on_full_request_no_such_data(&self) { if let Some(metrics) = &self.0 { - metrics.chunk_requests_finished.with_label_values(&["no_such_chunk"]).inc() + metrics.full_data_requests_finished.with_label_values(&["no_such_data"]).inc() } } /// A chunk request failed for some non timeout related network error. - pub fn on_chunk_request_error(&self) { + pub fn on_chunk_request_error(&self, chunk_type: &str) { if let Some(metrics) = &self.0 { - metrics.chunk_requests_finished.with_label_values(&["error"]).inc() + metrics.chunk_requests_finished.with_label_values(&[chunk_type, "error"]).inc() + } + } + + /// A full data request failed for some non timeout related network error. + pub fn on_full_request_error(&self) { + if let Some(metrics) = &self.0 { + metrics.full_data_requests_finished.with_label_values(&["error"]).inc() } } /// A chunk request succeeded, but was not valid. - pub fn on_chunk_request_invalid(&self) { + pub fn on_chunk_request_invalid(&self, chunk_type: &str) { if let Some(metrics) = &self.0 { - metrics.chunk_requests_finished.with_label_values(&["invalid"]).inc() + metrics + .chunk_requests_finished + .with_label_values(&[chunk_type, "invalid"]) + .inc() + } + } + + /// A full data request succeeded, but was not valid. 
+ pub fn on_full_request_invalid(&self) { + if let Some(metrics) = &self.0 { + metrics.full_data_requests_finished.with_label_values(&["invalid"]).inc() } } /// A chunk request succeeded. - pub fn on_chunk_request_succeeded(&self) { + pub fn on_chunk_request_succeeded(&self, chunk_type: &str) { if let Some(metrics) = &self.0 { - metrics.chunk_requests_finished.with_label_values(&["success"]).inc() + metrics + .chunk_requests_finished + .with_label_values(&[chunk_type, "success"]) + .inc() + } + } + + /// A chunk response was received on the v1 protocol. + pub fn on_chunk_response_v1(&self) { + if let Some(metrics) = &self.0 { + metrics.chunk_request_protocols.with_label_values(&["v1"]).inc() + } + } + + /// A chunk response was received on the v2 protocol. + pub fn on_chunk_response_v2(&self) { + if let Some(metrics) = &self.0 { + metrics.chunk_request_protocols.with_label_values(&["v2"]).inc() + } + } + + /// A full data request succeeded. + pub fn on_full_request_succeeded(&self) { + if let Some(metrics) = &self.0 { + metrics.full_data_requests_finished.with_label_values(&["success"]).inc() } } /// Get a timer to time request/response duration. - pub fn time_chunk_request(&self) -> Option { - self.0.as_ref().map(|metrics| metrics.time_chunk_request.start_timer()) + pub fn time_chunk_request(&self, chunk_type: &str) -> Option { + self.0.as_ref().map(|metrics| { + metrics.time_chunk_request.with_label_values(&[chunk_type]).start_timer() + }) } /// Get a timer to time erasure code recover. - pub fn time_erasure_recovery(&self) -> Option { - self.0.as_ref().map(|metrics| metrics.time_erasure_recovery.start_timer()) + pub fn time_erasure_recovery(&self, chunk_type: &str) -> Option { + self.0.as_ref().map(|metrics| { + metrics.time_erasure_recovery.with_label_values(&[chunk_type]).start_timer() + }) + } + + /// Get a timer for available data reconstruction. + pub fn time_erasure_reconstruct(&self, chunk_type: &str) -> Option { + self.0.as_ref().map(|metrics| { + metrics.time_erasure_reconstruct.with_label_values(&[chunk_type]).start_timer() + }) } /// Get a timer to time chunk encoding. - pub fn time_reencode_chunks(&self) -> Option { + pub fn time_reencode_chunks(&self) -> Option { self.0.as_ref().map(|metrics| metrics.time_reencode_chunks.start_timer()) } /// Get a timer to measure the time of the complete recovery process. - pub fn time_full_recovery(&self) -> Option { + pub fn time_full_recovery(&self) -> Option { self.0.as_ref().map(|metrics| metrics.time_full_recovery.start_timer()) } /// A full recovery succeeded. - pub fn on_recovery_succeeded(&self, bytes: usize) { + pub fn on_recovery_succeeded(&self, strategy_type: &str, bytes: usize) { if let Some(metrics) = &self.0 { - metrics.full_recoveries_finished.with_label_values(&["success"]).inc(); + metrics + .full_recoveries_finished + .with_label_values(&["success", strategy_type]) + .inc(); metrics.recovered_bytes_total.inc_by(bytes as u64) } } /// A full recovery failed (data not available). - pub fn on_recovery_failed(&self) { + pub fn on_recovery_failed(&self, strategy_type: &str) { if let Some(metrics) = &self.0 { - metrics.full_recoveries_finished.with_label_values(&["failure"]).inc() + metrics + .full_recoveries_finished + .with_label_values(&["failure", strategy_type]) + .inc() } } /// A full recovery failed (data was recovered, but invalid). 
- pub fn on_recovery_invalid(&self) { + pub fn on_recovery_invalid(&self, strategy_type: &str) { if let Some(metrics) = &self.0 { - metrics.full_recoveries_finished.with_label_values(&["invalid"]).inc() + metrics + .full_recoveries_finished + .with_label_values(&["invalid", strategy_type]) + .inc() } } @@ -169,9 +300,17 @@ impl metrics::Metrics for Metrics { fn try_register(registry: &Registry) -> Result { let metrics = MetricsInner { chunk_requests_issued: prometheus::register( + CounterVec::new( + Opts::new("polkadot_parachain_availability_recovery_chunk_requests_issued", + "Total number of issued chunk requests."), + &["type"] + )?, + registry, + )?, + full_data_requests_issued: prometheus::register( Counter::new( - "polkadot_parachain_availability_recovery_chunk_requests_issued", - "Total number of issued chunk requests.", + "polkadot_parachain_availability_recovery_full_data_requests_issued", + "Total number of issued full data requests.", )?, registry, )?, @@ -188,22 +327,49 @@ impl metrics::Metrics for Metrics { "polkadot_parachain_availability_recovery_chunk_requests_finished", "Total number of chunk requests finished.", ), + &["result", "type"], + )?, + registry, + )?, + chunk_request_protocols: prometheus::register( + CounterVec::new( + Opts::new( + "polkadot_parachain_availability_recovery_chunk_request_protocols", + "Total number of successful chunk requests, mapped by the protocol version (v1 or v2).", + ), + &["protocol"], + )?, + registry, + )?, + full_data_requests_finished: prometheus::register( + CounterVec::new( + Opts::new( + "polkadot_parachain_availability_recovery_full_data_requests_finished", + "Total number of full data requests finished.", + ), &["result"], )?, registry, )?, time_chunk_request: prometheus::register( - prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( + prometheus::HistogramVec::new(prometheus::HistogramOpts::new( "polkadot_parachain_availability_recovery_time_chunk_request", "Time spent waiting for a response to a chunk request", - ))?, + ), &["type"])?, registry, )?, time_erasure_recovery: prometheus::register( - prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( + prometheus::HistogramVec::new(prometheus::HistogramOpts::new( "polkadot_parachain_availability_recovery_time_erasure_recovery", "Time spent to recover the erasure code and verify the merkle root by re-encoding as erasure chunks", - ))?, + ), &["type"])?, + registry, + )?, + time_erasure_reconstruct: prometheus::register( + prometheus::HistogramVec::new(prometheus::HistogramOpts::new( + "polkadot_parachain_availability_recovery_time_erasure_reconstruct", + "Time spent to reconstruct the data from chunks", + ), &["type"])?, registry, )?, time_reencode_chunks: prometheus::register( @@ -226,7 +392,7 @@ impl metrics::Metrics for Metrics { "polkadot_parachain_availability_recovery_recoveries_finished", "Total number of recoveries that finished.", ), - &["result"], + &["result", "strategy_type"], )?, registry, )?, diff --git a/polkadot/node/network/availability-recovery/src/task.rs b/polkadot/node/network/availability-recovery/src/task.rs deleted file mode 100644 index c300c221da5..00000000000 --- a/polkadot/node/network/availability-recovery/src/task.rs +++ /dev/null @@ -1,861 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. 
- -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Recovery task and associated strategies. - -#![warn(missing_docs)] - -use crate::{ - futures_undead::FuturesUndead, is_chunk_valid, is_unavailable, metrics::Metrics, ErasureTask, - PostRecoveryCheck, LOG_TARGET, -}; -use futures::{channel::oneshot, SinkExt}; -use parity_scale_codec::Encode; -#[cfg(not(test))] -use polkadot_node_network_protocol::request_response::CHUNK_REQUEST_TIMEOUT; -use polkadot_node_network_protocol::request_response::{ - self as req_res, outgoing::RequestError, OutgoingRequest, Recipient, Requests, -}; -use polkadot_node_primitives::{AvailableData, ErasureChunk}; -use polkadot_node_subsystem::{ - messages::{AvailabilityStoreMessage, NetworkBridgeTxMessage}, - overseer, RecoveryError, -}; -use polkadot_primitives::{AuthorityDiscoveryId, CandidateHash, Hash, ValidatorIndex}; -use rand::seq::SliceRandom; -use sc_network::{IfDisconnected, OutboundFailure, RequestFailure}; -use std::{ - collections::{HashMap, VecDeque}, - time::Duration, -}; - -// How many parallel recovery tasks should be running at once. -const N_PARALLEL: usize = 50; - -/// Time after which we consider a request to have failed -/// -/// and we should try more peers. Note in theory the request times out at the network level, -/// measurements have shown, that in practice requests might actually take longer to fail in -/// certain occasions. (The very least, authority discovery is not part of the timeout.) -/// -/// For the time being this value is the same as the timeout on the networking layer, but as this -/// timeout is more soft than the networking one, it might make sense to pick different values as -/// well. -#[cfg(not(test))] -const TIMEOUT_START_NEW_REQUESTS: Duration = CHUNK_REQUEST_TIMEOUT; -#[cfg(test)] -const TIMEOUT_START_NEW_REQUESTS: Duration = Duration::from_millis(100); - -#[async_trait::async_trait] -/// Common trait for runnable recovery strategies. -pub trait RecoveryStrategy: Send { - /// Main entry point of the strategy. - async fn run( - &mut self, - state: &mut State, - sender: &mut Sender, - common_params: &RecoveryParams, - ) -> Result; - - /// Return the name of the strategy for logging purposes. - fn display_name(&self) -> &'static str; -} - -/// Recovery parameters common to all strategies in a `RecoveryTask`. -pub struct RecoveryParams { - /// Discovery ids of `validators`. - pub validator_authority_keys: Vec, - - /// Number of validators. - pub n_validators: usize, - - /// The number of chunks needed. - pub threshold: usize, - - /// A hash of the relevant candidate. - pub candidate_hash: CandidateHash, - - /// The root of the erasure encoding of the candidate. - pub erasure_root: Hash, - - /// Metrics to report. - pub metrics: Metrics, - - /// Do not request data from availability-store. Useful for collators. - pub bypass_availability_store: bool, - - /// The type of check to perform after available data was recovered. 
- pub post_recovery_check: PostRecoveryCheck, - - /// The blake2-256 hash of the PoV. - pub pov_hash: Hash, -} - -/// Intermediate/common data that must be passed between `RecoveryStrategy`s belonging to the -/// same `RecoveryTask`. -pub struct State { - /// Chunks received so far. - received_chunks: HashMap, -} - -impl State { - fn new() -> Self { - Self { received_chunks: HashMap::new() } - } - - fn insert_chunk(&mut self, validator: ValidatorIndex, chunk: ErasureChunk) { - self.received_chunks.insert(validator, chunk); - } - - fn chunk_count(&self) -> usize { - self.received_chunks.len() - } - - /// Retrieve the local chunks held in the av-store (either 0 or 1). - async fn populate_from_av_store( - &mut self, - params: &RecoveryParams, - sender: &mut Sender, - ) -> Vec { - let (tx, rx) = oneshot::channel(); - sender - .send_message(AvailabilityStoreMessage::QueryAllChunks(params.candidate_hash, tx)) - .await; - - match rx.await { - Ok(chunks) => { - // This should either be length 1 or 0. If we had the whole data, - // we wouldn't have reached this stage. - let chunk_indices: Vec<_> = chunks.iter().map(|c| c.index).collect(); - - for chunk in chunks { - if is_chunk_valid(params, &chunk) { - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - validator_index = ?chunk.index, - "Found valid chunk on disk" - ); - self.insert_chunk(chunk.index, chunk); - } else { - gum::error!( - target: LOG_TARGET, - "Loaded invalid chunk from disk! Disk/Db corruption _very_ likely - please fix ASAP!" - ); - }; - } - - chunk_indices - }, - Err(oneshot::Canceled) => { - gum::warn!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - "Failed to reach the availability store" - ); - - vec![] - }, - } - } - - /// Launch chunk requests in parallel, according to the parameters. - async fn launch_parallel_chunk_requests( - &mut self, - params: &RecoveryParams, - sender: &mut Sender, - desired_requests_count: usize, - validators: &mut VecDeque, - requesting_chunks: &mut FuturesUndead< - Result, (ValidatorIndex, RequestError)>, - >, - ) where - Sender: overseer::AvailabilityRecoverySenderTrait, - { - let candidate_hash = ¶ms.candidate_hash; - let already_requesting_count = requesting_chunks.len(); - - let mut requests = Vec::with_capacity(desired_requests_count - already_requesting_count); - - while requesting_chunks.len() < desired_requests_count { - if let Some(validator_index) = validators.pop_back() { - let validator = params.validator_authority_keys[validator_index.0 as usize].clone(); - gum::trace!( - target: LOG_TARGET, - ?validator, - ?validator_index, - ?candidate_hash, - "Requesting chunk", - ); - - // Request data. 
- let raw_request = req_res::v1::ChunkFetchingRequest { - candidate_hash: params.candidate_hash, - index: validator_index, - }; - - let (req, res) = OutgoingRequest::new(Recipient::Authority(validator), raw_request); - requests.push(Requests::ChunkFetchingV1(req)); - - params.metrics.on_chunk_request_issued(); - let timer = params.metrics.time_chunk_request(); - - requesting_chunks.push(Box::pin(async move { - let _timer = timer; - match res.await { - Ok(req_res::v1::ChunkFetchingResponse::Chunk(chunk)) => - Ok(Some(chunk.recombine_into_chunk(&raw_request))), - Ok(req_res::v1::ChunkFetchingResponse::NoSuchChunk) => Ok(None), - Err(e) => Err((validator_index, e)), - } - })); - } else { - break - } - } - - sender - .send_message(NetworkBridgeTxMessage::SendRequests( - requests, - IfDisconnected::TryConnect, - )) - .await; - } - - /// Wait for a sufficient amount of chunks to reconstruct according to the provided `params`. - async fn wait_for_chunks( - &mut self, - params: &RecoveryParams, - validators: &mut VecDeque, - requesting_chunks: &mut FuturesUndead< - Result, (ValidatorIndex, RequestError)>, - >, - can_conclude: impl Fn(usize, usize, usize, &RecoveryParams, usize) -> bool, - ) -> (usize, usize) { - let metrics = ¶ms.metrics; - - let mut total_received_responses = 0; - let mut error_count = 0; - - // Wait for all current requests to conclude or time-out, or until we reach enough chunks. - // We also declare requests undead, once `TIMEOUT_START_NEW_REQUESTS` is reached and will - // return in that case for `launch_parallel_requests` to fill up slots again. - while let Some(request_result) = - requesting_chunks.next_with_timeout(TIMEOUT_START_NEW_REQUESTS).await - { - total_received_responses += 1; - - match request_result { - Ok(Some(chunk)) => - if is_chunk_valid(params, &chunk) { - metrics.on_chunk_request_succeeded(); - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - validator_index = ?chunk.index, - "Received valid chunk", - ); - self.insert_chunk(chunk.index, chunk); - } else { - metrics.on_chunk_request_invalid(); - error_count += 1; - }, - Ok(None) => { - metrics.on_chunk_request_no_such_chunk(); - error_count += 1; - }, - Err((validator_index, e)) => { - error_count += 1; - - gum::trace!( - target: LOG_TARGET, - candidate_hash= ?params.candidate_hash, - err = ?e, - ?validator_index, - "Failure requesting chunk", - ); - - match e { - RequestError::InvalidResponse(_) => { - metrics.on_chunk_request_invalid(); - - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - err = ?e, - ?validator_index, - "Chunk fetching response was invalid", - ); - }, - RequestError::NetworkError(err) => { - // No debug logs on general network errors - that became very spammy - // occasionally. - if let RequestFailure::Network(OutboundFailure::Timeout) = err { - metrics.on_chunk_request_timeout(); - } else { - metrics.on_chunk_request_error(); - } - - validators.push_front(validator_index); - }, - RequestError::Canceled(_) => { - metrics.on_chunk_request_error(); - - validators.push_front(validator_index); - }, - } - }, - } - - // Stop waiting for requests when we either can already recover the data - // or have gotten firm 'No' responses from enough validators. 
- if can_conclude( - validators.len(), - requesting_chunks.total_len(), - self.chunk_count(), - params, - error_count, - ) { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - received_chunks_count = ?self.chunk_count(), - requested_chunks_count = ?requesting_chunks.len(), - threshold = ?params.threshold, - "Can conclude availability for a candidate", - ); - break - } - } - - (total_received_responses, error_count) - } -} - -/// A stateful reconstruction of availability data in reference to -/// a candidate hash. -pub struct RecoveryTask { - sender: Sender, - params: RecoveryParams, - strategies: VecDeque>>, - state: State, -} - -impl RecoveryTask -where - Sender: overseer::AvailabilityRecoverySenderTrait, -{ - /// Instantiate a new recovery task. - pub fn new( - sender: Sender, - params: RecoveryParams, - strategies: VecDeque>>, - ) -> Self { - Self { sender, params, strategies, state: State::new() } - } - - async fn in_availability_store(&mut self) -> Option { - if !self.params.bypass_availability_store { - let (tx, rx) = oneshot::channel(); - self.sender - .send_message(AvailabilityStoreMessage::QueryAvailableData( - self.params.candidate_hash, - tx, - )) - .await; - - match rx.await { - Ok(Some(data)) => return Some(data), - Ok(None) => {}, - Err(oneshot::Canceled) => { - gum::warn!( - target: LOG_TARGET, - candidate_hash = ?self.params.candidate_hash, - "Failed to reach the availability store", - ) - }, - } - } - - None - } - - /// Run this recovery task to completion. It will loop through the configured strategies - /// in-order and return whenever the first one recovers the full `AvailableData`. - pub async fn run(mut self) -> Result { - if let Some(data) = self.in_availability_store().await { - return Ok(data) - } - - self.params.metrics.on_recovery_started(); - - let _timer = self.params.metrics.time_full_recovery(); - - while let Some(mut current_strategy) = self.strategies.pop_front() { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?self.params.candidate_hash, - "Starting `{}` strategy", - current_strategy.display_name(), - ); - - let res = current_strategy.run(&mut self.state, &mut self.sender, &self.params).await; - - match res { - Err(RecoveryError::Unavailable) => - if self.strategies.front().is_some() { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?self.params.candidate_hash, - "Recovery strategy `{}` did not conclude. Trying the next one.", - current_strategy.display_name(), - ); - continue - }, - Err(err) => { - match &err { - RecoveryError::Invalid => self.params.metrics.on_recovery_invalid(), - _ => self.params.metrics.on_recovery_failed(), - } - return Err(err) - }, - Ok(data) => { - self.params.metrics.on_recovery_succeeded(data.encoded_size()); - return Ok(data) - }, - } - } - - // We have no other strategies to try. - gum::warn!( - target: LOG_TARGET, - candidate_hash = ?self.params.candidate_hash, - "Recovery of available data failed.", - ); - self.params.metrics.on_recovery_failed(); - - Err(RecoveryError::Unavailable) - } -} - -/// `RecoveryStrategy` that sequentially tries to fetch the full `AvailableData` from -/// already-connected validators in the configured validator set. -pub struct FetchFull { - params: FetchFullParams, -} - -pub struct FetchFullParams { - /// Validators that will be used for fetching the data. - pub validators: Vec, - /// Channel to the erasure task handler. - pub erasure_task_tx: futures::channel::mpsc::Sender, -} - -impl FetchFull { - /// Create a new `FetchFull` recovery strategy. 
- pub fn new(mut params: FetchFullParams) -> Self { - params.validators.shuffle(&mut rand::thread_rng()); - Self { params } - } -} - -#[async_trait::async_trait] -impl RecoveryStrategy for FetchFull { - fn display_name(&self) -> &'static str { - "Full recovery from backers" - } - - async fn run( - &mut self, - _: &mut State, - sender: &mut Sender, - common_params: &RecoveryParams, - ) -> Result { - loop { - // Pop the next validator, and proceed to next fetch_chunks_task if we're out. - let validator_index = - self.params.validators.pop().ok_or_else(|| RecoveryError::Unavailable)?; - - // Request data. - let (req, response) = OutgoingRequest::new( - Recipient::Authority( - common_params.validator_authority_keys[validator_index.0 as usize].clone(), - ), - req_res::v1::AvailableDataFetchingRequest { - candidate_hash: common_params.candidate_hash, - }, - ); - - sender - .send_message(NetworkBridgeTxMessage::SendRequests( - vec![Requests::AvailableDataFetchingV1(req)], - IfDisconnected::ImmediateError, - )) - .await; - - match response.await { - Ok(req_res::v1::AvailableDataFetchingResponse::AvailableData(data)) => { - let maybe_data = match common_params.post_recovery_check { - PostRecoveryCheck::Reencode => { - let (reencode_tx, reencode_rx) = oneshot::channel(); - self.params - .erasure_task_tx - .send(ErasureTask::Reencode( - common_params.n_validators, - common_params.erasure_root, - data, - reencode_tx, - )) - .await - .map_err(|_| RecoveryError::ChannelClosed)?; - - reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)? - }, - PostRecoveryCheck::PovHash => - (data.pov.hash() == common_params.pov_hash).then_some(data), - }; - - match maybe_data { - Some(data) => { - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - "Received full data", - ); - - return Ok(data) - }, - None => { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - ?validator_index, - "Invalid data response", - ); - - // it doesn't help to report the peer with req/res. - // we'll try the next backer. - }, - }; - }, - Ok(req_res::v1::AvailableDataFetchingResponse::NoSuchData) => {}, - Err(e) => gum::debug!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - ?validator_index, - err = ?e, - "Error fetching full available data." - ), - } - } - } -} - -/// `RecoveryStrategy` that requests chunks from validators, in parallel. -pub struct FetchChunks { - /// How many requests have been unsuccessful so far. - error_count: usize, - /// Total number of responses that have been received, including failed ones. - total_received_responses: usize, - /// Collection of in-flight requests. - requesting_chunks: FuturesUndead, (ValidatorIndex, RequestError)>>, - /// A random shuffling of the validators which indicates the order in which we connect to the - /// validators and request the chunk from them. - validators: VecDeque, - /// Channel to the erasure task handler. - erasure_task_tx: futures::channel::mpsc::Sender, -} - -/// Parameters specific to the `FetchChunks` strategy. -pub struct FetchChunksParams { - /// Total number of validators. - pub n_validators: usize, - /// Channel to the erasure task handler. - pub erasure_task_tx: futures::channel::mpsc::Sender, -} - -impl FetchChunks { - /// Instantiate a new strategy. 
- pub fn new(params: FetchChunksParams) -> Self { - let mut shuffling: Vec<_> = (0..params.n_validators) - .map(|i| ValidatorIndex(i.try_into().expect("number of validators must fit in a u32"))) - .collect(); - shuffling.shuffle(&mut rand::thread_rng()); - - Self { - error_count: 0, - total_received_responses: 0, - requesting_chunks: FuturesUndead::new(), - validators: shuffling.into(), - erasure_task_tx: params.erasure_task_tx, - } - } - - fn is_unavailable( - unrequested_validators: usize, - in_flight_requests: usize, - chunk_count: usize, - threshold: usize, - ) -> bool { - is_unavailable(chunk_count, in_flight_requests, unrequested_validators, threshold) - } - - /// Desired number of parallel requests. - /// - /// For the given threshold (total required number of chunks) get the desired number of - /// requests we want to have running in parallel at this time. - fn get_desired_request_count(&self, chunk_count: usize, threshold: usize) -> usize { - // Upper bound for parallel requests. - // We want to limit this, so requests can be processed within the timeout and we limit the - // following feedback loop: - // 1. Requests fail due to timeout - // 2. We request more chunks to make up for it - // 3. Bandwidth is spread out even more, so we get even more timeouts - // 4. We request more chunks to make up for it ... - let max_requests_boundary = std::cmp::min(N_PARALLEL, threshold); - // How many chunks are still needed? - let remaining_chunks = threshold.saturating_sub(chunk_count); - // What is the current error rate, so we can make up for it? - let inv_error_rate = - self.total_received_responses.checked_div(self.error_count).unwrap_or(0); - // Actual number of requests we want to have in flight in parallel: - std::cmp::min( - max_requests_boundary, - remaining_chunks + remaining_chunks.checked_div(inv_error_rate).unwrap_or(0), - ) - } - - async fn attempt_recovery( - &mut self, - state: &mut State, - common_params: &RecoveryParams, - ) -> Result { - let recovery_duration = common_params.metrics.time_erasure_recovery(); - - // Send request to reconstruct available data from chunks. - let (avilable_data_tx, available_data_rx) = oneshot::channel(); - self.erasure_task_tx - .send(ErasureTask::Reconstruct( - common_params.n_validators, - // Safe to leave an empty vec in place, as we're stopping the recovery process if - // this reconstruct fails. - std::mem::take(&mut state.received_chunks), - avilable_data_tx, - )) - .await - .map_err(|_| RecoveryError::ChannelClosed)?; - - let available_data_response = - available_data_rx.await.map_err(|_| RecoveryError::ChannelClosed)?; - - match available_data_response { - Ok(data) => { - let maybe_data = match common_params.post_recovery_check { - PostRecoveryCheck::Reencode => { - // Send request to re-encode the chunks and check merkle root. 
- let (reencode_tx, reencode_rx) = oneshot::channel(); - self.erasure_task_tx - .send(ErasureTask::Reencode( - common_params.n_validators, - common_params.erasure_root, - data, - reencode_tx, - )) - .await - .map_err(|_| RecoveryError::ChannelClosed)?; - - reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)?.or_else(|| { - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - erasure_root = ?common_params.erasure_root, - "Data recovery error - root mismatch", - ); - None - }) - }, - PostRecoveryCheck::PovHash => - (data.pov.hash() == common_params.pov_hash).then_some(data).or_else(|| { - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - pov_hash = ?common_params.pov_hash, - "Data recovery error - PoV hash mismatch", - ); - None - }), - }; - - if let Some(data) = maybe_data { - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - erasure_root = ?common_params.erasure_root, - "Data recovery from chunks complete", - ); - - Ok(data) - } else { - recovery_duration.map(|rd| rd.stop_and_discard()); - - Err(RecoveryError::Invalid) - } - }, - Err(err) => { - recovery_duration.map(|rd| rd.stop_and_discard()); - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - erasure_root = ?common_params.erasure_root, - ?err, - "Data recovery error ", - ); - - Err(RecoveryError::Invalid) - }, - } - } -} - -#[async_trait::async_trait] -impl RecoveryStrategy for FetchChunks { - fn display_name(&self) -> &'static str { - "Fetch chunks" - } - - async fn run( - &mut self, - state: &mut State, - sender: &mut Sender, - common_params: &RecoveryParams, - ) -> Result { - // First query the store for any chunks we've got. - if !common_params.bypass_availability_store { - let local_chunk_indices = state.populate_from_av_store(common_params, sender).await; - self.validators.retain(|i| !local_chunk_indices.contains(i)); - } - - // No need to query the validators that have the chunks we already received. - self.validators.retain(|i| !state.received_chunks.contains_key(i)); - - loop { - // If received_chunks has more than threshold entries, attempt to recover the data. - // If that fails, or a re-encoding of it doesn't match the expected erasure root, - // return Err(RecoveryError::Invalid). - // Do this before requesting any chunks because we may have enough of them coming from - // past RecoveryStrategies. 
- if state.chunk_count() >= common_params.threshold { - return self.attempt_recovery(state, common_params).await - } - - if Self::is_unavailable( - self.validators.len(), - self.requesting_chunks.total_len(), - state.chunk_count(), - common_params.threshold, - ) { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - erasure_root = ?common_params.erasure_root, - received = %state.chunk_count(), - requesting = %self.requesting_chunks.len(), - total_requesting = %self.requesting_chunks.total_len(), - n_validators = %common_params.n_validators, - "Data recovery from chunks is not possible", - ); - - return Err(RecoveryError::Unavailable) - } - - let desired_requests_count = - self.get_desired_request_count(state.chunk_count(), common_params.threshold); - let already_requesting_count = self.requesting_chunks.len(); - gum::debug!( - target: LOG_TARGET, - ?common_params.candidate_hash, - ?desired_requests_count, - error_count= ?self.error_count, - total_received = ?self.total_received_responses, - threshold = ?common_params.threshold, - ?already_requesting_count, - "Requesting availability chunks for a candidate", - ); - state - .launch_parallel_chunk_requests( - common_params, - sender, - desired_requests_count, - &mut self.validators, - &mut self.requesting_chunks, - ) - .await; - - let (total_responses, error_count) = state - .wait_for_chunks( - common_params, - &mut self.validators, - &mut self.requesting_chunks, - |unrequested_validators, reqs, chunk_count, params, _error_count| { - chunk_count >= params.threshold || - Self::is_unavailable( - unrequested_validators, - reqs, - chunk_count, - params.threshold, - ) - }, - ) - .await; - - self.total_received_responses += total_responses; - self.error_count += error_count; - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use polkadot_erasure_coding::recovery_threshold; - - #[test] - fn parallel_request_calculation_works_as_expected() { - let num_validators = 100; - let threshold = recovery_threshold(num_validators).unwrap(); - let (erasure_task_tx, _erasure_task_rx) = futures::channel::mpsc::channel(16); - - let mut fetch_chunks_task = - FetchChunks::new(FetchChunksParams { n_validators: 100, erasure_task_tx }); - assert_eq!(fetch_chunks_task.get_desired_request_count(0, threshold), threshold); - fetch_chunks_task.error_count = 1; - fetch_chunks_task.total_received_responses = 1; - // We saturate at threshold (34): - assert_eq!(fetch_chunks_task.get_desired_request_count(0, threshold), threshold); - - fetch_chunks_task.total_received_responses = 2; - // With given error rate - still saturating: - assert_eq!(fetch_chunks_task.get_desired_request_count(1, threshold), threshold); - fetch_chunks_task.total_received_responses += 8; - // error rate: 1/10 - // remaining chunks needed: threshold (34) - 9 - // expected: 24 * (1+ 1/10) = (next greater integer) = 27 - assert_eq!(fetch_chunks_task.get_desired_request_count(9, threshold), 27); - fetch_chunks_task.error_count = 0; - // With error count zero - we should fetch exactly as needed: - assert_eq!(fetch_chunks_task.get_desired_request_count(10, threshold), threshold - 10); - } -} diff --git a/polkadot/node/network/availability-recovery/src/task/mod.rs b/polkadot/node/network/availability-recovery/src/task/mod.rs new file mode 100644 index 00000000000..800a82947d6 --- /dev/null +++ b/polkadot/node/network/availability-recovery/src/task/mod.rs @@ -0,0 +1,197 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Main recovery task logic. Runs recovery strategies. + +#![warn(missing_docs)] + +mod strategy; + +pub use self::strategy::{ + FetchChunks, FetchChunksParams, FetchFull, FetchFullParams, FetchSystematicChunks, + FetchSystematicChunksParams, RecoveryStrategy, State, +}; + +#[cfg(test)] +pub use self::strategy::{REGULAR_CHUNKS_REQ_RETRY_LIMIT, SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT}; + +use crate::{metrics::Metrics, ErasureTask, PostRecoveryCheck, LOG_TARGET}; + +use parity_scale_codec::Encode; +use polkadot_node_primitives::AvailableData; +use polkadot_node_subsystem::{messages::AvailabilityStoreMessage, overseer, RecoveryError}; +use polkadot_primitives::{AuthorityDiscoveryId, CandidateHash, Hash}; +use sc_network::ProtocolName; + +use futures::channel::{mpsc, oneshot}; +use std::collections::VecDeque; + +/// Recovery parameters common to all strategies in a `RecoveryTask`. +#[derive(Clone)] +pub struct RecoveryParams { + /// Discovery ids of `validators`. + pub validator_authority_keys: Vec, + + /// Number of validators. + pub n_validators: usize, + + /// The number of regular chunks needed. + pub threshold: usize, + + /// The number of systematic chunks needed. + pub systematic_threshold: usize, + + /// A hash of the relevant candidate. + pub candidate_hash: CandidateHash, + + /// The root of the erasure encoding of the candidate. + pub erasure_root: Hash, + + /// Metrics to report. + pub metrics: Metrics, + + /// Do not request data from availability-store. Useful for collators. + pub bypass_availability_store: bool, + + /// The type of check to perform after available data was recovered. + pub post_recovery_check: PostRecoveryCheck, + + /// The blake2-256 hash of the PoV. + pub pov_hash: Hash, + + /// Protocol name for ChunkFetchingV1. + pub req_v1_protocol_name: ProtocolName, + + /// Protocol name for ChunkFetchingV2. + pub req_v2_protocol_name: ProtocolName, + + /// Whether or not chunk mapping is enabled. + pub chunk_mapping_enabled: bool, + + /// Channel to the erasure task handler. + pub erasure_task_tx: mpsc::Sender, +} + +/// A stateful reconstruction of availability data in reference to +/// a candidate hash. +pub struct RecoveryTask { + sender: Sender, + params: RecoveryParams, + strategies: VecDeque>>, + state: State, +} + +impl RecoveryTask +where + Sender: overseer::AvailabilityRecoverySenderTrait, +{ + /// Instantiate a new recovery task. 
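+	/// All strategies share the same `State`, so chunks fetched by one strategy remain
+	/// available to the strategies that run after it.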
+ pub fn new( + sender: Sender, + params: RecoveryParams, + strategies: VecDeque>>, + ) -> Self { + Self { sender, params, strategies, state: State::new() } + } + + async fn in_availability_store(&mut self) -> Option { + if !self.params.bypass_availability_store { + let (tx, rx) = oneshot::channel(); + self.sender + .send_message(AvailabilityStoreMessage::QueryAvailableData( + self.params.candidate_hash, + tx, + )) + .await; + + match rx.await { + Ok(Some(data)) => return Some(data), + Ok(None) => {}, + Err(oneshot::Canceled) => { + gum::warn!( + target: LOG_TARGET, + candidate_hash = ?self.params.candidate_hash, + "Failed to reach the availability store", + ) + }, + } + } + + None + } + + /// Run this recovery task to completion. It will loop through the configured strategies + /// in-order and return whenever the first one recovers the full `AvailableData`. + pub async fn run(mut self) -> Result { + if let Some(data) = self.in_availability_store().await { + return Ok(data) + } + + self.params.metrics.on_recovery_started(); + + let _timer = self.params.metrics.time_full_recovery(); + + while let Some(current_strategy) = self.strategies.pop_front() { + let display_name = current_strategy.display_name(); + let strategy_type = current_strategy.strategy_type(); + + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?self.params.candidate_hash, + "Starting `{}` strategy", + display_name + ); + + let res = current_strategy.run(&mut self.state, &mut self.sender, &self.params).await; + + match res { + Err(RecoveryError::Unavailable) => + if self.strategies.front().is_some() { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?self.params.candidate_hash, + "Recovery strategy `{}` did not conclude. Trying the next one.", + display_name + ); + continue + }, + Err(err) => { + match &err { + RecoveryError::Invalid => + self.params.metrics.on_recovery_invalid(strategy_type), + _ => self.params.metrics.on_recovery_failed(strategy_type), + } + return Err(err) + }, + Ok(data) => { + self.params.metrics.on_recovery_succeeded(strategy_type, data.encoded_size()); + return Ok(data) + }, + } + } + + // We have no other strategies to try. + gum::warn!( + target: LOG_TARGET, + candidate_hash = ?self.params.candidate_hash, + "Recovery of available data failed.", + ); + + self.params.metrics.on_recovery_failed("all"); + + Err(RecoveryError::Unavailable) + } +} diff --git a/polkadot/node/network/availability-recovery/src/task/strategy/chunks.rs b/polkadot/node/network/availability-recovery/src/task/strategy/chunks.rs new file mode 100644 index 00000000000..b6376a5b543 --- /dev/null +++ b/polkadot/node/network/availability-recovery/src/task/strategy/chunks.rs @@ -0,0 +1,335 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
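+
+//! `FetchChunks` strategy: recover the available data by fetching erasure chunks from
+//! arbitrary validators, in parallel.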
+ +use crate::{ + futures_undead::FuturesUndead, + task::{ + strategy::{ + do_post_recovery_check, is_unavailable, OngoingRequests, N_PARALLEL, + REGULAR_CHUNKS_REQ_RETRY_LIMIT, + }, + RecoveryParams, State, + }, + ErasureTask, RecoveryStrategy, LOG_TARGET, +}; + +use polkadot_node_primitives::AvailableData; +use polkadot_node_subsystem::{overseer, RecoveryError}; +use polkadot_primitives::ValidatorIndex; + +use futures::{channel::oneshot, SinkExt}; +use rand::seq::SliceRandom; +use std::collections::VecDeque; + +/// Parameters specific to the `FetchChunks` strategy. +pub struct FetchChunksParams { + pub n_validators: usize, +} + +/// `RecoveryStrategy` that requests chunks from validators, in parallel. +pub struct FetchChunks { + /// How many requests have been unsuccessful so far. + error_count: usize, + /// Total number of responses that have been received, including failed ones. + total_received_responses: usize, + /// A shuffled array of validator indices. + validators: VecDeque, + /// Collection of in-flight requests. + requesting_chunks: OngoingRequests, +} + +impl FetchChunks { + /// Instantiate a new strategy. + pub fn new(params: FetchChunksParams) -> Self { + // Shuffle the validators to make sure that we don't request chunks from the same + // validators over and over. + let mut validators: VecDeque = + (0..params.n_validators).map(|i| ValidatorIndex(i as u32)).collect(); + validators.make_contiguous().shuffle(&mut rand::thread_rng()); + + Self { + error_count: 0, + total_received_responses: 0, + validators, + requesting_chunks: FuturesUndead::new(), + } + } + + fn is_unavailable( + unrequested_validators: usize, + in_flight_requests: usize, + chunk_count: usize, + threshold: usize, + ) -> bool { + is_unavailable(chunk_count, in_flight_requests, unrequested_validators, threshold) + } + + /// Desired number of parallel requests. + /// + /// For the given threshold (total required number of chunks) get the desired number of + /// requests we want to have running in parallel at this time. + fn get_desired_request_count(&self, chunk_count: usize, threshold: usize) -> usize { + // Upper bound for parallel requests. + // We want to limit this, so requests can be processed within the timeout and we limit the + // following feedback loop: + // 1. Requests fail due to timeout + // 2. We request more chunks to make up for it + // 3. Bandwidth is spread out even more, so we get even more timeouts + // 4. We request more chunks to make up for it ... + let max_requests_boundary = std::cmp::min(N_PARALLEL, threshold); + // How many chunks are still needed? + let remaining_chunks = threshold.saturating_sub(chunk_count); + // What is the current error rate, so we can make up for it? + let inv_error_rate = + self.total_received_responses.checked_div(self.error_count).unwrap_or(0); + // Actual number of requests we want to have in flight in parallel: + std::cmp::min( + max_requests_boundary, + remaining_chunks + remaining_chunks.checked_div(inv_error_rate).unwrap_or(0), + ) + } + + async fn attempt_recovery( + &mut self, + state: &mut State, + common_params: &RecoveryParams, + ) -> Result { + let recovery_duration = common_params + .metrics + .time_erasure_recovery(RecoveryStrategy::::strategy_type(self)); + + // Send request to reconstruct available data from chunks. 
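+		// `received_chunks` is keyed by chunk index, which is the form the reconstruction
+		// algorithm expects.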
+		let (available_data_tx, available_data_rx) = oneshot::channel();
+
+		let mut erasure_task_tx = common_params.erasure_task_tx.clone();
+		erasure_task_tx
+			.send(ErasureTask::Reconstruct(
+				common_params.n_validators,
+				// Safe to leave an empty vec in place, as we're stopping the recovery process if
+				// this reconstruct fails.
+				std::mem::take(&mut state.received_chunks)
+					.into_iter()
+					.map(|(c_index, chunk)| (c_index, chunk.chunk))
+					.collect(),
+				available_data_tx,
+			))
+			.await
+			.map_err(|_| RecoveryError::ChannelClosed)?;
+
+		let available_data_response =
+			available_data_rx.await.map_err(|_| RecoveryError::ChannelClosed)?;
+
+		match available_data_response {
+			// Attempt post-recovery check.
+			Ok(data) => do_post_recovery_check(common_params, data)
+				.await
+				.map_err(|e| {
+					recovery_duration.map(|rd| rd.stop_and_discard());
+					e
+				})
+				.map(|data| {
+					gum::trace!(
+						target: LOG_TARGET,
+						candidate_hash = ?common_params.candidate_hash,
+						erasure_root = ?common_params.erasure_root,
+						"Data recovery from chunks complete",
+					);
+					data
+				}),
+			Err(err) => {
+				recovery_duration.map(|rd| rd.stop_and_discard());
+				gum::debug!(
+					target: LOG_TARGET,
+					candidate_hash = ?common_params.candidate_hash,
+					erasure_root = ?common_params.erasure_root,
+					?err,
+					"Data recovery error",
+				);
+
+				Err(RecoveryError::Invalid)
+			},
+		}
+	}
+}
+
+#[async_trait::async_trait]
+impl RecoveryStrategy for FetchChunks {
+	fn display_name(&self) -> &'static str {
+		"Fetch chunks"
+	}
+
+	fn strategy_type(&self) -> &'static str {
+		"regular_chunks"
+	}
+
+	async fn run(
+		mut self: Box,
+		state: &mut State,
+		sender: &mut Sender,
+		common_params: &RecoveryParams,
+	) -> Result {
+		// First query the store for any chunks we've got.
+		if !common_params.bypass_availability_store {
+			let local_chunk_indices = state.populate_from_av_store(common_params, sender).await;
+			self.validators.retain(|validator_index| {
+				!local_chunk_indices.iter().any(|(v_index, _)| v_index == validator_index)
+			});
+		}
+
+		// No need to query the validators that have the chunks we already received or that we know
+		// don't have the data from previous strategies.
+		self.validators.retain(|v_index| {
+			!state.received_chunks.values().any(|c| v_index == &c.validator_index) &&
+				state.can_retry_request(
+					&(common_params.validator_authority_keys[v_index.0 as usize].clone(), *v_index),
+					REGULAR_CHUNKS_REQ_RETRY_LIMIT,
+				)
+		});
+
+		// Safe to `take` here, as we're consuming `self` anyway and we're not using the
+		// `validators` field in other methods.
+		let mut validators_queue: VecDeque<_> = std::mem::take(&mut self.validators)
+			.into_iter()
+			.map(|validator_index| {
+				(
+					common_params.validator_authority_keys[validator_index.0 as usize].clone(),
+					validator_index,
+				)
+			})
+			.collect();
+
+		loop {
+			// If received_chunks has more than threshold entries, attempt to recover the data.
+			// If that fails, or a re-encoding of it doesn't match the expected erasure root,
+			// return Err(RecoveryError::Invalid).
+			// Do this before requesting any chunks because we may have enough of them coming from
+			// past RecoveryStrategies. 
+ if state.chunk_count() >= common_params.threshold {
+ return self.attempt_recovery::(state, common_params).await
+ }
+
+ if Self::is_unavailable(
+ validators_queue.len(),
+ self.requesting_chunks.total_len(),
+ state.chunk_count(),
+ common_params.threshold,
+ ) {
+ gum::debug!(
+ target: LOG_TARGET,
+ candidate_hash = ?common_params.candidate_hash,
+ erasure_root = ?common_params.erasure_root,
+ received = %state.chunk_count(),
+ requesting = %self.requesting_chunks.len(),
+ total_requesting = %self.requesting_chunks.total_len(),
+ n_validators = %common_params.n_validators,
+ "Data recovery from chunks is not possible",
+ );
+
+ return Err(RecoveryError::Unavailable)
+ }
+
+ let desired_requests_count =
+ self.get_desired_request_count(state.chunk_count(), common_params.threshold);
+ let already_requesting_count = self.requesting_chunks.len();
+ gum::debug!(
+ target: LOG_TARGET,
+ ?common_params.candidate_hash,
+ ?desired_requests_count,
+ error_count = ?self.error_count,
+ total_received = ?self.total_received_responses,
+ threshold = ?common_params.threshold,
+ ?already_requesting_count,
+ "Requesting availability chunks for a candidate",
+ );
+
+ let strategy_type = RecoveryStrategy::::strategy_type(&*self);
+
+ state
+ .launch_parallel_chunk_requests(
+ strategy_type,
+ common_params,
+ sender,
+ desired_requests_count,
+ &mut validators_queue,
+ &mut self.requesting_chunks,
+ )
+ .await;
+
+ let (total_responses, error_count) = state
+ .wait_for_chunks(
+ strategy_type,
+ common_params,
+ REGULAR_CHUNKS_REQ_RETRY_LIMIT,
+ &mut validators_queue,
+ &mut self.requesting_chunks,
+ &mut vec![],
+ |unrequested_validators,
+ in_flight_reqs,
+ chunk_count,
+ _systematic_chunk_count| {
+ chunk_count >= common_params.threshold ||
+ Self::is_unavailable(
+ unrequested_validators,
+ in_flight_reqs,
+ chunk_count,
+ common_params.threshold,
+ )
+ },
+ )
+ .await;
+
+ self.total_received_responses += total_responses;
+ self.error_count += error_count;
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use polkadot_erasure_coding::recovery_threshold;
+
+ #[test]
+ fn test_get_desired_request_count() {
+ let n_validators = 100;
+ let threshold = recovery_threshold(n_validators).unwrap();
+
+ let mut fetch_chunks_task = FetchChunks::new(FetchChunksParams { n_validators });
+ assert_eq!(fetch_chunks_task.get_desired_request_count(0, threshold), threshold);
+ fetch_chunks_task.error_count = 1;
+ fetch_chunks_task.total_received_responses = 1;
+ // We saturate at threshold (34):
+ assert_eq!(fetch_chunks_task.get_desired_request_count(0, threshold), threshold);
+
+ // We saturate at the parallel limit.
+ assert_eq!(fetch_chunks_task.get_desired_request_count(0, N_PARALLEL + 2), N_PARALLEL);
+
+ fetch_chunks_task.total_received_responses = 2;
+ // With given error rate - still saturating:
+ assert_eq!(fetch_chunks_task.get_desired_request_count(1, threshold), threshold);
+ fetch_chunks_task.total_received_responses = 10;
+ // error rate: 1/10
+ // remaining chunks needed: threshold (34) - 9 = 25
+ // expected: 25 + 25 / 10 (integer division) = 25 + 2 = 27
+ assert_eq!(fetch_chunks_task.get_desired_request_count(9, threshold), 27);
+ // We saturate at the parallel limit.
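+ // Worked arithmetic for the next assertion: remaining = (N_PARALLEL + 9) - 9 =
+ // N_PARALLEL (50); the error-rate top-up adds 50 / (10 / 1) = 5, giving 55, which
+ // is then capped by the N_PARALLEL boundary of 50.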
+ assert_eq!(fetch_chunks_task.get_desired_request_count(9, N_PARALLEL + 9), N_PARALLEL); + + fetch_chunks_task.error_count = 0; + // With error count zero - we should fetch exactly as needed: + assert_eq!(fetch_chunks_task.get_desired_request_count(10, threshold), threshold - 10); + } +} diff --git a/polkadot/node/network/availability-recovery/src/task/strategy/full.rs b/polkadot/node/network/availability-recovery/src/task/strategy/full.rs new file mode 100644 index 00000000000..1d7fbe8ea3c --- /dev/null +++ b/polkadot/node/network/availability-recovery/src/task/strategy/full.rs @@ -0,0 +1,174 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::{ + task::{RecoveryParams, RecoveryStrategy, State}, + ErasureTask, PostRecoveryCheck, LOG_TARGET, +}; + +use polkadot_node_network_protocol::request_response::{ + self as req_res, outgoing::RequestError, OutgoingRequest, Recipient, Requests, +}; +use polkadot_node_primitives::AvailableData; +use polkadot_node_subsystem::{messages::NetworkBridgeTxMessage, overseer, RecoveryError}; +use polkadot_primitives::ValidatorIndex; +use sc_network::{IfDisconnected, OutboundFailure, RequestFailure}; + +use futures::{channel::oneshot, SinkExt}; +use rand::seq::SliceRandom; + +/// Parameters specific to the `FetchFull` strategy. +pub struct FetchFullParams { + /// Validators that will be used for fetching the data. + pub validators: Vec, +} + +/// `RecoveryStrategy` that sequentially tries to fetch the full `AvailableData` from +/// already-connected validators in the configured validator set. +pub struct FetchFull { + params: FetchFullParams, +} + +impl FetchFull { + /// Create a new `FetchFull` recovery strategy. + pub fn new(mut params: FetchFullParams) -> Self { + params.validators.shuffle(&mut rand::thread_rng()); + Self { params } + } +} + +#[async_trait::async_trait] +impl RecoveryStrategy for FetchFull { + fn display_name(&self) -> &'static str { + "Full recovery from backers" + } + + fn strategy_type(&self) -> &'static str { + "full_from_backers" + } + + async fn run( + mut self: Box, + _: &mut State, + sender: &mut Sender, + common_params: &RecoveryParams, + ) -> Result { + let strategy_type = RecoveryStrategy::::strategy_type(&*self); + + loop { + // Pop the next validator. + let validator_index = + self.params.validators.pop().ok_or_else(|| RecoveryError::Unavailable)?; + + // Request data. 
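+ // (Illustrative note: `OutgoingRequest::new` returns the request object to hand to
+ // the network bridge together with a future that resolves to this peer's response,
+ // so issuing the request and awaiting the answer are two separate steps below.)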
+ let (req, response) = OutgoingRequest::new( + Recipient::Authority( + common_params.validator_authority_keys[validator_index.0 as usize].clone(), + ), + req_res::v1::AvailableDataFetchingRequest { + candidate_hash: common_params.candidate_hash, + }, + ); + + sender + .send_message(NetworkBridgeTxMessage::SendRequests( + vec![Requests::AvailableDataFetchingV1(req)], + IfDisconnected::ImmediateError, + )) + .await; + + common_params.metrics.on_full_request_issued(); + + match response.await { + Ok(req_res::v1::AvailableDataFetchingResponse::AvailableData(data)) => { + let recovery_duration = + common_params.metrics.time_erasure_recovery(strategy_type); + let maybe_data = match common_params.post_recovery_check { + PostRecoveryCheck::Reencode => { + let (reencode_tx, reencode_rx) = oneshot::channel(); + let mut erasure_task_tx = common_params.erasure_task_tx.clone(); + + erasure_task_tx + .send(ErasureTask::Reencode( + common_params.n_validators, + common_params.erasure_root, + data, + reencode_tx, + )) + .await + .map_err(|_| RecoveryError::ChannelClosed)?; + + reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)? + }, + PostRecoveryCheck::PovHash => + (data.pov.hash() == common_params.pov_hash).then_some(data), + }; + + match maybe_data { + Some(data) => { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + "Received full data", + ); + + common_params.metrics.on_full_request_succeeded(); + return Ok(data) + }, + None => { + common_params.metrics.on_full_request_invalid(); + recovery_duration.map(|rd| rd.stop_and_discard()); + + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + ?validator_index, + "Invalid data response", + ); + + // it doesn't help to report the peer with req/res. + // we'll try the next backer. + }, + } + }, + Ok(req_res::v1::AvailableDataFetchingResponse::NoSuchData) => { + common_params.metrics.on_full_request_no_such_data(); + }, + Err(e) => { + match &e { + RequestError::Canceled(_) => common_params.metrics.on_full_request_error(), + RequestError::InvalidResponse(_) => + common_params.metrics.on_full_request_invalid(), + RequestError::NetworkError(req_failure) => { + if let RequestFailure::Network(OutboundFailure::Timeout) = req_failure { + common_params.metrics.on_full_request_timeout(); + } else { + common_params.metrics.on_full_request_error(); + } + }, + }; + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + ?validator_index, + err = ?e, + "Error fetching full available data." + ); + }, + } + } + } +} diff --git a/polkadot/node/network/availability-recovery/src/task/strategy/mod.rs b/polkadot/node/network/availability-recovery/src/task/strategy/mod.rs new file mode 100644 index 00000000000..fb31ff6aa77 --- /dev/null +++ b/polkadot/node/network/availability-recovery/src/task/strategy/mod.rs @@ -0,0 +1,1558 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <https://www.gnu.org/licenses/>.
+
+//! Recovery strategies.
+
+mod chunks;
+mod full;
+mod systematic;
+
+pub use self::{
+ chunks::{FetchChunks, FetchChunksParams},
+ full::{FetchFull, FetchFullParams},
+ systematic::{FetchSystematicChunks, FetchSystematicChunksParams},
+};
+use crate::{
+ futures_undead::FuturesUndead, ErasureTask, PostRecoveryCheck, RecoveryParams, LOG_TARGET,
+};
+
+use futures::{channel::oneshot, SinkExt};
+use parity_scale_codec::Decode;
+use polkadot_erasure_coding::branch_hash;
+#[cfg(not(test))]
+use polkadot_node_network_protocol::request_response::CHUNK_REQUEST_TIMEOUT;
+use polkadot_node_network_protocol::request_response::{
+ self as req_res, outgoing::RequestError, OutgoingRequest, Recipient, Requests,
+};
+use polkadot_node_primitives::{AvailableData, ErasureChunk};
+use polkadot_node_subsystem::{
+ messages::{AvailabilityStoreMessage, NetworkBridgeTxMessage},
+ overseer, RecoveryError,
+};
+use polkadot_primitives::{AuthorityDiscoveryId, BlakeTwo256, ChunkIndex, HashT, ValidatorIndex};
+use sc_network::{IfDisconnected, OutboundFailure, ProtocolName, RequestFailure};
+use std::{
+ collections::{BTreeMap, HashMap, VecDeque},
+ time::Duration,
+};
+
+// How many parallel chunk fetching requests should be running at once.
+const N_PARALLEL: usize = 50;
+
+/// Time after which we consider a request to have failed, and we should try more peers.
+///
+/// Note that in theory the request times out at the network level, but measurements have shown
+/// that in practice requests might actually take longer to fail on certain occasions. (At the
+/// very least, authority discovery is not part of the timeout.)
+///
+/// For the time being this value is the same as the timeout on the networking layer, but as this
+/// timeout is more soft than the networking one, it might make sense to pick different values as
+/// well.
+#[cfg(not(test))]
+const TIMEOUT_START_NEW_REQUESTS: Duration = CHUNK_REQUEST_TIMEOUT;
+#[cfg(test)]
+const TIMEOUT_START_NEW_REQUESTS: Duration = Duration::from_millis(100);
+
+/// The maximum number of times systematic chunk recovery will try making a request for a given
+/// (validator, chunk) pair, if the error was not fatal. Added so that we don't get stuck in an
+/// infinite retry loop.
+pub const SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT: u32 = 2;
+/// The maximum number of times regular chunk recovery will try making a request for a given
+/// (validator, chunk) pair, if the error was not fatal. Added so that we don't get stuck in an
+/// infinite retry loop.
+pub const REGULAR_CHUNKS_REQ_RETRY_LIMIT: u32 = 5;
+
+// Helpful type alias for tracking ongoing chunk requests.
+type OngoingRequests = FuturesUndead<(
+ AuthorityDiscoveryId,
+ ValidatorIndex,
+ Result<(Option, ProtocolName), RequestError>,
+)>;
+
+const fn is_unavailable(
+ received_chunks: usize,
+ requesting_chunks: usize,
+ unrequested_validators: usize,
+ threshold: usize,
+) -> bool {
+ received_chunks + requesting_chunks + unrequested_validators < threshold
+}
+
+/// Check validity of a chunk.
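+// (Worked example for `is_unavailable` above: with 2 chunks received, 3 requests in flight
+// and 2 validators not yet queried, at most 2 + 3 + 2 = 7 chunks can still be obtained, so
+// a threshold of 10 is unreachable - cf. `test_is_unavailable` below.)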
+fn is_chunk_valid(params: &RecoveryParams, chunk: &ErasureChunk) -> bool { + let anticipated_hash = + match branch_hash(¶ms.erasure_root, chunk.proof(), chunk.index.0 as usize) { + Ok(hash) => hash, + Err(e) => { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + chunk_index = ?chunk.index, + error = ?e, + "Invalid Merkle proof", + ); + return false + }, + }; + let erasure_chunk_hash = BlakeTwo256::hash(&chunk.chunk); + if anticipated_hash != erasure_chunk_hash { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + chunk_index = ?chunk.index, + "Merkle proof mismatch" + ); + return false + } + true +} + +/// Perform the validity checks after recovery. +async fn do_post_recovery_check( + params: &RecoveryParams, + data: AvailableData, +) -> Result { + let mut erasure_task_tx = params.erasure_task_tx.clone(); + match params.post_recovery_check { + PostRecoveryCheck::Reencode => { + // Send request to re-encode the chunks and check merkle root. + let (reencode_tx, reencode_rx) = oneshot::channel(); + erasure_task_tx + .send(ErasureTask::Reencode( + params.n_validators, + params.erasure_root, + data, + reencode_tx, + )) + .await + .map_err(|_| RecoveryError::ChannelClosed)?; + + reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)?.ok_or_else(|| { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + erasure_root = ?params.erasure_root, + "Data recovery error - root mismatch", + ); + RecoveryError::Invalid + }) + }, + PostRecoveryCheck::PovHash => { + let pov = data.pov.clone(); + (pov.hash() == params.pov_hash).then_some(data).ok_or_else(|| { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + expected_pov_hash = ?params.pov_hash, + actual_pov_hash = ?pov.hash(), + "Data recovery error - PoV hash mismatch", + ); + RecoveryError::Invalid + }) + }, + } +} + +#[async_trait::async_trait] +/// Common trait for runnable recovery strategies. +pub trait RecoveryStrategy: Send { + /// Main entry point of the strategy. + async fn run( + mut self: Box, + state: &mut State, + sender: &mut Sender, + common_params: &RecoveryParams, + ) -> Result; + + /// Return the name of the strategy for logging purposes. + fn display_name(&self) -> &'static str; + + /// Return the strategy type for use as a metric label. + fn strategy_type(&self) -> &'static str; +} + +/// Utility type used for recording the result of requesting a chunk from a validator. +enum ErrorRecord { + NonFatal(u32), + Fatal, +} + +/// Helper struct used for the `received_chunks` mapping. +/// Compared to `ErasureChunk`, it doesn't need to hold the `ChunkIndex` (because it's the key used +/// for the map) and proof, but needs to hold the `ValidatorIndex` instead. +struct Chunk { + /// The erasure-encoded chunk of data belonging to the candidate block. + chunk: Vec, + /// The validator index that corresponds to this chunk. Not always the same as the chunk index. + validator_index: ValidatorIndex, +} + +/// Intermediate/common data that must be passed between `RecoveryStrategy`s belonging to the +/// same `RecoveryTask`. +pub struct State { + /// Chunks received so far. + /// This MUST be a `BTreeMap` in order for systematic recovery to work (the algorithm assumes + /// that chunks are ordered by their index). If we ever switch this to some non-ordered + /// collection, we need to add a sort step to the systematic recovery. 
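+ // (Illustrative note: systematic recovery counts chunks via
+ // `received_chunks.range(ChunkIndex(0)..ChunkIndex(systematic_threshold))`, which relies
+ // on the ordered keys of a `BTreeMap`; a `HashMap` would not support such range queries.)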
+ received_chunks: BTreeMap, + + /// A record of errors returned when requesting a chunk from a validator. + recorded_errors: HashMap<(AuthorityDiscoveryId, ValidatorIndex), ErrorRecord>, +} + +impl State { + pub fn new() -> Self { + Self { received_chunks: BTreeMap::new(), recorded_errors: HashMap::new() } + } + + fn insert_chunk(&mut self, chunk_index: ChunkIndex, chunk: Chunk) { + self.received_chunks.insert(chunk_index, chunk); + } + + fn chunk_count(&self) -> usize { + self.received_chunks.len() + } + + fn systematic_chunk_count(&self, systematic_threshold: usize) -> usize { + self.received_chunks + .range(ChunkIndex(0)..ChunkIndex(systematic_threshold as u32)) + .count() + } + + fn record_error_fatal( + &mut self, + authority_id: AuthorityDiscoveryId, + validator_index: ValidatorIndex, + ) { + self.recorded_errors.insert((authority_id, validator_index), ErrorRecord::Fatal); + } + + fn record_error_non_fatal( + &mut self, + authority_id: AuthorityDiscoveryId, + validator_index: ValidatorIndex, + ) { + self.recorded_errors + .entry((authority_id, validator_index)) + .and_modify(|record| { + if let ErrorRecord::NonFatal(ref mut count) = record { + *count = count.saturating_add(1); + } + }) + .or_insert(ErrorRecord::NonFatal(1)); + } + + fn can_retry_request( + &self, + key: &(AuthorityDiscoveryId, ValidatorIndex), + retry_threshold: u32, + ) -> bool { + match self.recorded_errors.get(key) { + None => true, + Some(entry) => match entry { + ErrorRecord::Fatal => false, + ErrorRecord::NonFatal(count) if *count < retry_threshold => true, + ErrorRecord::NonFatal(_) => false, + }, + } + } + + /// Retrieve the local chunks held in the av-store (should be either 0 or 1). + async fn populate_from_av_store( + &mut self, + params: &RecoveryParams, + sender: &mut Sender, + ) -> Vec<(ValidatorIndex, ChunkIndex)> { + let (tx, rx) = oneshot::channel(); + sender + .send_message(AvailabilityStoreMessage::QueryAllChunks(params.candidate_hash, tx)) + .await; + + match rx.await { + Ok(chunks) => { + // This should either be length 1 or 0. If we had the whole data, + // we wouldn't have reached this stage. + let chunk_indices: Vec<_> = chunks + .iter() + .map(|(validator_index, chunk)| (*validator_index, chunk.index)) + .collect(); + + for (validator_index, chunk) in chunks { + if is_chunk_valid(params, &chunk) { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + chunk_index = ?chunk.index, + "Found valid chunk on disk" + ); + self.insert_chunk( + chunk.index, + Chunk { chunk: chunk.chunk, validator_index }, + ); + } else { + gum::error!( + target: LOG_TARGET, + "Loaded invalid chunk from disk! Disk/Db corruption _very_ likely - please fix ASAP!" + ); + }; + } + + chunk_indices + }, + Err(oneshot::Canceled) => { + gum::warn!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + "Failed to reach the availability store" + ); + + vec![] + }, + } + } + + /// Launch chunk requests in parallel, according to the parameters. 
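+ // (Illustrative note: this tops the in-flight set up to `desired_requests_count`, i.e.
+ // it launches `desired_requests_count - requesting_chunks.len()` new requests, popping
+ // recipients from the back of the `validators` queue until that target is reached.)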
+ async fn launch_parallel_chunk_requests( + &mut self, + strategy_type: &str, + params: &RecoveryParams, + sender: &mut Sender, + desired_requests_count: usize, + validators: &mut VecDeque<(AuthorityDiscoveryId, ValidatorIndex)>, + requesting_chunks: &mut OngoingRequests, + ) where + Sender: overseer::AvailabilityRecoverySenderTrait, + { + let candidate_hash = params.candidate_hash; + let already_requesting_count = requesting_chunks.len(); + + let to_launch = desired_requests_count - already_requesting_count; + let mut requests = Vec::with_capacity(to_launch); + + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + "Attempting to launch {} requests", + to_launch + ); + + while requesting_chunks.len() < desired_requests_count { + if let Some((authority_id, validator_index)) = validators.pop_back() { + gum::trace!( + target: LOG_TARGET, + ?authority_id, + ?validator_index, + ?candidate_hash, + "Requesting chunk", + ); + + // Request data. + let raw_request_v2 = + req_res::v2::ChunkFetchingRequest { candidate_hash, index: validator_index }; + let raw_request_v1 = req_res::v1::ChunkFetchingRequest::from(raw_request_v2); + + let (req, res) = OutgoingRequest::new_with_fallback( + Recipient::Authority(authority_id.clone()), + raw_request_v2, + raw_request_v1, + ); + requests.push(Requests::ChunkFetching(req)); + + params.metrics.on_chunk_request_issued(strategy_type); + let timer = params.metrics.time_chunk_request(strategy_type); + let v1_protocol_name = params.req_v1_protocol_name.clone(); + let v2_protocol_name = params.req_v2_protocol_name.clone(); + + let chunk_mapping_enabled = params.chunk_mapping_enabled; + let authority_id_clone = authority_id.clone(); + + requesting_chunks.push(Box::pin(async move { + let _timer = timer; + let res = match res.await { + Ok((bytes, protocol)) => + if v2_protocol_name == protocol { + match req_res::v2::ChunkFetchingResponse::decode(&mut &bytes[..]) { + Ok(req_res::v2::ChunkFetchingResponse::Chunk(chunk)) => + Ok((Some(chunk.into()), protocol)), + Ok(req_res::v2::ChunkFetchingResponse::NoSuchChunk) => + Ok((None, protocol)), + Err(e) => Err(RequestError::InvalidResponse(e)), + } + } else if v1_protocol_name == protocol { + // V1 protocol version must not be used when chunk mapping node + // feature is enabled, because we can't know the real index of the + // returned chunk. + // This case should never be reached as long as the + // `AvailabilityChunkMapping` feature is only enabled after the + // v1 version is removed. Still, log this. + if chunk_mapping_enabled { + gum::info!( + target: LOG_TARGET, + ?candidate_hash, + authority_id = ?authority_id_clone, + "Another validator is responding on /req_chunk/1 protocol while the availability chunk \ + mapping feature is enabled in the runtime. All validators must switch to /req_chunk/2." 
+ ); + } + + match req_res::v1::ChunkFetchingResponse::decode(&mut &bytes[..]) { + Ok(req_res::v1::ChunkFetchingResponse::Chunk(chunk)) => Ok(( + Some(chunk.recombine_into_chunk(&raw_request_v1)), + protocol, + )), + Ok(req_res::v1::ChunkFetchingResponse::NoSuchChunk) => + Ok((None, protocol)), + Err(e) => Err(RequestError::InvalidResponse(e)), + } + } else { + Err(RequestError::NetworkError(RequestFailure::UnknownProtocol)) + }, + + Err(e) => Err(e), + }; + + (authority_id, validator_index, res) + })); + } else { + break + } + } + + if requests.len() != 0 { + sender + .send_message(NetworkBridgeTxMessage::SendRequests( + requests, + IfDisconnected::TryConnect, + )) + .await; + } + } + + /// Wait for a sufficient amount of chunks to reconstruct according to the provided `params`. + async fn wait_for_chunks( + &mut self, + strategy_type: &str, + params: &RecoveryParams, + retry_threshold: u32, + validators: &mut VecDeque<(AuthorityDiscoveryId, ValidatorIndex)>, + requesting_chunks: &mut OngoingRequests, + // If supplied, these validators will be used as a backup for requesting chunks. They + // should hold all chunks. Each of them will only be used to query one chunk. + backup_validators: &mut Vec, + // Function that returns `true` when this strategy can conclude. Either if we got enough + // chunks or if it's impossible. + mut can_conclude: impl FnMut( + // Number of validators left in the queue + usize, + // Number of in flight requests + usize, + // Number of valid chunks received so far + usize, + // Number of valid systematic chunks received so far + usize, + ) -> bool, + ) -> (usize, usize) { + let metrics = ¶ms.metrics; + + let mut total_received_responses = 0; + let mut error_count = 0; + + // Wait for all current requests to conclude or time-out, or until we reach enough chunks. + // We also declare requests undead, once `TIMEOUT_START_NEW_REQUESTS` is reached and will + // return in that case for `launch_parallel_requests` to fill up slots again. + while let Some(res) = requesting_chunks.next_with_timeout(TIMEOUT_START_NEW_REQUESTS).await + { + total_received_responses += 1; + + let (authority_id, validator_index, request_result) = res; + + let mut is_error = false; + + match request_result { + Ok((maybe_chunk, protocol)) => { + match protocol { + name if name == params.req_v1_protocol_name => + params.metrics.on_chunk_response_v1(), + name if name == params.req_v2_protocol_name => + params.metrics.on_chunk_response_v2(), + _ => {}, + } + + match maybe_chunk { + Some(chunk) => + if is_chunk_valid(params, &chunk) { + metrics.on_chunk_request_succeeded(strategy_type); + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + ?authority_id, + ?validator_index, + "Received valid chunk", + ); + self.insert_chunk( + chunk.index, + Chunk { chunk: chunk.chunk, validator_index }, + ); + } else { + metrics.on_chunk_request_invalid(strategy_type); + error_count += 1; + // Record that we got an invalid chunk so that subsequent strategies + // don't try requesting this again. + self.record_error_fatal(authority_id.clone(), validator_index); + is_error = true; + }, + None => { + metrics.on_chunk_request_no_such_chunk(strategy_type); + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + ?authority_id, + ?validator_index, + "Validator did not have the chunk", + ); + error_count += 1; + // Record that the validator did not have this chunk so that subsequent + // strategies don't try requesting this again. 
+ self.record_error_fatal(authority_id.clone(), validator_index); + is_error = true; + }, + } + }, + Err(err) => { + error_count += 1; + + gum::trace!( + target: LOG_TARGET, + candidate_hash= ?params.candidate_hash, + ?err, + ?authority_id, + ?validator_index, + "Failure requesting chunk", + ); + + is_error = true; + + match err { + RequestError::InvalidResponse(_) => { + metrics.on_chunk_request_invalid(strategy_type); + + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + ?err, + ?authority_id, + ?validator_index, + "Chunk fetching response was invalid", + ); + + // Record that we got an invalid chunk so that this or + // subsequent strategies don't try requesting this again. + self.record_error_fatal(authority_id.clone(), validator_index); + }, + RequestError::NetworkError(err) => { + // No debug logs on general network errors - that became very + // spammy occasionally. + if let RequestFailure::Network(OutboundFailure::Timeout) = err { + metrics.on_chunk_request_timeout(strategy_type); + } else { + metrics.on_chunk_request_error(strategy_type); + } + + // Record that we got a non-fatal error so that this or + // subsequent strategies will retry requesting this only a + // limited number of times. + self.record_error_non_fatal(authority_id.clone(), validator_index); + }, + RequestError::Canceled(_) => { + metrics.on_chunk_request_error(strategy_type); + + // Record that we got a non-fatal error so that this or + // subsequent strategies will retry requesting this only a + // limited number of times. + self.record_error_non_fatal(authority_id.clone(), validator_index); + }, + } + }, + } + + if is_error { + // First, see if we can retry the request. + if self.can_retry_request(&(authority_id.clone(), validator_index), retry_threshold) + { + validators.push_front((authority_id, validator_index)); + } else { + // Otherwise, try requesting from a backer as a backup, if we've not already + // requested the same chunk from it. + + let position = backup_validators.iter().position(|v| { + !self.recorded_errors.contains_key(&(v.clone(), validator_index)) + }); + if let Some(position) = position { + // Use swap_remove because it's faster and we don't care about order here. 
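+ // (`Vec::swap_remove` moves the last element into the vacated slot in O(1)
+ // instead of shifting the whole tail, which is fine for an unordered backup pool.)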
+ let backer = backup_validators.swap_remove(position); + validators.push_front((backer, validator_index)); + } + } + } + + if can_conclude( + validators.len(), + requesting_chunks.total_len(), + self.chunk_count(), + self.systematic_chunk_count(params.systematic_threshold), + ) { + gum::debug!( + target: LOG_TARGET, + validators_len = validators.len(), + candidate_hash = ?params.candidate_hash, + received_chunks_count = ?self.chunk_count(), + requested_chunks_count = ?requesting_chunks.len(), + threshold = ?params.threshold, + "Can conclude availability recovery strategy", + ); + break + } + } + + (total_received_responses, error_count) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{tests::*, Metrics, RecoveryStrategy, RecoveryTask}; + use assert_matches::assert_matches; + use futures::{ + channel::mpsc::{self, UnboundedReceiver}, + executor, future, Future, FutureExt, StreamExt, + }; + use parity_scale_codec::Error as DecodingError; + use polkadot_erasure_coding::{recovery_threshold, systematic_recovery_threshold}; + use polkadot_node_network_protocol::request_response::Protocol; + use polkadot_node_primitives::{BlockData, PoV}; + use polkadot_node_subsystem::{AllMessages, TimeoutExt}; + use polkadot_node_subsystem_test_helpers::{ + derive_erasure_chunks_with_proofs_and_root, sender_receiver, TestSubsystemSender, + }; + use polkadot_primitives::{CandidateHash, HeadData, PersistedValidationData}; + use polkadot_primitives_test_helpers::dummy_hash; + use sp_keyring::Sr25519Keyring; + use std::sync::Arc; + + const TIMEOUT: Duration = Duration::from_secs(1); + + impl Default for RecoveryParams { + fn default() -> Self { + let validators = vec![ + Sr25519Keyring::Ferdie, + Sr25519Keyring::Alice.into(), + Sr25519Keyring::Bob.into(), + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::One, + Sr25519Keyring::Two, + ]; + let (erasure_task_tx, _erasure_task_rx) = mpsc::channel(10); + + Self { + validator_authority_keys: validator_authority_id(&validators), + n_validators: validators.len(), + threshold: recovery_threshold(validators.len()).unwrap(), + systematic_threshold: systematic_recovery_threshold(validators.len()).unwrap(), + candidate_hash: CandidateHash(dummy_hash()), + erasure_root: dummy_hash(), + metrics: Metrics::new_dummy(), + bypass_availability_store: false, + post_recovery_check: PostRecoveryCheck::Reencode, + pov_hash: dummy_hash(), + req_v1_protocol_name: "/req_chunk/1".into(), + req_v2_protocol_name: "/req_chunk/2".into(), + chunk_mapping_enabled: true, + erasure_task_tx, + } + } + } + + impl RecoveryParams { + fn create_chunks(&mut self) -> Vec { + let available_data = dummy_available_data(); + let (chunks, erasure_root) = derive_erasure_chunks_with_proofs_and_root( + self.n_validators, + &available_data, + |_, _| {}, + ); + + self.erasure_root = erasure_root; + self.pov_hash = available_data.pov.hash(); + + chunks + } + } + + fn dummy_available_data() -> AvailableData { + let validation_data = PersistedValidationData { + parent_head: HeadData(vec![7, 8, 9]), + relay_parent_number: Default::default(), + max_pov_size: 1024, + relay_parent_storage_root: Default::default(), + }; + + AvailableData { + validation_data, + pov: Arc::new(PoV { block_data: BlockData(vec![42; 64]) }), + } + } + + fn test_harness, TestFut: Future>( + receiver_future: impl FnOnce(UnboundedReceiver) -> RecvFut, + test: impl FnOnce(TestSubsystemSender) -> TestFut, + ) { + let (sender, receiver) = sender_receiver(); + + let test_fut = test(sender); + let receiver_future = 
+ receiver_future(receiver);
+
+ futures::pin_mut!(test_fut);
+ futures::pin_mut!(receiver_future);
+
+ executor::block_on(future::join(test_fut, receiver_future)).1
+ }
+
+ #[test]
+ fn test_recorded_errors() {
+ let retry_threshold = 2;
+ let mut state = State::new();
+
+ let alice = Sr25519Keyring::Alice.public();
+ let bob = Sr25519Keyring::Bob.public();
+ let eve = Sr25519Keyring::Eve.public();
+
+ assert!(state.can_retry_request(&(alice.into(), 0.into()), retry_threshold));
+ assert!(state.can_retry_request(&(alice.into(), 0.into()), 0));
+ state.record_error_non_fatal(alice.into(), 0.into());
+ assert!(state.can_retry_request(&(alice.into(), 0.into()), retry_threshold));
+ state.record_error_non_fatal(alice.into(), 0.into());
+ assert!(!state.can_retry_request(&(alice.into(), 0.into()), retry_threshold));
+ state.record_error_non_fatal(alice.into(), 0.into());
+ assert!(!state.can_retry_request(&(alice.into(), 0.into()), retry_threshold));
+
+ assert!(state.can_retry_request(&(alice.into(), 0.into()), 5));
+
+ state.record_error_fatal(bob.into(), 1.into());
+ assert!(!state.can_retry_request(&(bob.into(), 1.into()), retry_threshold));
+ state.record_error_non_fatal(bob.into(), 1.into());
+ assert!(!state.can_retry_request(&(bob.into(), 1.into()), retry_threshold));
+
+ assert!(state.can_retry_request(&(eve.into(), 4.into()), 0));
+ assert!(state.can_retry_request(&(eve.into(), 4.into()), retry_threshold));
+ }
+
+ #[test]
+ fn test_populate_from_av_store() {
+ let params = RecoveryParams::default();
+
+ // Failed to reach the av store
+ {
+ let params = params.clone();
+ let candidate_hash = params.candidate_hash;
+ let mut state = State::new();
+
+ test_harness(
+ |mut receiver: UnboundedReceiver| async move {
+ assert_matches!(
+ receiver.next().timeout(TIMEOUT).await.unwrap().unwrap(),
+ AllMessages::AvailabilityStore(AvailabilityStoreMessage::QueryAllChunks(hash, tx)) => {
+ assert_eq!(hash, candidate_hash);
+ drop(tx);
+ });
+ },
+ |mut sender| async move {
+ let local_chunk_indices =
+ state.populate_from_av_store(&params, &mut sender).await;
+
+ assert_eq!(state.chunk_count(), 0);
+ assert_eq!(local_chunk_indices.len(), 0);
+ },
+ );
+ }
+
+ // Found invalid chunk
+ {
+ let mut params = params.clone();
+ let candidate_hash = params.candidate_hash;
+ let mut state = State::new();
+ let chunks = params.create_chunks();
+
+ test_harness(
+ |mut receiver: UnboundedReceiver| async move {
+ assert_matches!(
+ receiver.next().timeout(TIMEOUT).await.unwrap().unwrap(),
+ AllMessages::AvailabilityStore(AvailabilityStoreMessage::QueryAllChunks(hash, tx)) => {
+ assert_eq!(hash, candidate_hash);
+ let mut chunk = chunks[0].clone();
+ chunk.index = 3.into();
+ tx.send(vec![(2.into(), chunk)]).unwrap();
+ });
+ },
+ |mut sender| async move {
+ let local_chunk_indices =
+ state.populate_from_av_store(&params, &mut sender).await;
+
+ assert_eq!(state.chunk_count(), 0);
+ assert_eq!(local_chunk_indices.len(), 1);
+ },
+ );
+ }
+
+ // Found valid chunk
+ {
+ let mut params = params.clone();
+ let candidate_hash = params.candidate_hash;
+ let mut state = State::new();
+ let chunks = params.create_chunks();
+
+ test_harness(
+ |mut receiver: UnboundedReceiver| async move {
+ assert_matches!(
+ receiver.next().timeout(TIMEOUT).await.unwrap().unwrap(),
+ AllMessages::AvailabilityStore(AvailabilityStoreMessage::QueryAllChunks(hash, tx)) => {
+ assert_eq!(hash, candidate_hash);
+ tx.send(vec![(4.into(), chunks[1].clone())]).unwrap();
+ });
+ },
+ |mut sender| async move {
+ let local_chunk_indices =
+ state.populate_from_av_store(&params, &mut sender).await;
+
+ assert_eq!(state.chunk_count(), 1);
+ assert_eq!(local_chunk_indices.len(), 1);
+ },
+ );
+ }
+ }
+
+ #[test]
+ fn test_launch_parallel_chunk_requests() {
+ let params = RecoveryParams::default();
+ let alice: AuthorityDiscoveryId = Sr25519Keyring::Alice.public().into();
+ let bob: AuthorityDiscoveryId = Sr25519Keyring::Bob.public().into();
+ let eve: AuthorityDiscoveryId = Sr25519Keyring::Eve.public().into();
+
+ // No validators to request from.
+ {
+ let params = params.clone();
+ let mut state = State::new();
+ let mut ongoing_reqs = OngoingRequests::new();
+ let mut validators = VecDeque::new();
+
+ test_harness(
+ |mut receiver: UnboundedReceiver| async move {
+ // Shouldn't send any requests.
+ assert!(receiver.next().timeout(TIMEOUT).await.unwrap().is_none());
+ },
+ |mut sender| async move {
+ state
+ .launch_parallel_chunk_requests(
+ "regular",
+ &params,
+ &mut sender,
+ 3,
+ &mut validators,
+ &mut ongoing_reqs,
+ )
+ .await;
+
+ assert_eq!(ongoing_reqs.total_len(), 0);
+ },
+ );
+ }
+
+ // Has validators but no need to request more.
+ {
+ let params = params.clone();
+ let mut state = State::new();
+ let mut ongoing_reqs = OngoingRequests::new();
+ let mut validators = VecDeque::new();
+ validators.push_back((alice.clone(), ValidatorIndex(1)));
+
+ test_harness(
+ |mut receiver: UnboundedReceiver| async move {
+ // Shouldn't send any requests.
+ assert!(receiver.next().timeout(TIMEOUT).await.unwrap().is_none());
+ },
+ |mut sender| async move {
+ state
+ .launch_parallel_chunk_requests(
+ "regular",
+ &params,
+ &mut sender,
+ 0,
+ &mut validators,
+ &mut ongoing_reqs,
+ )
+ .await;
+
+ assert_eq!(ongoing_reqs.total_len(), 0);
+ },
+ );
+ }
+
+ // Has validators but no need to request more.
+ {
+ let params = params.clone();
+ let mut state = State::new();
+ let mut ongoing_reqs = OngoingRequests::new();
+ ongoing_reqs.push(async { todo!() }.boxed());
+ ongoing_reqs.soft_cancel();
+ let mut validators = VecDeque::new();
+ validators.push_back((alice.clone(), ValidatorIndex(1)));
+
+ test_harness(
+ |mut receiver: UnboundedReceiver| async move {
+ // Shouldn't send any requests.
+ assert!(receiver.next().timeout(TIMEOUT).await.unwrap().is_none());
+ },
+ |mut sender| async move {
+ state
+ .launch_parallel_chunk_requests(
+ "regular",
+ &params,
+ &mut sender,
+ 0,
+ &mut validators,
+ &mut ongoing_reqs,
+ )
+ .await;
+
+ assert_eq!(ongoing_reqs.total_len(), 1);
+ assert_eq!(ongoing_reqs.len(), 0);
+ },
+ );
+ }
+
+ // Needs to request more.
+ {
+ let params = params.clone();
+ let mut state = State::new();
+ let mut ongoing_reqs = OngoingRequests::new();
+ ongoing_reqs.push(async { todo!() }.boxed());
+ ongoing_reqs.soft_cancel();
+ ongoing_reqs.push(async { todo!() }.boxed());
+ let mut validators = VecDeque::new();
+ validators.push_back((alice.clone(), 0.into()));
+ validators.push_back((bob, 1.into()));
+ validators.push_back((eve, 2.into()));
+
+ test_harness(
+ |mut receiver: UnboundedReceiver| async move {
+ assert_matches!(
+ receiver.next().timeout(TIMEOUT).await.unwrap().unwrap(),
+ AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendRequests(requests, _)) if requests.len() == 3
+ );
+ },
+ |mut sender| async move {
+ state
+ .launch_parallel_chunk_requests(
+ "regular",
+ &params,
+ &mut sender,
+ 10,
+ &mut validators,
+ &mut ongoing_reqs,
+ )
+ .await;
+
+ assert_eq!(ongoing_reqs.total_len(), 5);
+ assert_eq!(ongoing_reqs.len(), 4);
+ },
+ );
+ }
+
+ // Check network protocol versioning.
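+ // (Illustrative note: the scenario below checks that a chunk request is encoded for
+ // the v2 chunk-fetching protocol with an automatic fallback to v1, matching the
+ // `new_with_fallback` construction in `launch_parallel_chunk_requests`.)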
+ {
+ let params = params.clone();
+ let mut state = State::new();
+ let mut ongoing_reqs = OngoingRequests::new();
+ let mut validators = VecDeque::new();
+ validators.push_back((alice, 0.into()));
+
+ test_harness(
+ |mut receiver: UnboundedReceiver| async move {
+ match receiver.next().timeout(TIMEOUT).await.unwrap().unwrap() {
+ AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendRequests(
+ mut requests,
+ _,
+ )) => {
+ assert_eq!(requests.len(), 1);
+ // By default, we should use the new protocol version with a fallback on
+ // the older one.
+ let (protocol, request) = requests.remove(0).encode_request();
+ assert_eq!(protocol, Protocol::ChunkFetchingV2);
+ assert_eq!(
+ request.fallback_request.unwrap().1,
+ Protocol::ChunkFetchingV1
+ );
+ },
+ _ => unreachable!(),
+ }
+ },
+ |mut sender| async move {
+ state
+ .launch_parallel_chunk_requests(
+ "regular",
+ &params,
+ &mut sender,
+ 10,
+ &mut validators,
+ &mut ongoing_reqs,
+ )
+ .await;
+
+ assert_eq!(ongoing_reqs.total_len(), 1);
+ assert_eq!(ongoing_reqs.len(), 1);
+ },
+ );
+ }
+ }
+
+ #[test]
+ fn test_wait_for_chunks() {
+ let params = RecoveryParams::default();
+ let retry_threshold = 2;
+
+ // No ongoing requests.
+ {
+ let params = params.clone();
+ let mut state = State::new();
+ let mut ongoing_reqs = OngoingRequests::new();
+ let mut validators = VecDeque::new();
+
+ test_harness(
+ |mut receiver: UnboundedReceiver| async move {
+ // Shouldn't send any requests.
+ assert!(receiver.next().timeout(TIMEOUT).await.unwrap().is_none());
+ },
+ |_| async move {
+ let (total_responses, error_count) = state
+ .wait_for_chunks(
+ "regular",
+ &params,
+ retry_threshold,
+ &mut validators,
+ &mut ongoing_reqs,
+ &mut vec![],
+ |_, _, _, _| false,
+ )
+ .await;
+ assert_eq!(total_responses, 0);
+ assert_eq!(error_count, 0);
+ assert_eq!(state.chunk_count(), 0);
+ },
+ );
+ }
+
+ // Complex scenario.
+ {
+ let mut params = params.clone();
+ let chunks = params.create_chunks();
+ let mut state = State::new();
+ let mut ongoing_reqs = OngoingRequests::new();
+ ongoing_reqs.push(
+ future::ready((
+ params.validator_authority_keys[0].clone(),
+ 0.into(),
+ Ok((Some(chunks[0].clone()), "".into())),
+ ))
+ .boxed(),
+ );
+ ongoing_reqs.soft_cancel();
+ ongoing_reqs.push(
+ future::ready((
+ params.validator_authority_keys[1].clone(),
+ 1.into(),
+ Ok((Some(chunks[1].clone()), "".into())),
+ ))
+ .boxed(),
+ );
+ ongoing_reqs.push(
+ future::ready((
+ params.validator_authority_keys[2].clone(),
+ 2.into(),
+ Ok((None, "".into())),
+ ))
+ .boxed(),
+ );
+ ongoing_reqs.push(
+ future::ready((
+ params.validator_authority_keys[3].clone(),
+ 3.into(),
+ Err(RequestError::from(DecodingError::from("err"))),
+ ))
+ .boxed(),
+ );
+ ongoing_reqs.push(
+ future::ready((
+ params.validator_authority_keys[4].clone(),
+ 4.into(),
+ Err(RequestError::NetworkError(RequestFailure::NotConnected)),
+ ))
+ .boxed(),
+ );
+
+ let mut validators: VecDeque<_> = (5..params.n_validators as u32)
+ .map(|i| (params.validator_authority_keys[i as usize].clone(), i.into()))
+ .collect();
+ validators.push_back((
+ Sr25519Keyring::AliceStash.public().into(),
+ ValidatorIndex(params.n_validators as u32),
+ ));
+
+ test_harness(
+ |mut receiver: UnboundedReceiver| async move {
+ // Shouldn't send any requests.
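+ // (Letting the 1-second `TIMEOUT` expire with no message is how these tests
+ // assert that nothing was sent on the overseer channel.)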
+ assert!(receiver.next().timeout(TIMEOUT).await.unwrap().is_none());
+ },
+ |_| async move {
+ let (total_responses, error_count) = state
+ .wait_for_chunks(
+ "regular",
+ &params,
+ retry_threshold,
+ &mut validators,
+ &mut ongoing_reqs,
+ &mut vec![],
+ |_, _, _, _| false,
+ )
+ .await;
+ assert_eq!(total_responses, 5);
+ assert_eq!(error_count, 3);
+ assert_eq!(state.chunk_count(), 2);
+
+ let mut expected_validators: VecDeque<_> = (4..params.n_validators as u32)
+ .map(|i| (params.validator_authority_keys[i as usize].clone(), i.into()))
+ .collect();
+ expected_validators.push_back((
+ Sr25519Keyring::AliceStash.public().into(),
+ ValidatorIndex(params.n_validators as u32),
+ ));
+
+ assert_eq!(validators, expected_validators);
+
+ // This time we'll go over the recoverable error threshold.
+ ongoing_reqs.push(
+ future::ready((
+ params.validator_authority_keys[4].clone(),
+ 4.into(),
+ Err(RequestError::NetworkError(RequestFailure::NotConnected)),
+ ))
+ .boxed(),
+ );
+
+ let (total_responses, error_count) = state
+ .wait_for_chunks(
+ "regular",
+ &params,
+ retry_threshold,
+ &mut validators,
+ &mut ongoing_reqs,
+ &mut vec![],
+ |_, _, _, _| false,
+ )
+ .await;
+ assert_eq!(total_responses, 1);
+ assert_eq!(error_count, 1);
+ assert_eq!(state.chunk_count(), 2);
+
+ validators.pop_front();
+ let mut expected_validators: VecDeque<_> = (5..params.n_validators as u32)
+ .map(|i| (params.validator_authority_keys[i as usize].clone(), i.into()))
+ .collect();
+ expected_validators.push_back((
+ Sr25519Keyring::AliceStash.public().into(),
+ ValidatorIndex(params.n_validators as u32),
+ ));
+
+ assert_eq!(validators, expected_validators);
+
+ // Check that can_conclude returning true terminates the loop.
+ let (total_responses, error_count) = state
+ .wait_for_chunks(
+ "regular",
+ &params,
+ retry_threshold,
+ &mut validators,
+ &mut ongoing_reqs,
+ &mut vec![],
+ |_, _, _, _| true,
+ )
+ .await;
+ assert_eq!(total_responses, 0);
+ assert_eq!(error_count, 0);
+ assert_eq!(state.chunk_count(), 2);
+
+ assert_eq!(validators, expected_validators);
+ },
+ );
+ }
+
+ // Complex scenario with backups in the backing group.
+ {
+ let mut params = params.clone();
+ let chunks = params.create_chunks();
+ let mut state = State::new();
+ let mut ongoing_reqs = OngoingRequests::new();
+ ongoing_reqs.push(
+ future::ready((
+ params.validator_authority_keys[0].clone(),
+ 0.into(),
+ Ok((Some(chunks[0].clone()), "".into())),
+ ))
+ .boxed(),
+ );
+ ongoing_reqs.soft_cancel();
+ ongoing_reqs.push(
+ future::ready((
+ params.validator_authority_keys[1].clone(),
+ 1.into(),
+ Ok((Some(chunks[1].clone()), "".into())),
+ ))
+ .boxed(),
+ );
+ ongoing_reqs.push(
+ future::ready((
+ params.validator_authority_keys[2].clone(),
+ 2.into(),
+ Ok((None, "".into())),
+ ))
+ .boxed(),
+ );
+ ongoing_reqs.push(
+ future::ready((
+ params.validator_authority_keys[3].clone(),
+ 3.into(),
+ Err(RequestError::from(DecodingError::from("err"))),
+ ))
+ .boxed(),
+ );
+ ongoing_reqs.push(
+ future::ready((
+ params.validator_authority_keys[4].clone(),
+ 4.into(),
+ Err(RequestError::NetworkError(RequestFailure::NotConnected)),
+ ))
+ .boxed(),
+ );
+
+ let mut validators: VecDeque<_> = (5..params.n_validators as u32)
+ .map(|i| (params.validator_authority_keys[i as usize].clone(), i.into()))
+ .collect();
+ validators.push_back((
+ Sr25519Keyring::Eve.public().into(),
+ ValidatorIndex(params.n_validators as u32),
+ ));
+
+ let mut backup_backers = vec![
+ params.validator_authority_keys[2].clone(),
+ params.validator_authority_keys[0].clone(),
+ params.validator_authority_keys[4].clone(),
+ params.validator_authority_keys[3].clone(),
+ Sr25519Keyring::AliceStash.public().into(),
+ Sr25519Keyring::BobStash.public().into(),
+ ];
+
+ test_harness(
+ |mut receiver: UnboundedReceiver| async move {
+ // Shouldn't send any requests.
+ assert!(receiver.next().timeout(TIMEOUT).await.unwrap().is_none());
+ },
+ |_| async move {
+ let (total_responses, error_count) = state
+ .wait_for_chunks(
+ "regular",
+ &params,
+ retry_threshold,
+ &mut validators,
+ &mut ongoing_reqs,
+ &mut backup_backers,
+ |_, _, _, _| false,
+ )
+ .await;
+ assert_eq!(total_responses, 5);
+ assert_eq!(error_count, 3);
+ assert_eq!(state.chunk_count(), 2);
+
+ let mut expected_validators: VecDeque<_> = (5..params.n_validators as u32)
+ .map(|i| (params.validator_authority_keys[i as usize].clone(), i.into()))
+ .collect();
+ expected_validators.push_back((
+ Sr25519Keyring::Eve.public().into(),
+ ValidatorIndex(params.n_validators as u32),
+ ));
+ // We picked a backer as a backup for chunks 2 and 3.
+ expected_validators
+ .push_front((params.validator_authority_keys[0].clone(), 2.into()));
+ expected_validators
+ .push_front((params.validator_authority_keys[2].clone(), 3.into()));
+ expected_validators
+ .push_front((params.validator_authority_keys[4].clone(), 4.into()));
+
+ assert_eq!(validators, expected_validators);
+
+ // This time we'll go over the recoverable error threshold for chunk 4.
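+ // (With `retry_threshold` = 2, a second non-fatal network error for validator 4
+ // exhausts its retries, so a backup backer is drafted in for that chunk instead,
+ // which the assertions below verify.)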
+ ongoing_reqs.push(
+ future::ready((
+ params.validator_authority_keys[4].clone(),
+ 4.into(),
+ Err(RequestError::NetworkError(RequestFailure::NotConnected)),
+ ))
+ .boxed(),
+ );
+
+ validators.pop_front();
+
+ let (total_responses, error_count) = state
+ .wait_for_chunks(
+ "regular",
+ &params,
+ retry_threshold,
+ &mut validators,
+ &mut ongoing_reqs,
+ &mut backup_backers,
+ |_, _, _, _| false,
+ )
+ .await;
+ assert_eq!(total_responses, 1);
+ assert_eq!(error_count, 1);
+ assert_eq!(state.chunk_count(), 2);
+
+ expected_validators.pop_front();
+ expected_validators
+ .push_front((Sr25519Keyring::AliceStash.public().into(), 4.into()));
+
+ assert_eq!(validators, expected_validators);
+ },
+ );
+ }
+ }
+
+ #[test]
+ fn test_recovery_strategy_run() {
+ let params = RecoveryParams::default();
+
+ struct GoodStrategy;
+ #[async_trait::async_trait]
+ impl RecoveryStrategy for GoodStrategy {
+ fn display_name(&self) -> &'static str {
+ "GoodStrategy"
+ }
+
+ fn strategy_type(&self) -> &'static str {
+ "good_strategy"
+ }
+
+ async fn run(
+ mut self: Box,
+ _state: &mut State,
+ _sender: &mut Sender,
+ _common_params: &RecoveryParams,
+ ) -> Result {
+ Ok(dummy_available_data())
+ }
+ }
+
+ struct UnavailableStrategy;
+ #[async_trait::async_trait]
+ impl RecoveryStrategy
+ for UnavailableStrategy
+ {
+ fn display_name(&self) -> &'static str {
+ "UnavailableStrategy"
+ }
+
+ fn strategy_type(&self) -> &'static str {
+ "unavailable_strategy"
+ }
+
+ async fn run(
+ mut self: Box,
+ _state: &mut State,
+ _sender: &mut Sender,
+ _common_params: &RecoveryParams,
+ ) -> Result {
+ Err(RecoveryError::Unavailable)
+ }
+ }
+
+ struct InvalidStrategy;
+ #[async_trait::async_trait]
+ impl RecoveryStrategy
+ for InvalidStrategy
+ {
+ fn display_name(&self) -> &'static str {
+ "InvalidStrategy"
+ }
+
+ fn strategy_type(&self) -> &'static str {
+ "invalid_strategy"
+ }
+
+ async fn run(
+ mut self: Box,
+ _state: &mut State,
+ _sender: &mut Sender,
+ _common_params: &RecoveryParams,
+ ) -> Result {
+ Err(RecoveryError::Invalid)
+ }
+ }
+
+ // No recovery strategies.
+ {
+ let mut params = params.clone();
+ let strategies = VecDeque::new();
+ params.bypass_availability_store = true;
+
+ test_harness(
+ |mut receiver: UnboundedReceiver| async move {
+ // Shouldn't send any requests.
+ assert!(receiver.next().timeout(TIMEOUT).await.unwrap().is_none());
+ },
+ |sender| async move {
+ let task = RecoveryTask::new(sender, params, strategies);
+
+ assert_eq!(task.run().await.unwrap_err(), RecoveryError::Unavailable);
+ },
+ );
+ }
+
+ // If we have the data in av-store, returns early.
+ {
+ let params = params.clone();
+ let strategies = VecDeque::new();
+ let candidate_hash = params.candidate_hash;
+
+ test_harness(
+ |mut receiver: UnboundedReceiver| async move {
+ assert_matches!(
+ receiver.next().timeout(TIMEOUT).await.unwrap().unwrap(),
+ AllMessages::AvailabilityStore(AvailabilityStoreMessage::QueryAvailableData(hash, tx)) => {
+ assert_eq!(hash, candidate_hash);
+ tx.send(Some(dummy_available_data())).unwrap();
+ });
+ },
+ |sender| async move {
+ let task = RecoveryTask::new(sender, params, strategies);
+
+ assert_eq!(task.run().await.unwrap(), dummy_available_data());
+ },
+ );
+ }
+
+ // Strategy returning `RecoveryError::Invalid` will short-circuit the entire task.
+ { + let mut params = params.clone(); + params.bypass_availability_store = true; + let mut strategies: VecDeque>> = + VecDeque::new(); + strategies.push_back(Box::new(InvalidStrategy)); + strategies.push_back(Box::new(GoodStrategy)); + + test_harness( + |mut receiver: UnboundedReceiver| async move { + // Shouldn't send any requests. + assert!(receiver.next().timeout(TIMEOUT).await.unwrap().is_none()); + }, + |sender| async move { + let task = RecoveryTask::new(sender, params, strategies); + + assert_eq!(task.run().await.unwrap_err(), RecoveryError::Invalid); + }, + ); + } + + // Strategy returning `Unavailable` will fall back to the next one. + { + let params = params.clone(); + let candidate_hash = params.candidate_hash; + let mut strategies: VecDeque>> = + VecDeque::new(); + strategies.push_back(Box::new(UnavailableStrategy)); + strategies.push_back(Box::new(GoodStrategy)); + + test_harness( + |mut receiver: UnboundedReceiver| async move { + assert_matches!( + receiver.next().timeout(TIMEOUT).await.unwrap().unwrap(), + AllMessages::AvailabilityStore(AvailabilityStoreMessage::QueryAvailableData(hash, tx)) => { + assert_eq!(hash, candidate_hash); + tx.send(Some(dummy_available_data())).unwrap(); + }); + }, + |sender| async move { + let task = RecoveryTask::new(sender, params, strategies); + + assert_eq!(task.run().await.unwrap(), dummy_available_data()); + }, + ); + } + + // More complex scenario. + { + let params = params.clone(); + let candidate_hash = params.candidate_hash; + let mut strategies: VecDeque>> = + VecDeque::new(); + strategies.push_back(Box::new(UnavailableStrategy)); + strategies.push_back(Box::new(UnavailableStrategy)); + strategies.push_back(Box::new(GoodStrategy)); + strategies.push_back(Box::new(InvalidStrategy)); + + test_harness( + |mut receiver: UnboundedReceiver| async move { + assert_matches!( + receiver.next().timeout(TIMEOUT).await.unwrap().unwrap(), + AllMessages::AvailabilityStore(AvailabilityStoreMessage::QueryAvailableData(hash, tx)) => { + assert_eq!(hash, candidate_hash); + tx.send(Some(dummy_available_data())).unwrap(); + }); + }, + |sender| async move { + let task = RecoveryTask::new(sender, params, strategies); + + assert_eq!(task.run().await.unwrap(), dummy_available_data()); + }, + ); + } + } + + #[test] + fn test_is_unavailable() { + assert_eq!(is_unavailable(0, 0, 0, 0), false); + assert_eq!(is_unavailable(2, 2, 2, 0), false); + // Already reached the threshold. + assert_eq!(is_unavailable(3, 0, 10, 3), false); + assert_eq!(is_unavailable(3, 2, 0, 3), false); + assert_eq!(is_unavailable(3, 2, 10, 3), false); + // It's still possible to reach the threshold + assert_eq!(is_unavailable(0, 0, 10, 3), false); + assert_eq!(is_unavailable(0, 0, 3, 3), false); + assert_eq!(is_unavailable(1, 1, 1, 3), false); + // Not possible to reach the threshold + assert_eq!(is_unavailable(0, 0, 0, 3), true); + assert_eq!(is_unavailable(2, 3, 2, 10), true); + } +} diff --git a/polkadot/node/network/availability-recovery/src/task/strategy/systematic.rs b/polkadot/node/network/availability-recovery/src/task/strategy/systematic.rs new file mode 100644 index 00000000000..677bc2d1375 --- /dev/null +++ b/polkadot/node/network/availability-recovery/src/task/strategy/systematic.rs @@ -0,0 +1,343 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::{ + futures_undead::FuturesUndead, + task::{ + strategy::{ + do_post_recovery_check, is_unavailable, OngoingRequests, N_PARALLEL, + SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT, + }, + RecoveryParams, RecoveryStrategy, State, + }, + LOG_TARGET, +}; + +use polkadot_node_primitives::AvailableData; +use polkadot_node_subsystem::{overseer, RecoveryError}; +use polkadot_primitives::{ChunkIndex, ValidatorIndex}; + +use std::collections::VecDeque; + +/// Parameters needed for fetching systematic chunks. +pub struct FetchSystematicChunksParams { + /// Validators that hold the systematic chunks. + pub validators: Vec<(ChunkIndex, ValidatorIndex)>, + /// Validators in the backing group, to be used as a backup for requesting systematic chunks. + pub backers: Vec, +} + +/// `RecoveryStrategy` that attempts to recover the systematic chunks from the validators that +/// hold them, in order to bypass the erasure code reconstruction step, which is costly. +pub struct FetchSystematicChunks { + /// Systematic recovery threshold. + threshold: usize, + /// Validators that hold the systematic chunks. + validators: Vec<(ChunkIndex, ValidatorIndex)>, + /// Backers to be used as a backup. + backers: Vec, + /// Collection of in-flight requests. + requesting_chunks: OngoingRequests, +} + +impl FetchSystematicChunks { + /// Instantiate a new systematic chunks strategy. + pub fn new(params: FetchSystematicChunksParams) -> Self { + Self { + threshold: params.validators.len(), + validators: params.validators, + backers: params.backers, + requesting_chunks: FuturesUndead::new(), + } + } + + fn is_unavailable( + unrequested_validators: usize, + in_flight_requests: usize, + systematic_chunk_count: usize, + threshold: usize, + ) -> bool { + is_unavailable( + systematic_chunk_count, + in_flight_requests, + unrequested_validators, + threshold, + ) + } + + /// Desired number of parallel requests. + /// + /// For the given threshold (total required number of chunks) get the desired number of + /// requests we want to have running in parallel at this time. + fn get_desired_request_count(&self, chunk_count: usize, threshold: usize) -> usize { + // Upper bound for parallel requests. + let max_requests_boundary = std::cmp::min(N_PARALLEL, threshold); + // How many chunks are still needed? + let remaining_chunks = threshold.saturating_sub(chunk_count); + // Actual number of requests we want to have in flight in parallel: + // We don't have to make up for any error rate, as an error fetching a systematic chunk + // results in failure of the entire strategy. 
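+ // (Worked example: with threshold T and c systematic chunks already held, this is
+ // min(min(N_PARALLEL, T), T - c); e.g. T = 34 and c = 5 gives 29 parallel requests -
+ // cf. the unit test at the bottom of this file.)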
+ std::cmp::min(max_requests_boundary, remaining_chunks) + } + + async fn attempt_systematic_recovery( + &mut self, + state: &mut State, + common_params: &RecoveryParams, + ) -> Result { + let strategy_type = RecoveryStrategy::::strategy_type(self); + let recovery_duration = common_params.metrics.time_erasure_recovery(strategy_type); + let reconstruct_duration = common_params.metrics.time_erasure_reconstruct(strategy_type); + let chunks = state + .received_chunks + .range( + ChunkIndex(0).. + ChunkIndex( + u32::try_from(self.threshold) + .expect("validator count should not exceed u32"), + ), + ) + .map(|(_, chunk)| chunk.chunk.clone()) + .collect::>(); + + let available_data = polkadot_erasure_coding::reconstruct_from_systematic_v1( + common_params.n_validators, + chunks, + ); + + match available_data { + Ok(data) => { + drop(reconstruct_duration); + + // Attempt post-recovery check. + do_post_recovery_check(common_params, data) + .await + .map_err(|e| { + recovery_duration.map(|rd| rd.stop_and_discard()); + e + }) + .map(|data| { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + erasure_root = ?common_params.erasure_root, + "Data recovery from systematic chunks complete", + ); + data + }) + }, + Err(err) => { + reconstruct_duration.map(|rd| rd.stop_and_discard()); + recovery_duration.map(|rd| rd.stop_and_discard()); + + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + erasure_root = ?common_params.erasure_root, + ?err, + "Systematic data recovery error", + ); + + Err(RecoveryError::Invalid) + }, + } + } +} + +#[async_trait::async_trait] +impl RecoveryStrategy + for FetchSystematicChunks +{ + fn display_name(&self) -> &'static str { + "Fetch systematic chunks" + } + + fn strategy_type(&self) -> &'static str { + "systematic_chunks" + } + + async fn run( + mut self: Box, + state: &mut State, + sender: &mut Sender, + common_params: &RecoveryParams, + ) -> Result { + // First query the store for any chunks we've got. + if !common_params.bypass_availability_store { + let local_chunk_indices = state.populate_from_av_store(common_params, sender).await; + + for (_, our_c_index) in &local_chunk_indices { + // If we are among the systematic validators but hold an invalid chunk, we cannot + // perform the systematic recovery. Fall through to the next strategy. + if self.validators.iter().any(|(c_index, _)| c_index == our_c_index) && + !state.received_chunks.contains_key(our_c_index) + { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + erasure_root = ?common_params.erasure_root, + requesting = %self.requesting_chunks.len(), + total_requesting = %self.requesting_chunks.total_len(), + n_validators = %common_params.n_validators, + chunk_index = ?our_c_index, + "Systematic chunk recovery is not possible. We are among the systematic validators but hold an invalid chunk", + ); + return Err(RecoveryError::Unavailable) + } + } + } + + // No need to query the validators that have the chunks we already received or that we know + // don't have the data from previous strategies. 
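+ // (Illustrative note: with `SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT` = 2, `can_retry_request`
+ // drops a validator after a single fatal error or two non-fatal ones, per the error
+ // bookkeeping in `State`.)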
+		self.validators.retain(|(c_index, v_index)| {
+			!state.received_chunks.contains_key(c_index) &&
+				state.can_retry_request(
+					&(common_params.validator_authority_keys[v_index.0 as usize].clone(), *v_index),
+					SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT,
+				)
+		});
+
+		let mut systematic_chunk_count = state
+			.received_chunks
+			.range(ChunkIndex(0)..ChunkIndex(self.threshold as u32))
+			.count();
+
+		// Safe to `take` here, as we're consuming `self` anyway and we're not using the
+		// `validators` or `backers` fields in other methods.
+		let mut validators_queue: VecDeque<_> = std::mem::take(&mut self.validators)
+			.into_iter()
+			.map(|(_, validator_index)| {
+				(
+					common_params.validator_authority_keys[validator_index.0 as usize].clone(),
+					validator_index,
+				)
+			})
+			.collect();
+		let mut backers: Vec<_> = std::mem::take(&mut self.backers)
+			.into_iter()
+			.map(|validator_index| {
+				common_params.validator_authority_keys[validator_index.0 as usize].clone()
+			})
+			.collect();
+
+		loop {
+			// If received_chunks has `systematic_chunk_threshold` entries, attempt to recover the
+			// data.
+			if systematic_chunk_count >= self.threshold {
+				return self.attempt_systematic_recovery::<Sender>(state, common_params).await
+			}
+
+			if Self::is_unavailable(
+				validators_queue.len(),
+				self.requesting_chunks.total_len(),
+				systematic_chunk_count,
+				self.threshold,
+			) {
+				gum::debug!(
+					target: LOG_TARGET,
+					candidate_hash = ?common_params.candidate_hash,
+					erasure_root = ?common_params.erasure_root,
+					%systematic_chunk_count,
+					requesting = %self.requesting_chunks.len(),
+					total_requesting = %self.requesting_chunks.total_len(),
+					n_validators = %common_params.n_validators,
+					systematic_threshold = ?self.threshold,
+					"Data recovery from systematic chunks is not possible",
+				);
+
+				return Err(RecoveryError::Unavailable)
+			}
+
+			let desired_requests_count =
+				self.get_desired_request_count(systematic_chunk_count, self.threshold);
+			let already_requesting_count = self.requesting_chunks.len();
+			gum::debug!(
+				target: LOG_TARGET,
+				?common_params.candidate_hash,
+				?desired_requests_count,
+				total_received = ?systematic_chunk_count,
+				systematic_threshold = ?self.threshold,
+				?already_requesting_count,
+				"Requesting systematic availability chunks for a candidate",
+			);
+
+			let strategy_type = RecoveryStrategy::<Sender>::strategy_type(&*self);
+
+			state
+				.launch_parallel_chunk_requests(
+					strategy_type,
+					common_params,
+					sender,
+					desired_requests_count,
+					&mut validators_queue,
+					&mut self.requesting_chunks,
+				)
+				.await;
+
+			let _ = state
+				.wait_for_chunks(
+					strategy_type,
+					common_params,
+					SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT,
+					&mut validators_queue,
+					&mut self.requesting_chunks,
+					&mut backers,
+					|unrequested_validators,
+					 in_flight_reqs,
+					 // Don't use this chunk count, as it may contain non-systematic chunks.
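+					 // Only `new_systematic_chunk_count` below (chunks with indices under the
+					 // systematic threshold) feeds the stop condition.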
+ _chunk_count, + new_systematic_chunk_count| { + systematic_chunk_count = new_systematic_chunk_count; + + let is_unavailable = Self::is_unavailable( + unrequested_validators, + in_flight_reqs, + systematic_chunk_count, + self.threshold, + ); + + systematic_chunk_count >= self.threshold || is_unavailable + }, + ) + .await; + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use polkadot_erasure_coding::systematic_recovery_threshold; + + #[test] + fn test_get_desired_request_count() { + let num_validators = 100; + let threshold = systematic_recovery_threshold(num_validators).unwrap(); + + let systematic_chunks_task = FetchSystematicChunks::new(FetchSystematicChunksParams { + validators: vec![(1.into(), 1.into()); num_validators], + backers: vec![], + }); + assert_eq!(systematic_chunks_task.get_desired_request_count(0, threshold), threshold); + assert_eq!(systematic_chunks_task.get_desired_request_count(5, threshold), threshold - 5); + assert_eq!( + systematic_chunks_task.get_desired_request_count(num_validators * 2, threshold), + 0 + ); + assert_eq!(systematic_chunks_task.get_desired_request_count(0, N_PARALLEL * 2), N_PARALLEL); + assert_eq!(systematic_chunks_task.get_desired_request_count(N_PARALLEL, N_PARALLEL + 2), 2); + } +} diff --git a/polkadot/node/network/availability-recovery/src/tests.rs b/polkadot/node/network/availability-recovery/src/tests.rs index 6049a5a5c3a..d0a4a2d8b60 100644 --- a/polkadot/node/network/availability-recovery/src/tests.rs +++ b/polkadot/node/network/availability-recovery/src/tests.rs @@ -14,38 +14,133 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use std::{sync::Arc, time::Duration}; +use crate::task::{REGULAR_CHUNKS_REQ_RETRY_LIMIT, SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT}; + +use super::*; +use std::{result::Result, sync::Arc, time::Duration}; use assert_matches::assert_matches; use futures::{executor, future}; use futures_timer::Delay; +use rstest::rstest; use parity_scale_codec::Encode; use polkadot_node_network_protocol::request_response::{ - self as req_res, v1::AvailableDataFetchingRequest, IncomingRequest, Protocol, Recipient, - ReqProtocolNames, Requests, + self as req_res, + v1::{AvailableDataFetchingRequest, ChunkResponse}, + IncomingRequest, Protocol, Recipient, ReqProtocolNames, Requests, }; -use polkadot_node_subsystem_test_helpers::derive_erasure_chunks_with_proofs_and_root; - -use super::*; -use sc_network::{IfDisconnected, OutboundFailure, ProtocolName, RequestFailure}; - -use polkadot_node_primitives::{BlockData, PoV, Proof}; +use polkadot_node_primitives::{BlockData, ErasureChunk, PoV, Proof}; use polkadot_node_subsystem::messages::{ AllMessages, NetworkBridgeTxMessage, RuntimeApiMessage, RuntimeApiRequest, }; use polkadot_node_subsystem_test_helpers::{ - make_subsystem_context, mock::new_leaf, TestSubsystemContextHandle, + derive_erasure_chunks_with_proofs_and_root, make_subsystem_context, mock::new_leaf, + TestSubsystemContextHandle, }; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_primitives::{ - AuthorityDiscoveryId, Block, Hash, HeadData, IndexedVec, PersistedValidationData, ValidatorId, + node_features, AuthorityDiscoveryId, Block, ExecutorParams, Hash, HeadData, IndexedVec, + NodeFeatures, PersistedValidationData, SessionInfo, ValidatorId, }; use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash}; +use sc_network::{IfDisconnected, OutboundFailure, ProtocolName, RequestFailure}; +use sp_keyring::Sr25519Keyring; type VirtualOverseer = 
TestSubsystemContextHandle; +// Implement some helper constructors for the AvailabilityRecoverySubsystem + +/// Create a new instance of `AvailabilityRecoverySubsystem` which starts with a fast path to +/// request data from backers. +fn with_fast_path( + req_receiver: IncomingRequestReceiver, + req_protocol_names: &ReqProtocolNames, + metrics: Metrics, +) -> AvailabilityRecoverySubsystem { + AvailabilityRecoverySubsystem::with_recovery_strategy_kind( + req_receiver, + req_protocol_names, + metrics, + RecoveryStrategyKind::BackersFirstAlways, + ) +} + +/// Create a new instance of `AvailabilityRecoverySubsystem` which requests only chunks +fn with_chunks_only( + req_receiver: IncomingRequestReceiver, + req_protocol_names: &ReqProtocolNames, + metrics: Metrics, +) -> AvailabilityRecoverySubsystem { + AvailabilityRecoverySubsystem::with_recovery_strategy_kind( + req_receiver, + req_protocol_names, + metrics, + RecoveryStrategyKind::ChunksAlways, + ) +} + +/// Create a new instance of `AvailabilityRecoverySubsystem` which requests chunks if PoV is +/// above a threshold. +fn with_chunks_if_pov_large( + req_receiver: IncomingRequestReceiver, + req_protocol_names: &ReqProtocolNames, + metrics: Metrics, +) -> AvailabilityRecoverySubsystem { + AvailabilityRecoverySubsystem::with_recovery_strategy_kind( + req_receiver, + req_protocol_names, + metrics, + RecoveryStrategyKind::BackersFirstIfSizeLower(FETCH_CHUNKS_THRESHOLD), + ) +} + +/// Create a new instance of `AvailabilityRecoverySubsystem` which requests systematic chunks if +/// PoV is above a threshold. +fn with_systematic_chunks_if_pov_large( + req_receiver: IncomingRequestReceiver, + req_protocol_names: &ReqProtocolNames, + metrics: Metrics, +) -> AvailabilityRecoverySubsystem { + AvailabilityRecoverySubsystem::for_validator( + Some(FETCH_CHUNKS_THRESHOLD), + req_receiver, + req_protocol_names, + metrics, + ) +} + +/// Create a new instance of `AvailabilityRecoverySubsystem` which first requests full data +/// from backers, with a fallback to recover from systematic chunks. +fn with_fast_path_then_systematic_chunks( + req_receiver: IncomingRequestReceiver, + req_protocol_names: &ReqProtocolNames, + metrics: Metrics, +) -> AvailabilityRecoverySubsystem { + AvailabilityRecoverySubsystem::with_recovery_strategy_kind( + req_receiver, + req_protocol_names, + metrics, + RecoveryStrategyKind::BackersThenSystematicChunks, + ) +} + +/// Create a new instance of `AvailabilityRecoverySubsystem` which first attempts to request +/// systematic chunks, with a fallback to requesting regular chunks. 
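+/// With a systematic erasure code, the first `systematic_recovery_threshold()` chunks are the
+/// original data itself, so a successful systematic fetch skips the costly reconstruction step.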
+fn with_systematic_chunks( + req_receiver: IncomingRequestReceiver, + req_protocol_names: &ReqProtocolNames, + metrics: Metrics, +) -> AvailabilityRecoverySubsystem { + AvailabilityRecoverySubsystem::with_recovery_strategy_kind( + req_receiver, + req_protocol_names, + metrics, + RecoveryStrategyKind::SystematicChunks, + ) +} + // Deterministic genesis hash for protocol names const GENESIS_HASH: Hash = Hash::repeat_byte(0xff); @@ -61,14 +156,11 @@ fn request_receiver( receiver.0 } -fn test_harness>( +fn test_harness>( subsystem: AvailabilityRecoverySubsystem, - test: impl FnOnce(VirtualOverseer) -> T, + test: impl FnOnce(VirtualOverseer) -> Fut, ) { - let _ = env_logger::builder() - .is_test(true) - .filter(Some("polkadot_availability_recovery"), log::LevelFilter::Trace) - .try_init(); + sp_tracing::init_for_tests(); let pool = sp_core::testing::TaskExecutor::new(); @@ -138,8 +230,6 @@ async fn overseer_recv( msg } -use sp_keyring::Sr25519Keyring; - #[derive(Debug)] enum Has { No, @@ -163,27 +253,127 @@ struct TestState { validators: Vec, validator_public: IndexedVec, validator_authority_id: Vec, + validator_groups: IndexedVec>, current: Hash, candidate: CandidateReceipt, session_index: SessionIndex, + core_index: CoreIndex, + node_features: NodeFeatures, persisted_validation_data: PersistedValidationData, available_data: AvailableData, - chunks: Vec, - invalid_chunks: Vec, + chunks: IndexedVec, + invalid_chunks: IndexedVec, } impl TestState { + fn new(node_features: NodeFeatures) -> Self { + let validators = vec![ + Sr25519Keyring::Ferdie, // <- this node, role: validator + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::One, + Sr25519Keyring::Two, + ]; + + let validator_public = validator_pubkeys(&validators); + let validator_authority_id = validator_authority_id(&validators); + let validator_groups = vec![ + vec![1.into(), 0.into(), 3.into(), 4.into()], + vec![5.into(), 6.into()], + vec![2.into()], + ]; + + let current = Hash::repeat_byte(1); + + let mut candidate = dummy_candidate_receipt(dummy_hash()); + + let session_index = 10; + + let persisted_validation_data = PersistedValidationData { + parent_head: HeadData(vec![7, 8, 9]), + relay_parent_number: Default::default(), + max_pov_size: 1024, + relay_parent_storage_root: Default::default(), + }; + + let pov = PoV { block_data: BlockData(vec![42; 64]) }; + + let available_data = AvailableData { + validation_data: persisted_validation_data.clone(), + pov: Arc::new(pov), + }; + + let core_index = CoreIndex(2); + + let (chunks, erasure_root) = derive_erasure_chunks_with_proofs_and_root( + validators.len(), + &available_data, + |_, _| {}, + ); + let chunks = map_chunks(chunks, &node_features, validators.len(), core_index); + + // Mess around: + let invalid_chunks = chunks + .iter() + .cloned() + .map(|mut chunk| { + if chunk.chunk.len() >= 2 && chunk.chunk[0] != chunk.chunk[1] { + chunk.chunk[0] = chunk.chunk[1]; + } else if chunk.chunk.len() >= 1 { + chunk.chunk[0] = !chunk.chunk[0]; + } else { + chunk.proof = Proof::dummy_proof(); + } + chunk + }) + .collect(); + debug_assert_ne!(chunks, invalid_chunks); + + candidate.descriptor.erasure_root = erasure_root; + candidate.descriptor.relay_parent = Hash::repeat_byte(10); + candidate.descriptor.pov_hash = Hash::repeat_byte(3); + + Self { + validators, + validator_public, + validator_authority_id, + validator_groups: IndexedVec::>::try_from( + validator_groups, + ) + .unwrap(), + current, + candidate, + session_index, + 
core_index, + node_features, + persisted_validation_data, + available_data, + chunks, + invalid_chunks, + } + } + + fn with_empty_node_features() -> Self { + Self::new(NodeFeatures::EMPTY) + } + fn threshold(&self) -> usize { recovery_threshold(self.validators.len()).unwrap() } + fn systematic_threshold(&self) -> usize { + systematic_recovery_threshold(self.validators.len()).unwrap() + } + fn impossibility_threshold(&self) -> usize { self.validators.len() - self.threshold() + 1 } - async fn test_runtime_api(&self, virtual_overseer: &mut VirtualOverseer) { + async fn test_runtime_api_session_info(&self, virtual_overseer: &mut VirtualOverseer) { assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( @@ -199,8 +389,7 @@ impl TestState { tx.send(Ok(Some(SessionInfo { validators: self.validator_public.clone(), discovery_keys: self.validator_authority_id.clone(), - // all validators in the same group. - validator_groups: IndexedVec::>::from(vec![(0..self.validators.len()).map(|i| ValidatorIndex(i as _)).collect()]), + validator_groups: self.validator_groups.clone(), assignment_keys: vec![], n_cores: 0, zeroth_delay_tranche_width: 0, @@ -214,6 +403,38 @@ impl TestState { }))).unwrap(); } ); + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::SessionExecutorParams( + session_index, + tx, + ) + )) => { + assert_eq!(relay_parent, self.current); + assert_eq!(session_index, self.session_index); + + tx.send(Ok(Some(ExecutorParams::new()))).unwrap(); + } + ); + } + + async fn test_runtime_api_node_features(&self, virtual_overseer: &mut VirtualOverseer) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _relay_parent, + RuntimeApiRequest::NodeFeatures( + _, + tx, + ) + )) => { + tx.send(Ok( + self.node_features.clone() + )).unwrap(); + } + ); } async fn respond_to_available_data_query( @@ -239,16 +460,19 @@ impl TestState { async fn respond_to_query_all_request( &self, virtual_overseer: &mut VirtualOverseer, - send_chunk: impl Fn(usize) -> bool, + send_chunk: impl Fn(ValidatorIndex) -> bool, ) { assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::AvailabilityStore( AvailabilityStoreMessage::QueryAllChunks(_, tx) ) => { - let v = self.chunks.iter() - .filter(|c| send_chunk(c.index.0 as usize)) - .cloned() + let v = self.chunks.iter().enumerate() + .filter_map(|(val_idx, c)| if send_chunk(ValidatorIndex(val_idx as u32)) { + Some((ValidatorIndex(val_idx as u32), c.clone())) + } else { + None + }) .collect(); let _ = tx.send(v); @@ -259,16 +483,19 @@ impl TestState { async fn respond_to_query_all_request_invalid( &self, virtual_overseer: &mut VirtualOverseer, - send_chunk: impl Fn(usize) -> bool, + send_chunk: impl Fn(ValidatorIndex) -> bool, ) { assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::AvailabilityStore( AvailabilityStoreMessage::QueryAllChunks(_, tx) ) => { - let v = self.invalid_chunks.iter() - .filter(|c| send_chunk(c.index.0 as usize)) - .cloned() + let v = self.invalid_chunks.iter().enumerate() + .filter_map(|(val_idx, c)| if send_chunk(ValidatorIndex(val_idx as u32)) { + Some((ValidatorIndex(val_idx as u32), c.clone())) + } else { + None + }) .collect(); let _ = tx.send(v); @@ -276,14 +503,16 @@ impl TestState { ) } - async fn test_chunk_requests( + async fn test_chunk_requests_inner( &self, req_protocol_names: &ReqProtocolNames, 
candidate_hash: CandidateHash, virtual_overseer: &mut VirtualOverseer, n: usize, - who_has: impl Fn(usize) -> Has, - ) -> Vec, ProtocolName), RequestFailure>>> { + mut who_has: impl FnMut(ValidatorIndex) -> Has, + systematic_recovery: bool, + protocol: Protocol, + ) -> Vec, ProtocolName), RequestFailure>>> { // arbitrary order. let mut i = 0; let mut senders = Vec::new(); @@ -301,13 +530,19 @@ impl TestState { i += 1; assert_matches!( req, - Requests::ChunkFetchingV1(req) => { + Requests::ChunkFetching(req) => { assert_eq!(req.payload.candidate_hash, candidate_hash); - let validator_index = req.payload.index.0 as usize; + let validator_index = req.payload.index; + let chunk = self.chunks.get(validator_index).unwrap().clone(); + + if systematic_recovery { + assert!(chunk.index.0 as usize <= self.systematic_threshold(), "requested non-systematic chunk"); + } + let available_data = match who_has(validator_index) { Has::No => Ok(None), - Has::Yes => Ok(Some(self.chunks[validator_index].clone().into())), + Has::Yes => Ok(Some(chunk)), Has::NetworkError(e) => Err(e), Has::DoesNotReturn => { senders.push(req.pending_response); @@ -315,11 +550,29 @@ impl TestState { } }; - let _ = req.pending_response.send( + req.pending_response.send( available_data.map(|r| - (req_res::v1::ChunkFetchingResponse::from(r).encode(), req_protocol_names.get_name(Protocol::ChunkFetchingV1)) + ( + match protocol { + Protocol::ChunkFetchingV1 => + match r { + None => req_res::v1::ChunkFetchingResponse::NoSuchChunk, + Some(c) => req_res::v1::ChunkFetchingResponse::Chunk( + ChunkResponse { + chunk: c.chunk, + proof: c.proof + } + ) + }.encode(), + Protocol::ChunkFetchingV2 => + req_res::v2::ChunkFetchingResponse::from(r).encode(), + + _ => unreachable!() + }, + req_protocol_names.get_name(protocol) + ) ) - ); + ).unwrap(); } ) } @@ -329,16 +582,61 @@ impl TestState { senders } + async fn test_chunk_requests( + &self, + req_protocol_names: &ReqProtocolNames, + candidate_hash: CandidateHash, + virtual_overseer: &mut VirtualOverseer, + n: usize, + who_has: impl FnMut(ValidatorIndex) -> Has, + systematic_recovery: bool, + ) -> Vec, ProtocolName), RequestFailure>>> { + self.test_chunk_requests_inner( + req_protocol_names, + candidate_hash, + virtual_overseer, + n, + who_has, + systematic_recovery, + Protocol::ChunkFetchingV2, + ) + .await + } + + // Use legacy network protocol version. + async fn test_chunk_requests_v1( + &self, + req_protocol_names: &ReqProtocolNames, + candidate_hash: CandidateHash, + virtual_overseer: &mut VirtualOverseer, + n: usize, + who_has: impl FnMut(ValidatorIndex) -> Has, + systematic_recovery: bool, + ) -> Vec, ProtocolName), RequestFailure>>> { + self.test_chunk_requests_inner( + req_protocol_names, + candidate_hash, + virtual_overseer, + n, + who_has, + systematic_recovery, + Protocol::ChunkFetchingV1, + ) + .await + } + async fn test_full_data_requests( &self, req_protocol_names: &ReqProtocolNames, candidate_hash: CandidateHash, virtual_overseer: &mut VirtualOverseer, who_has: impl Fn(usize) -> Has, - ) -> Vec, ProtocolName), RequestFailure>>> { + group_index: GroupIndex, + ) -> Vec, ProtocolName), RequestFailure>>> { let mut senders = Vec::new(); - for _ in 0..self.validators.len() { - // Receive a request for a chunk. + let expected_validators = self.validator_groups.get(group_index).unwrap(); + for _ in 0..expected_validators.len() { + // Receive a request for the full `AvailableData`. 
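+ // Only validators from the expected backing group should be asked for the full
+ // data; the recipient is asserted against `expected_validators` below.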
assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::NetworkBridgeTx( @@ -357,6 +655,7 @@ impl TestState { .iter() .position(|a| Recipient::Authority(a.clone()) == req.peer) .unwrap(); + assert!(expected_validators.contains(&ValidatorIndex(validator_index as u32))); let available_data = match who_has(validator_index) { Has::No => Ok(None), @@ -387,95 +686,67 @@ impl TestState { } } +impl Default for TestState { + fn default() -> Self { + // Enable the chunk mapping node feature. + let mut node_features = NodeFeatures::new(); + node_features + .resize(node_features::FeatureIndex::AvailabilityChunkMapping as usize + 1, false); + node_features + .set(node_features::FeatureIndex::AvailabilityChunkMapping as u8 as usize, true); + + Self::new(node_features) + } +} + fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> IndexedVec { val_ids.iter().map(|v| v.public().into()).collect() } -fn validator_authority_id(val_ids: &[Sr25519Keyring]) -> Vec { +pub fn validator_authority_id(val_ids: &[Sr25519Keyring]) -> Vec { val_ids.iter().map(|v| v.public().into()).collect() } -impl Default for TestState { - fn default() -> Self { - let validators = vec![ - Sr25519Keyring::Ferdie, // <- this node, role: validator - Sr25519Keyring::Alice, - Sr25519Keyring::Bob, - Sr25519Keyring::Charlie, - Sr25519Keyring::Dave, - ]; - - let validator_public = validator_pubkeys(&validators); - let validator_authority_id = validator_authority_id(&validators); - - let current = Hash::repeat_byte(1); - - let mut candidate = dummy_candidate_receipt(dummy_hash()); - - let session_index = 10; - - let persisted_validation_data = PersistedValidationData { - parent_head: HeadData(vec![7, 8, 9]), - relay_parent_number: Default::default(), - max_pov_size: 1024, - relay_parent_storage_root: Default::default(), - }; - - let pov = PoV { block_data: BlockData(vec![42; 64]) }; - - let available_data = AvailableData { - validation_data: persisted_validation_data.clone(), - pov: Arc::new(pov), - }; - - let (chunks, erasure_root) = derive_erasure_chunks_with_proofs_and_root( - validators.len(), - &available_data, - |_, _| {}, - ); - // Mess around: - let invalid_chunks = chunks - .iter() - .cloned() - .map(|mut chunk| { - if chunk.chunk.len() >= 2 && chunk.chunk[0] != chunk.chunk[1] { - chunk.chunk[0] = chunk.chunk[1]; - } else if chunk.chunk.len() >= 1 { - chunk.chunk[0] = !chunk.chunk[0]; - } else { - chunk.proof = Proof::dummy_proof(); - } - chunk - }) - .collect(); - debug_assert_ne!(chunks, invalid_chunks); - - candidate.descriptor.erasure_root = erasure_root; - candidate.descriptor.relay_parent = Hash::repeat_byte(10); - - Self { - validators, - validator_public, - validator_authority_id, - current, - candidate, - session_index, - persisted_validation_data, - available_data, - chunks, - invalid_chunks, - } - } +/// Map the chunks to the validators according to the availability chunk mapping algorithm. 
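+/// (Assumed behaviour of `availability_chunk_indices`: the identity mapping when the
+/// `AvailabilityChunkMapping` node feature is disabled, and a core-index-based shuffle
+/// otherwise.)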
+fn map_chunks( + chunks: Vec, + node_features: &NodeFeatures, + n_validators: usize, + core_index: CoreIndex, +) -> IndexedVec { + let chunk_indices = + availability_chunk_indices(Some(node_features), n_validators, core_index).unwrap(); + + (0..n_validators) + .map(|val_idx| chunks[chunk_indices[val_idx].0 as usize].clone()) + .collect::>() + .into() } -#[test] -fn availability_is_recovered_from_chunks_if_no_group_provided() { +#[rstest] +#[case(true)] +#[case(false)] +fn availability_is_recovered_from_chunks_if_no_group_provided(#[case] systematic_recovery: bool) { let test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_fast_path( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + let (subsystem, threshold) = match systematic_recovery { + true => ( + with_fast_path_then_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.systematic_threshold(), + ), + false => ( + with_fast_path( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.threshold(), + ), + }; test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( @@ -495,12 +766,15 @@ fn availability_is_recovered_from_chunks_if_no_group_provided() { test_state.candidate.clone(), test_state.session_index, None, + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); @@ -512,8 +786,9 @@ fn availability_is_recovered_from_chunks_if_no_group_provided() { &req_protocol_names, candidate_hash, &mut virtual_overseer, - test_state.threshold(), + threshold, |_| Has::Yes, + systematic_recovery, ) .await; @@ -533,16 +808,31 @@ fn availability_is_recovered_from_chunks_if_no_group_provided() { new_candidate.clone(), test_state.session_index, None, + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; - test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + if systematic_recovery { + test_state + .test_chunk_requests( + &req_protocol_names, + new_candidate.hash(), + &mut virtual_overseer, + threshold, + |_| Has::No, + systematic_recovery, + ) + .await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + } + + // Even if the recovery is systematic, we'll always fall back to regular recovery, so keep + // this around. 
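+ // The regular phase only concludes `Unavailable` after `impossibility_threshold()`
+ // validators have been queried without success.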
test_state .test_chunk_requests( &req_protocol_names, @@ -550,6 +840,7 @@ fn availability_is_recovered_from_chunks_if_no_group_provided() { &mut virtual_overseer, test_state.impossibility_threshold(), |_| Has::No, + false, ) .await; @@ -559,15 +850,33 @@ fn availability_is_recovered_from_chunks_if_no_group_provided() { }); } -#[test] -fn availability_is_recovered_from_chunks_even_if_backing_group_supplied_if_chunks_only() { - let test_state = TestState::default(); +#[rstest] +#[case(true)] +#[case(false)] +fn availability_is_recovered_from_chunks_even_if_backing_group_supplied_if_chunks_only( + #[case] systematic_recovery: bool, +) { let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_chunks_only( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); - + let test_state = TestState::default(); + let (subsystem, threshold) = match systematic_recovery { + true => ( + with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.systematic_threshold(), + ), + false => ( + with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.threshold(), + ), + }; + test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( &mut virtual_overseer, @@ -586,12 +895,15 @@ fn availability_is_recovered_from_chunks_even_if_backing_group_supplied_if_chunk test_state.candidate.clone(), test_state.session_index, Some(GroupIndex(0)), + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); @@ -603,8 +915,9 @@ fn availability_is_recovered_from_chunks_even_if_backing_group_supplied_if_chunk &req_protocol_names, candidate_hash, &mut virtual_overseer, - test_state.threshold(), + threshold, |_| Has::Yes, + systematic_recovery, ) .await; @@ -623,41 +936,80 @@ fn availability_is_recovered_from_chunks_even_if_backing_group_supplied_if_chunk AvailabilityRecoveryMessage::RecoverAvailableData( new_candidate.clone(), test_state.session_index, - None, + Some(GroupIndex(1)), + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; - test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; - test_state - .test_chunk_requests( - &req_protocol_names, - new_candidate.hash(), - &mut virtual_overseer, - test_state.impossibility_threshold(), - |_| Has::No, - ) - .await; + if systematic_recovery { + test_state + .test_chunk_requests( + &req_protocol_names, + new_candidate.hash(), + &mut virtual_overseer, + threshold * SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT as usize, + |_| Has::No, + systematic_recovery, + ) + .await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + // Even if the recovery is systematic, we'll always fall back to regular recovery, so + // keep this around. + test_state + .test_chunk_requests( + &req_protocol_names, + new_candidate.hash(), + &mut virtual_overseer, + test_state.impossibility_threshold() - threshold, + |_| Has::No, + false, + ) + .await; + + // A request times out with `Unavailable` error. 
+ assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable); + } else { + test_state + .test_chunk_requests( + &req_protocol_names, + new_candidate.hash(), + &mut virtual_overseer, + test_state.impossibility_threshold(), + |_| Has::No, + false, + ) + .await; - // A request times out with `Unavailable` error. - assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable); + // A request times out with `Unavailable` error. + assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable); + } virtual_overseer }); } -#[test] -fn bad_merkle_path_leads_to_recovery_error() { - let mut test_state = TestState::default(); +#[rstest] +#[case(true)] +#[case(false)] +fn bad_merkle_path_leads_to_recovery_error(#[case] systematic_recovery: bool) { let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_fast_path( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + let mut test_state = TestState::default(); + let subsystem = match systematic_recovery { + true => with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + false => with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + }; test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( @@ -677,25 +1029,40 @@ fn bad_merkle_path_leads_to_recovery_error() { test_state.candidate.clone(), test_state.session_index, None, + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); // Create some faulty chunks. 
- test_state.chunks[0].chunk = vec![0; 32]; - test_state.chunks[1].chunk = vec![1; 32]; - test_state.chunks[2].chunk = vec![2; 32]; - test_state.chunks[3].chunk = vec![3; 32]; - test_state.chunks[4].chunk = vec![4; 32]; + for chunk in test_state.chunks.iter_mut() { + chunk.chunk = vec![0; 32]; + } test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + if systematic_recovery { + test_state + .test_chunk_requests( + &req_protocol_names, + candidate_hash, + &mut virtual_overseer, + test_state.systematic_threshold(), + |_| Has::No, + systematic_recovery, + ) + .await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + } + test_state .test_chunk_requests( &req_protocol_names, @@ -703,6 +1070,7 @@ fn bad_merkle_path_leads_to_recovery_error() { &mut virtual_overseer, test_state.impossibility_threshold(), |_| Has::Yes, + false, ) .await; @@ -712,14 +1080,24 @@ fn bad_merkle_path_leads_to_recovery_error() { }); } -#[test] -fn wrong_chunk_index_leads_to_recovery_error() { +#[rstest] +#[case(true)] +#[case(false)] +fn wrong_chunk_index_leads_to_recovery_error(#[case] systematic_recovery: bool) { let mut test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_fast_path( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + let subsystem = match systematic_recovery { + true => with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + false => with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + }; test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( @@ -739,32 +1117,55 @@ fn wrong_chunk_index_leads_to_recovery_error() { test_state.candidate.clone(), test_state.session_index, None, + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; - let candidate_hash = test_state.candidate.hash(); + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; - // These chunks should fail the index check as they don't have the correct index for - // validator. - test_state.chunks[1] = test_state.chunks[0].clone(); - test_state.chunks[2] = test_state.chunks[0].clone(); - test_state.chunks[3] = test_state.chunks[0].clone(); - test_state.chunks[4] = test_state.chunks[0].clone(); + let candidate_hash = test_state.candidate.hash(); test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + // Chunks should fail the index check as they don't have the correct index. + + // *(test_state.chunks.get_mut(0.into()).unwrap()) = + // test_state.chunks.get(1.into()).unwrap().clone(); + let first_chunk = test_state.chunks.get(0.into()).unwrap().clone(); + for c_index in 1..test_state.chunks.len() { + *(test_state.chunks.get_mut(ValidatorIndex(c_index as u32)).unwrap()) = + first_chunk.clone(); + } + + if systematic_recovery { + test_state + .test_chunk_requests( + &req_protocol_names, + candidate_hash, + &mut virtual_overseer, + test_state.systematic_threshold(), + |_| Has::Yes, + // We set this to false, as we know we will be requesting the wrong indices. 
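+ // (Passing `true` here would make `test_chunk_requests_inner` assert that only
+ // systematic chunk indices were requested, which doesn't hold in this scenario.)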
+ false, + ) + .await; + + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + } + test_state .test_chunk_requests( &req_protocol_names, candidate_hash, &mut virtual_overseer, - test_state.impossibility_threshold(), - |_| Has::No, + test_state.chunks.len() - 1, + |_| Has::Yes, + false, ) .await; @@ -774,14 +1175,30 @@ fn wrong_chunk_index_leads_to_recovery_error() { }); } -#[test] -fn invalid_erasure_coding_leads_to_invalid_error() { +#[rstest] +#[case(true)] +#[case(false)] +fn invalid_erasure_coding_leads_to_invalid_error(#[case] systematic_recovery: bool) { let mut test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_fast_path( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + let (subsystem, threshold) = match systematic_recovery { + true => ( + with_fast_path_then_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.systematic_threshold(), + ), + false => ( + with_fast_path( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.threshold(), + ), + }; test_harness(subsystem, |mut virtual_overseer| async move { let pov = PoV { block_data: BlockData(vec![69; 64]) }; @@ -795,7 +1212,12 @@ fn invalid_erasure_coding_leads_to_invalid_error() { |i, chunk| *chunk = vec![i as u8; 32], ); - test_state.chunks = bad_chunks; + test_state.chunks = map_chunks( + bad_chunks, + &test_state.node_features, + test_state.validators.len(), + test_state.core_index, + ); test_state.candidate.descriptor.erasure_root = bad_erasure_root; let candidate_hash = test_state.candidate.hash(); @@ -817,12 +1239,15 @@ fn invalid_erasure_coding_leads_to_invalid_error() { test_state.candidate.clone(), test_state.session_index, None, + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; @@ -832,8 +1257,9 @@ fn invalid_erasure_coding_leads_to_invalid_error() { &req_protocol_names, candidate_hash, &mut virtual_overseer, - test_state.threshold(), + threshold, |_| Has::Yes, + systematic_recovery, ) .await; @@ -843,12 +1269,74 @@ fn invalid_erasure_coding_leads_to_invalid_error() { }); } +#[test] +fn invalid_pov_hash_leads_to_invalid_error() { + let mut test_state = TestState::default(); + let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); + let subsystem = AvailabilityRecoverySubsystem::for_collator( + None, + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ); + + test_harness(subsystem, |mut virtual_overseer| async move { + let pov = PoV { block_data: BlockData(vec![69; 64]) }; + + test_state.candidate.descriptor.pov_hash = pov.hash(); + + let candidate_hash = test_state.candidate.hash(); + + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + test_state.current, + 1, + ))), + ) + .await; + + let (tx, rx) = oneshot::channel(); + + overseer_send( + &mut virtual_overseer, + AvailabilityRecoveryMessage::RecoverAvailableData( + test_state.candidate.clone(), + 
test_state.session_index, + None, + Some(test_state.core_index), + tx, + ), + ) + .await; + + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; + + test_state + .test_chunk_requests( + &req_protocol_names, + candidate_hash, + &mut virtual_overseer, + test_state.threshold(), + |_| Has::Yes, + false, + ) + .await; + + assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Invalid); + virtual_overseer + }); +} + #[test] fn fast_path_backing_group_recovers() { let test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_fast_path( + let subsystem = with_fast_path( request_receiver(&req_protocol_names), + &req_protocol_names, Metrics::new_dummy(), ); @@ -870,12 +1358,14 @@ fn fast_path_backing_group_recovers() { test_state.candidate.clone(), test_state.session_index, Some(GroupIndex(0)), + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); @@ -892,6 +1382,7 @@ fn fast_path_backing_group_recovers() { candidate_hash, &mut virtual_overseer, who_has, + GroupIndex(0), ) .await; @@ -901,15 +1392,47 @@ fn fast_path_backing_group_recovers() { }); } -#[test] -fn recovers_from_only_chunks_if_pov_large() { - let test_state = TestState::default(); +#[rstest] +#[case(true, false)] +#[case(false, true)] +#[case(false, false)] +fn recovers_from_only_chunks_if_pov_large( + #[case] systematic_recovery: bool, + #[case] for_collator: bool, +) { + let mut test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_chunks_if_pov_large( - Some(FETCH_CHUNKS_THRESHOLD), - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + let (subsystem, threshold) = match (systematic_recovery, for_collator) { + (true, false) => ( + with_systematic_chunks_if_pov_large( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.systematic_threshold(), + ), + (false, false) => ( + with_chunks_if_pov_large( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.threshold(), + ), + (false, true) => { + test_state.candidate.descriptor.pov_hash = test_state.available_data.pov.hash(); + ( + AvailabilityRecoverySubsystem::for_collator( + None, + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.threshold(), + ) + }, + (_, _) => unreachable!(), + }; test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( @@ -929,12 +1452,15 @@ fn recovers_from_only_chunks_if_pov_large() { test_state.candidate.clone(), test_state.session_index, Some(GroupIndex(0)), + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); @@ -947,16 +1473,19 @@ fn recovers_from_only_chunks_if_pov_large() { } ); - test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; - 
test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + if !for_collator { + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + } test_state .test_chunk_requests( &req_protocol_names, candidate_hash, &mut virtual_overseer, - test_state.threshold(), + threshold, |_| Has::Yes, + systematic_recovery, ) .await; @@ -975,14 +1504,13 @@ fn recovers_from_only_chunks_if_pov_large() { AvailabilityRecoveryMessage::RecoverAvailableData( new_candidate.clone(), test_state.session_index, - Some(GroupIndex(0)), + Some(GroupIndex(1)), + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; - assert_matches!( overseer_recv(&mut virtual_overseer).await, AllMessages::AvailabilityStore( @@ -992,18 +1520,48 @@ fn recovers_from_only_chunks_if_pov_large() { } ); - test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; - test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + if !for_collator { + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + } - test_state - .test_chunk_requests( - &req_protocol_names, - new_candidate.hash(), - &mut virtual_overseer, - test_state.impossibility_threshold(), - |_| Has::No, - ) - .await; + if systematic_recovery { + test_state + .test_chunk_requests( + &req_protocol_names, + new_candidate.hash(), + &mut virtual_overseer, + test_state.systematic_threshold() * SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT as usize, + |_| Has::No, + systematic_recovery, + ) + .await; + if !for_collator { + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + } + // Even if the recovery is systematic, we'll always fall back to regular recovery. + test_state + .test_chunk_requests( + &req_protocol_names, + new_candidate.hash(), + &mut virtual_overseer, + test_state.impossibility_threshold() - threshold, + |_| Has::No, + false, + ) + .await; + } else { + test_state + .test_chunk_requests( + &req_protocol_names, + new_candidate.hash(), + &mut virtual_overseer, + test_state.impossibility_threshold(), + |_| Has::No, + false, + ) + .await; + } // A request times out with `Unavailable` error. 
assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable); @@ -1011,15 +1569,40 @@ fn recovers_from_only_chunks_if_pov_large() { }); } -#[test] -fn fast_path_backing_group_recovers_if_pov_small() { - let test_state = TestState::default(); +#[rstest] +#[case(true, false)] +#[case(false, true)] +#[case(false, false)] +fn fast_path_backing_group_recovers_if_pov_small( + #[case] systematic_recovery: bool, + #[case] for_collator: bool, +) { + let mut test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_chunks_if_pov_large( - Some(FETCH_CHUNKS_THRESHOLD), - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + + let subsystem = match (systematic_recovery, for_collator) { + (true, false) => with_systematic_chunks_if_pov_large( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + + (false, false) => with_chunks_if_pov_large( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + (false, true) => { + test_state.candidate.descriptor.pov_hash = test_state.available_data.pov.hash(); + AvailabilityRecoverySubsystem::for_collator( + None, + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ) + }, + (_, _) => unreachable!(), + }; test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( @@ -1039,12 +1622,15 @@ fn fast_path_backing_group_recovers_if_pov_small() { test_state.candidate.clone(), test_state.session_index, Some(GroupIndex(0)), + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); @@ -1062,7 +1648,9 @@ fn fast_path_backing_group_recovers_if_pov_small() { } ); - test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + if !for_collator { + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + } test_state .test_full_data_requests( @@ -1070,6 +1658,7 @@ fn fast_path_backing_group_recovers_if_pov_small() { candidate_hash, &mut virtual_overseer, who_has, + GroupIndex(0), ) .await; @@ -1079,14 +1668,31 @@ fn fast_path_backing_group_recovers_if_pov_small() { }); } -#[test] -fn no_answers_in_fast_path_causes_chunk_requests() { +#[rstest] +#[case(true)] +#[case(false)] +fn no_answers_in_fast_path_causes_chunk_requests(#[case] systematic_recovery: bool) { let test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_fast_path( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + + let (subsystem, threshold) = match systematic_recovery { + true => ( + with_fast_path_then_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.systematic_threshold(), + ), + false => ( + with_fast_path( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.threshold(), + ), + }; test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( @@ -1106,12 +1712,15 @@ fn no_answers_in_fast_path_causes_chunk_requests() { test_state.candidate.clone(), test_state.session_index, Some(GroupIndex(0)), + 
Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); @@ -1129,6 +1738,7 @@ fn no_answers_in_fast_path_causes_chunk_requests() { candidate_hash, &mut virtual_overseer, who_has, + GroupIndex(0), ) .await; @@ -1139,8 +1749,9 @@ fn no_answers_in_fast_path_causes_chunk_requests() { &req_protocol_names, candidate_hash, &mut virtual_overseer, - test_state.threshold(), + threshold, |_| Has::Yes, + systematic_recovery, ) .await; @@ -1150,14 +1761,25 @@ fn no_answers_in_fast_path_causes_chunk_requests() { }); } -#[test] -fn task_canceled_when_receivers_dropped() { +#[rstest] +#[case(true)] +#[case(false)] +fn task_canceled_when_receivers_dropped(#[case] systematic_recovery: bool) { let test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_chunks_only( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + + let subsystem = match systematic_recovery { + true => with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + false => with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + }; test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( @@ -1177,12 +1799,15 @@ fn task_canceled_when_receivers_dropped() { test_state.candidate.clone(), test_state.session_index, None, + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; for _ in 0..test_state.validators.len() { match virtual_overseer.recv().timeout(TIMEOUT).await { @@ -1195,14 +1820,24 @@ fn task_canceled_when_receivers_dropped() { }); } -#[test] -fn chunks_retry_until_all_nodes_respond() { +#[rstest] +#[case(true)] +#[case(false)] +fn chunks_retry_until_all_nodes_respond(#[case] systematic_recovery: bool) { let test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_chunks_only( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + let subsystem = match systematic_recovery { + true => with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + false => with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + }; test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( @@ -1221,30 +1856,51 @@ fn chunks_retry_until_all_nodes_respond() { AvailabilityRecoveryMessage::RecoverAvailableData( test_state.candidate.clone(), test_state.session_index, - Some(GroupIndex(0)), + None, + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; test_state.respond_to_query_all_request(&mut 
virtual_overseer, |_| false).await; + if systematic_recovery { + for _ in 0..SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT { + test_state + .test_chunk_requests( + &req_protocol_names, + candidate_hash, + &mut virtual_overseer, + test_state.systematic_threshold(), + |_| Has::timeout(), + true, + ) + .await; + } + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + } + test_state .test_chunk_requests( &req_protocol_names, candidate_hash, &mut virtual_overseer, - test_state.validators.len() - test_state.threshold(), + test_state.impossibility_threshold(), |_| Has::timeout(), + false, ) .await; - // we get to go another round! + // We get to go another round! Actually, we get to go `REGULAR_CHUNKS_REQ_RETRY_LIMIT` + // number of times. test_state .test_chunk_requests( &req_protocol_names, @@ -1252,21 +1908,23 @@ fn chunks_retry_until_all_nodes_respond() { &mut virtual_overseer, test_state.impossibility_threshold(), |_| Has::No, + false, ) .await; - // Recovered data should match the original one. + // Recovery is impossible. assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable); virtual_overseer }); } #[test] -fn not_returning_requests_wont_stall_retrieval() { +fn network_bridge_not_returning_responses_wont_stall_retrieval() { let test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_chunks_only( + let subsystem = with_chunks_only( request_receiver(&req_protocol_names), + &req_protocol_names, Metrics::new_dummy(), ); @@ -1288,12 +1946,15 @@ fn not_returning_requests_wont_stall_retrieval() { test_state.candidate.clone(), test_state.session_index, Some(GroupIndex(0)), + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); @@ -1311,6 +1972,7 @@ fn not_returning_requests_wont_stall_retrieval() { &mut virtual_overseer, not_returning_count, |_| Has::DoesNotReturn, + false, ) .await; @@ -1322,6 +1984,7 @@ fn not_returning_requests_wont_stall_retrieval() { // Should start over: test_state.validators.len() + 3, |_| Has::timeout(), + false, ) .await; @@ -1333,6 +1996,7 @@ fn not_returning_requests_wont_stall_retrieval() { &mut virtual_overseer, test_state.threshold(), |_| Has::Yes, + false, ) .await; @@ -1342,14 +2006,24 @@ fn not_returning_requests_wont_stall_retrieval() { }); } -#[test] -fn all_not_returning_requests_still_recovers_on_return() { +#[rstest] +#[case(true)] +#[case(false)] +fn all_not_returning_requests_still_recovers_on_return(#[case] systematic_recovery: bool) { let test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_chunks_only( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + let subsystem = match systematic_recovery { + true => with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + false => with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + }; test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( @@ -1368,46 +2042,64 @@ fn all_not_returning_requests_still_recovers_on_return() { 
AvailabilityRecoveryMessage::RecoverAvailableData( test_state.candidate.clone(), test_state.session_index, - Some(GroupIndex(0)), + None, + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + let n = if systematic_recovery { + test_state.systematic_threshold() + } else { + test_state.validators.len() + }; let senders = test_state .test_chunk_requests( &req_protocol_names, candidate_hash, &mut virtual_overseer, - test_state.validators.len(), + n, |_| Has::DoesNotReturn, + systematic_recovery, ) .await; future::join( async { Delay::new(Duration::from_millis(10)).await; - // Now retrieval should be able to recover. + // Now retrieval should be able progress. std::mem::drop(senders); }, - test_state.test_chunk_requests( - &req_protocol_names, - candidate_hash, - &mut virtual_overseer, - // Should start over: - test_state.validators.len() + 3, - |_| Has::timeout(), - ), + async { + test_state + .test_chunk_requests( + &req_protocol_names, + candidate_hash, + &mut virtual_overseer, + // Should start over: + n, + |_| Has::timeout(), + systematic_recovery, + ) + .await + }, ) .await; + if systematic_recovery { + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + } + // we get to go another round! test_state .test_chunk_requests( @@ -1416,6 +2108,7 @@ fn all_not_returning_requests_still_recovers_on_return() { &mut virtual_overseer, test_state.threshold(), |_| Has::Yes, + false, ) .await; @@ -1425,14 +2118,24 @@ fn all_not_returning_requests_still_recovers_on_return() { }); } -#[test] -fn returns_early_if_we_have_the_data() { +#[rstest] +#[case(true)] +#[case(false)] +fn returns_early_if_we_have_the_data(#[case] systematic_recovery: bool) { let test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_chunks_only( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + let subsystem = match systematic_recovery { + true => with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + false => with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + }; test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( @@ -1452,12 +2155,15 @@ fn returns_early_if_we_have_the_data() { test_state.candidate.clone(), test_state.session_index, None, + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; test_state.respond_to_available_data_query(&mut virtual_overseer, true).await; assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data); @@ -1466,11 +2172,12 @@ fn returns_early_if_we_have_the_data() { } #[test] -fn does_not_query_local_validator() { +fn returns_early_if_present_in_the_subsystem_cache() { let test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = 
AvailabilityRecoverySubsystem::with_chunks_only( + let subsystem = with_fast_path( request_receiver(&req_protocol_names), + &req_protocol_names, Metrics::new_dummy(), ); @@ -1491,36 +2198,222 @@ fn does_not_query_local_validator() { AvailabilityRecoveryMessage::RecoverAvailableData( test_state.candidate.clone(), test_state.session_index, - None, + Some(GroupIndex(0)), + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; - test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; - test_state.respond_to_query_all_request(&mut virtual_overseer, |i| i == 0).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); + let who_has = |i| match i { + 3 => Has::Yes, + _ => Has::No, + }; + + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state - .test_chunk_requests( + .test_full_data_requests( &req_protocol_names, candidate_hash, &mut virtual_overseer, - test_state.validators.len(), - |i| if i == 0 { panic!("requested from local validator") } else { Has::timeout() }, + who_has, + GroupIndex(0), ) .await; - // second round, make sure it uses the local chunk. - test_state - .test_chunk_requests( + // Recovered data should match the original one. + assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data); + + // A second recovery for the same candidate will return early as it'll be present in the + // cache. + let (tx, rx) = oneshot::channel(); + overseer_send( + &mut virtual_overseer, + AvailabilityRecoveryMessage::RecoverAvailableData( + test_state.candidate.clone(), + test_state.session_index, + Some(GroupIndex(0)), + Some(test_state.core_index), + tx, + ), + ) + .await; + assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data); + + virtual_overseer + }); +} + +#[rstest] +#[case(true)] +#[case(false)] +fn does_not_query_local_validator(#[case] systematic_recovery: bool) { + let test_state = TestState::default(); + let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); + let (subsystem, threshold) = match systematic_recovery { + true => ( + with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.systematic_threshold(), + ), + false => ( + with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.threshold(), + ), + }; + + test_harness(subsystem, |mut virtual_overseer| async move { + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + test_state.current, + 1, + ))), + ) + .await; + + let (tx, rx) = oneshot::channel(); + + overseer_send( + &mut virtual_overseer, + AvailabilityRecoveryMessage::RecoverAvailableData( + test_state.candidate.clone(), + test_state.session_index, + None, + Some(test_state.core_index), + tx, + ), + ) + .await; + + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state + .respond_to_query_all_request(&mut virtual_overseer, |i| i.0 == 0) + .await; + + let candidate_hash = test_state.candidate.hash(); + + // second round, make sure it uses the local chunk. 
+ test_state + .test_chunk_requests( + &req_protocol_names, + candidate_hash, + &mut virtual_overseer, + threshold - 1, + |i| if i.0 == 0 { panic!("requested from local validator") } else { Has::Yes }, + systematic_recovery, + ) + .await; + + assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data); + virtual_overseer + }); +} + +#[rstest] +#[case(true)] +#[case(false)] +fn invalid_local_chunk(#[case] systematic_recovery: bool) { + let test_state = TestState::default(); + let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); + let subsystem = match systematic_recovery { + true => with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + false => with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + }; + + test_harness(subsystem, |mut virtual_overseer| async move { + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + test_state.current, + 1, + ))), + ) + .await; + + let (tx, rx) = oneshot::channel(); + + overseer_send( + &mut virtual_overseer, + AvailabilityRecoveryMessage::RecoverAvailableData( + test_state.candidate.clone(), + test_state.session_index, + None, + Some(test_state.core_index), + tx, + ), + ) + .await; + + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + + let validator_index_for_first_chunk = test_state + .chunks + .iter() + .enumerate() + .find_map(|(val_idx, chunk)| if chunk.index.0 == 0 { Some(val_idx) } else { None }) + .unwrap() as u32; + + test_state + .respond_to_query_all_request_invalid(&mut virtual_overseer, |i| { + i.0 == validator_index_for_first_chunk + }) + .await; + + let candidate_hash = test_state.candidate.hash(); + + // If systematic recovery detects invalid local chunk, it'll directly go to regular + // recovery, if we were the one holding an invalid chunk. + if systematic_recovery { + test_state + .respond_to_query_all_request_invalid(&mut virtual_overseer, |i| { + i.0 == validator_index_for_first_chunk + }) + .await; + } + + test_state + .test_chunk_requests( &req_protocol_names, candidate_hash, &mut virtual_overseer, - test_state.threshold() - 1, - |i| if i == 0 { panic!("requested from local validator") } else { Has::Yes }, + test_state.threshold(), + |i| { + if i.0 == validator_index_for_first_chunk { + panic!("requested from local validator") + } else { + Has::Yes + } + }, + false, ) .await; @@ -1530,14 +2423,439 @@ fn does_not_query_local_validator() { } #[test] -fn invalid_local_chunk_is_ignored() { +fn systematic_chunks_are_not_requested_again_in_regular_recovery() { + // Run this test multiple times, as the order in which requests are made is random and we want + // to make sure that we catch regressions. 
+ for _ in 0..TestState::default().chunks.len() { + let test_state = TestState::default(); + let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); + let subsystem = with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ); + + test_harness(subsystem, |mut virtual_overseer| async move { + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + test_state.current, + 1, + ))), + ) + .await; + + let (tx, rx) = oneshot::channel(); + + overseer_send( + &mut virtual_overseer, + AvailabilityRecoveryMessage::RecoverAvailableData( + test_state.candidate.clone(), + test_state.session_index, + None, + Some(test_state.core_index), + tx, + ), + ) + .await; + + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + + let validator_index_for_first_chunk = test_state + .chunks + .iter() + .enumerate() + .find_map(|(val_idx, chunk)| if chunk.index.0 == 0 { Some(val_idx) } else { None }) + .unwrap() as u32; + + test_state + .test_chunk_requests( + &req_protocol_names, + test_state.candidate.hash(), + &mut virtual_overseer, + test_state.systematic_threshold(), + |i| if i.0 == validator_index_for_first_chunk { Has::No } else { Has::Yes }, + true, + ) + .await; + + // Falls back to regular recovery, since one validator returned a fatal error. + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + + test_state + .test_chunk_requests( + &req_protocol_names, + test_state.candidate.hash(), + &mut virtual_overseer, + 1, + |i| { + if (test_state.chunks.get(i).unwrap().index.0 as usize) < + test_state.systematic_threshold() + { + panic!("Already requested") + } else { + Has::Yes + } + }, + false, + ) + .await; + + assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data); + virtual_overseer + }); + } +} + +#[rstest] +#[case(true, true)] +#[case(true, false)] +#[case(false, true)] +#[case(false, false)] +fn chunk_indices_are_mapped_to_different_validators( + #[case] systematic_recovery: bool, + #[case] mapping_enabled: bool, +) { + let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); + let test_state = match mapping_enabled { + true => TestState::default(), + false => TestState::with_empty_node_features(), + }; + let subsystem = match systematic_recovery { + true => with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + false => with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + }; + + test_harness(subsystem, |mut virtual_overseer| async move { + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + test_state.current, + 1, + ))), + ) + .await; + + let (tx, _rx) = oneshot::channel(); + + overseer_send( + &mut virtual_overseer, + AvailabilityRecoveryMessage::RecoverAvailableData( + test_state.candidate.clone(), + test_state.session_index, + None, + Some(test_state.core_index), + tx, + ), + ) + .await; + + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; + + 
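For intuition about the mapping this test exercises: with the chunk mapping feature enabled, the chunk index a validator holds is a per-core rotation of its validator index, so the systematic chunks land on a different validator subset for each core. A minimal runnable sketch, assuming an RFC-47-style formula (the function below is a hypothetical stand-in, not the subsystem's real helper, which lives in `polkadot_node_subsystem_util::availability_chunks`):

```rust
// Hypothetical stand-in for the per-core ValidatorIndex -> ChunkIndex rotation.
fn hypothetical_chunk_index(
    n_validators: u32,
    systematic_threshold: u32,
    core_index: u32,
    validator_index: u32,
) -> u32 {
    // Shift the systematic chunk range (0..systematic_threshold) by one
    // threshold-sized step per core, wrapping around the validator set.
    (core_index * systematic_threshold + validator_index) % n_validators
}

fn main() {
    let (n, threshold) = (10, 4);
    // For a non-zero core index, no validator is assigned the chunk equal to its
    // own index, which is what the assertions below check when the mapping node
    // feature is enabled.
    for v in 0..n {
        assert_ne!(hypothetical_chunk_index(n, threshold, 2, v), v);
    }
}
```

With the feature disabled, the mapping is the identity, which is the other case asserted below.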
test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + + let mut chunk_indices: Vec<(u32, u32)> = vec![]; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridgeTx( + NetworkBridgeTxMessage::SendRequests( + requests, + _if_disconnected, + ) + ) => { + for req in requests { + assert_matches!( + req, + Requests::ChunkFetching(req) => { + assert_eq!(req.payload.candidate_hash, test_state.candidate.hash()); + + let validator_index = req.payload.index; + let chunk_index = test_state.chunks.get(validator_index).unwrap().index; + + if systematic_recovery && mapping_enabled { + assert!((chunk_index.0 as usize) <= test_state.systematic_threshold(), "requested non-systematic chunk"); + } + + chunk_indices.push((chunk_index.0, validator_index.0)); + } + ) + } + } + ); + + if mapping_enabled { + assert!(!chunk_indices.iter().any(|(c_index, v_index)| c_index == v_index)); + } else { + assert!(chunk_indices.iter().all(|(c_index, v_index)| c_index == v_index)); + } + + virtual_overseer + }); +} + +#[rstest] +#[case(true, false)] +#[case(false, true)] +#[case(false, false)] +fn number_of_request_retries_is_bounded( + #[case] systematic_recovery: bool, + #[case] should_fail: bool, +) { + let mut test_state = TestState::default(); + let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); + // We need the number of validators to be evenly divisible by the threshold for this test to be + // easier to write. + let n_validators = 6; + test_state.validators.truncate(n_validators); + test_state.validator_authority_id.truncate(n_validators); + let mut temp = test_state.validator_public.to_vec(); + temp.truncate(n_validators); + test_state.validator_public = temp.into(); + + let (chunks, erasure_root) = derive_erasure_chunks_with_proofs_and_root( + n_validators, + &test_state.available_data, + |_, _| {}, + ); + test_state.chunks = + map_chunks(chunks, &test_state.node_features, n_validators, test_state.core_index); + test_state.candidate.descriptor.erasure_root = erasure_root; + + let (subsystem, retry_limit) = match systematic_recovery { + false => ( + with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + REGULAR_CHUNKS_REQ_RETRY_LIMIT, + ), + true => ( + with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT, + ), + }; + + test_harness(subsystem, |mut virtual_overseer| async move { + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + test_state.current, + 1, + ))), + ) + .await; + + let (tx, rx) = oneshot::channel(); + + overseer_send( + &mut virtual_overseer, + AvailabilityRecoveryMessage::RecoverAvailableData( + test_state.candidate.clone(), + test_state.session_index, + None, + Some(test_state.core_index), + tx, + ), + ) + .await; + + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + + let validator_count_per_iteration = if systematic_recovery { + test_state.systematic_threshold() + } else { + test_state.chunks.len() + }; + + // Network errors are considered non-fatal but 
should be retried a limited number of times. + for _ in 1..retry_limit { + test_state + .test_chunk_requests( + &req_protocol_names, + test_state.candidate.hash(), + &mut virtual_overseer, + validator_count_per_iteration, + |_| Has::timeout(), + systematic_recovery, + ) + .await; + } + + if should_fail { + test_state + .test_chunk_requests( + &req_protocol_names, + test_state.candidate.hash(), + &mut virtual_overseer, + validator_count_per_iteration, + |_| Has::timeout(), + systematic_recovery, + ) + .await; + + assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable); + } else { + test_state + .test_chunk_requests( + &req_protocol_names, + test_state.candidate.hash(), + &mut virtual_overseer, + test_state.threshold(), + |_| Has::Yes, + systematic_recovery, + ) + .await; + + assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data); + } + + virtual_overseer + }); +} + +#[test] +fn systematic_recovery_retries_from_backers() { let test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_chunks_only( + let subsystem = with_systematic_chunks( request_receiver(&req_protocol_names), + &req_protocol_names, Metrics::new_dummy(), ); + test_harness(subsystem, |mut virtual_overseer| async move { + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + test_state.current, + 1, + ))), + ) + .await; + + let (tx, rx) = oneshot::channel(); + let group_index = GroupIndex(2); + let group_size = test_state.validator_groups.get(group_index).unwrap().len(); + + overseer_send( + &mut virtual_overseer, + AvailabilityRecoveryMessage::RecoverAvailableData( + test_state.candidate.clone(), + test_state.session_index, + Some(group_index), + Some(test_state.core_index), + tx, + ), + ) + .await; + + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + + let mut cnt = 0; + + test_state + .test_chunk_requests( + &req_protocol_names, + test_state.candidate.hash(), + &mut virtual_overseer, + test_state.systematic_threshold(), + |_| { + let res = if cnt < group_size { Has::timeout() } else { Has::Yes }; + cnt += 1; + res + }, + true, + ) + .await; + + // Exhaust retries. + for _ in 0..(SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT - 1) { + test_state + .test_chunk_requests( + &req_protocol_names, + test_state.candidate.hash(), + &mut virtual_overseer, + group_size, + |_| Has::No, + true, + ) + .await; + } + + // Now, final chance is to try from a backer. + test_state + .test_chunk_requests( + &req_protocol_names, + test_state.candidate.hash(), + &mut virtual_overseer, + group_size, + |_| Has::Yes, + true, + ) + .await; + + assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data); + virtual_overseer + }); +} + +#[rstest] +#[case(true)] +#[case(false)] +fn test_legacy_network_protocol_with_mapping_disabled(#[case] systematic_recovery: bool) { + // In this case, when the mapping is disabled, recovery will work with both v2 and v1 requests, + // under the assumption that ValidatorIndex is always equal to ChunkIndex. However, systematic + // recovery will not be possible, it will fall back to regular recovery. 
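For context on why v1 can still serve recovery here: with an empty `NodeFeatures` bitfield the per-core mapping degenerates to the identity, so addressing a chunk by `ValidatorIndex` (which is all the v1 wire format can express) still fetches the right chunk. A small sketch of that degenerate case (`chunk_indices_for_core` is a hypothetical stand-in for the `availability_chunk_indices` helper used elsewhere in this patch):

```rust
// Hypothetical: the ChunkIndex held by each ValidatorIndex for one core.
fn chunk_indices_for_core(mapping_enabled: bool, n_validators: u32, rotation: u32) -> Vec<u32> {
    (0..n_validators)
        .map(|v| if mapping_enabled { (v + rotation) % n_validators } else { v })
        .collect()
}

fn main() {
    // Mapping disabled (empty node features): validator i holds chunk i, so a v1
    // request addressed by validator index fetches exactly the expected chunk.
    assert_eq!(chunk_indices_for_core(false, 6, 2), vec![0, 1, 2, 3, 4, 5]);
}
```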
+ let test_state = TestState::with_empty_node_features(); + let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); + let (subsystem, threshold) = match systematic_recovery { + true => ( + with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.systematic_threshold(), + ), + false => ( + with_fast_path( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.threshold(), + ), + }; + test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( &mut virtual_overseer, @@ -1556,30 +2874,250 @@ fn invalid_local_chunk_is_ignored() { test_state.candidate.clone(), test_state.session_index, None, + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; + + let candidate_hash = test_state.candidate.hash(); + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + test_state - .respond_to_query_all_request_invalid(&mut virtual_overseer, |i| i == 0) + .test_chunk_requests_v1( + &req_protocol_names, + candidate_hash, + &mut virtual_overseer, + threshold, + |_| Has::Yes, + false, + ) .await; + // Recovered data should match the original one. + assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data); + virtual_overseer + }); +} + +#[rstest] +#[case(true)] +#[case(false)] +fn test_legacy_network_protocol_with_mapping_enabled(#[case] systematic_recovery: bool) { + // In this case, when the mapping is enabled, we MUST only use v2. Recovery should fail for v1. + let test_state = TestState::default(); + let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); + let (subsystem, threshold) = match systematic_recovery { + true => ( + with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.systematic_threshold(), + ), + false => ( + with_fast_path( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.threshold(), + ), + }; + + test_harness(subsystem, |mut virtual_overseer| async move { + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + test_state.current, + 1, + ))), + ) + .await; + + let (tx, rx) = oneshot::channel(); + + overseer_send( + &mut virtual_overseer, + AvailabilityRecoveryMessage::RecoverAvailableData( + test_state.candidate.clone(), + test_state.session_index, + None, + Some(test_state.core_index), + tx, + ), + ) + .await; + + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; + let candidate_hash = test_state.candidate.hash(); + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + + if systematic_recovery { + test_state + .test_chunk_requests_v1( + &req_protocol_names, + candidate_hash, + &mut virtual_overseer, + threshold, + |_| Has::Yes, + systematic_recovery, + ) + .await; + + // Systematic recovery failed, trying regular recovery. 
+ test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + } + + test_state + .test_chunk_requests_v1( + &req_protocol_names, + candidate_hash, + &mut virtual_overseer, + test_state.validators.len() - test_state.threshold(), + |_| Has::Yes, + false, + ) + .await; + + assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable); + virtual_overseer + }); +} + +#[test] +fn test_systematic_recovery_skipped_if_no_core_index() { + let test_state = TestState::default(); + let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); + let subsystem = with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ); + + test_harness(subsystem, |mut virtual_overseer| async move { + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + test_state.current, + 1, + ))), + ) + .await; + + let (tx, rx) = oneshot::channel(); + + overseer_send( + &mut virtual_overseer, + AvailabilityRecoveryMessage::RecoverAvailableData( + test_state.candidate.clone(), + test_state.session_index, + None, + None, + tx, + ), + ) + .await; + + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; + + let candidate_hash = test_state.candidate.hash(); + + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + + // Systematic recovery not possible without core index, falling back to regular recovery. test_state .test_chunk_requests( &req_protocol_names, candidate_hash, &mut virtual_overseer, - test_state.threshold() - 1, - |i| if i == 0 { panic!("requested from local validator") } else { Has::Yes }, + test_state.validators.len() - test_state.threshold(), + |_| Has::No, + false, ) .await; - assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data); + // Make it fail, in order to assert that indeed regular recovery was attempted. If it were + // systematic recovery, we would have had one more attempt for regular reconstruction. 
+ assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable); + virtual_overseer + }); +} + +#[test] +fn test_systematic_recovery_skipped_if_mapping_disabled() { + let test_state = TestState::with_empty_node_features(); + let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); + let subsystem = AvailabilityRecoverySubsystem::for_validator( + None, + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ); + + test_harness(subsystem, |mut virtual_overseer| async move { + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + test_state.current, + 1, + ))), + ) + .await; + + let (tx, rx) = oneshot::channel(); + + overseer_send( + &mut virtual_overseer, + AvailabilityRecoveryMessage::RecoverAvailableData( + test_state.candidate.clone(), + test_state.session_index, + None, + Some(test_state.core_index), + tx, + ), + ) + .await; + + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; + + let candidate_hash = test_state.candidate.hash(); + + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + + // Systematic recovery not possible with the chunk mapping feature disabled, falling back to regular recovery. + test_state + .test_chunk_requests( + &req_protocol_names, + candidate_hash, + &mut virtual_overseer, + test_state.validators.len() - test_state.threshold(), + |_| Has::No, + false, + ) + .await; + + // Make it fail, in order to assert that indeed regular recovery was attempted. If it were + // systematic recovery, we would have had one more attempt for regular reconstruction. + assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable); virtual_overseer }); } diff --git a/polkadot/node/network/bridge/src/tx/mod.rs b/polkadot/node/network/bridge/src/tx/mod.rs index d5be6f01c33..7b6dea74857 100644 --- a/polkadot/node/network/bridge/src/tx/mod.rs +++ b/polkadot/node/network/bridge/src/tx/mod.rs @@ -301,7 +301,15 @@ where for req in reqs { match req { - Requests::ChunkFetchingV1(_) => metrics.on_message("chunk_fetching_v1"), + Requests::ChunkFetching(ref req) => { + // This is not the actual request that will succeed, as we don't know yet + // what that will be. It's only the primary request we tried. + if req.fallback_request.is_some() { + metrics.on_message("chunk_fetching_v2") + } else { + metrics.on_message("chunk_fetching_v1") + } + }, Requests::AvailableDataFetchingV1(_) => metrics.on_message("available_data_fetching_v1"), Requests::CollationFetchingV1(_) => metrics.on_message("collation_fetching_v1"), diff --git a/polkadot/node/network/protocol/src/request_response/mod.rs b/polkadot/node/network/protocol/src/request_response/mod.rs index cab02bb88a0..fe06593bd7a 100644 --- a/polkadot/node/network/protocol/src/request_response/mod.rs +++ b/polkadot/node/network/protocol/src/request_response/mod.rs @@ -98,6 +98,10 @@ pub enum Protocol { /// Protocol for requesting candidates with attestations in statement distribution /// when async backing is enabled. AttestedCandidateV2, + + /// Protocol for chunk fetching version 2, used by availability distribution and availability + /// recovery.
+ ChunkFetchingV2, } /// Minimum bandwidth we expect for validators - 500Mbit/s is the recommendation, so approximately @@ -209,7 +213,7 @@ impl Protocol { let name = req_protocol_names.get_name(self); let legacy_names = self.get_legacy_name().into_iter().map(Into::into).collect(); match self { - Protocol::ChunkFetchingV1 => N::request_response_config( + Protocol::ChunkFetchingV1 | Protocol::ChunkFetchingV2 => N::request_response_config( name, legacy_names, 1_000, @@ -292,7 +296,7 @@ impl Protocol { // times (due to network delays), 100 seems big enough to accommodate for "bursts", // assuming we can service requests relatively quickly, which would need to be measured // as well. - Protocol::ChunkFetchingV1 => 100, + Protocol::ChunkFetchingV1 | Protocol::ChunkFetchingV2 => 100, // 10 seems reasonable, considering group sizes of max 10 validators. Protocol::CollationFetchingV1 | Protocol::CollationFetchingV2 => 10, // 10 seems reasonable, considering group sizes of max 10 validators. @@ -362,6 +366,7 @@ impl Protocol { // Introduced after legacy names became legacy. Protocol::AttestedCandidateV2 => None, Protocol::CollationFetchingV2 => None, + Protocol::ChunkFetchingV2 => None, } } } @@ -412,6 +417,7 @@ impl ReqProtocolNames { }; let short_name = match protocol { + // V1: Protocol::ChunkFetchingV1 => "/req_chunk/1", Protocol::CollationFetchingV1 => "/req_collation/1", Protocol::PoVFetchingV1 => "/req_pov/1", @@ -419,8 +425,10 @@ impl ReqProtocolNames { Protocol::StatementFetchingV1 => "/req_statement/1", Protocol::DisputeSendingV1 => "/send_dispute/1", + // V2: Protocol::CollationFetchingV2 => "/req_collation/2", Protocol::AttestedCandidateV2 => "/req_attested_candidate/2", + Protocol::ChunkFetchingV2 => "/req_chunk/2", }; format!("{}{}", prefix, short_name).into() diff --git a/polkadot/node/network/protocol/src/request_response/outgoing.rs b/polkadot/node/network/protocol/src/request_response/outgoing.rs index 96ef4a6ab25..f578c4ffded 100644 --- a/polkadot/node/network/protocol/src/request_response/outgoing.rs +++ b/polkadot/node/network/protocol/src/request_response/outgoing.rs @@ -30,7 +30,7 @@ use super::{v1, v2, IsRequest, Protocol}; #[derive(Debug)] pub enum Requests { /// Request an availability chunk from a node. - ChunkFetchingV1(OutgoingRequest<v1::ChunkFetchingRequest>), + ChunkFetching(OutgoingRequest<v2::ChunkFetchingRequest>), /// Fetch a collation from a collator which previously announced it. CollationFetchingV1(OutgoingRequest<v1::CollationFetchingRequest>), /// Fetch a PoV from a validator which previously sent out a seconded statement. @@ -59,7 +59,7 @@ impl Requests { /// contained in the `enum`. pub fn encode_request(self) -> (Protocol, OutgoingRequest<Vec<u8>>) { match self { - Self::ChunkFetchingV1(r) => r.encode_request(), + Self::ChunkFetching(r) => r.encode_request(), Self::CollationFetchingV1(r) => r.encode_request(), Self::CollationFetchingV2(r) => r.encode_request(), Self::PoVFetchingV1(r) => r.encode_request(), @@ -164,24 +164,20 @@ where /// /// Returns a raw `Vec<u8>` response over the channel. Use the associated `ProtocolName` to know /// which request was the successful one and appropriately decode the response. - // WARNING: This is commented for now because it's not used yet. - // If you need it, make sure to test it. You may need to enable the V1 substream upgrade - // protocol, unless libp2p was in the meantime updated to a version that fixes the problem - // described in https://github.com/libp2p/rust-libp2p/issues/5074 - // pub fn new_with_fallback<FallbackReq: IsRequest + Encode>( - // peer: Recipient, - // payload: Req, - // fallback_request: FallbackReq, - // ) -> (Self, impl Future<Output = OutgoingResult<(Vec<u8>, ProtocolName)>>) { - // let (tx, rx) = oneshot::channel(); - // let r = Self { - // peer, - // payload, - // pending_response: tx, - // fallback_request: Some((fallback_request, FallbackReq::PROTOCOL)), - // }; - // (r, async { Ok(rx.await??) }) - // } + pub fn new_with_fallback<FallbackReq: IsRequest + Encode>( + peer: Recipient, + payload: Req, + fallback_request: FallbackReq, + ) -> (Self, impl Future<Output = OutgoingResult<(Vec<u8>, ProtocolName)>>) { + let (tx, rx) = oneshot::channel(); + let r = Self { + peer, + payload, + pending_response: tx, + fallback_request: Some((fallback_request, FallbackReq::PROTOCOL)), + }; + (r, async { Ok(rx.await??) }) + } /// Encode a request into a `Vec<u8>`. /// diff --git a/polkadot/node/network/protocol/src/request_response/v1.rs b/polkadot/node/network/protocol/src/request_response/v1.rs index 60eecb69f73..c503c6e4df0 100644 --- a/polkadot/node/network/protocol/src/request_response/v1.rs +++ b/polkadot/node/network/protocol/src/request_response/v1.rs @@ -33,7 +33,8 @@ use super::{IsRequest, Protocol}; pub struct ChunkFetchingRequest { /// Hash of candidate we want a chunk for. pub candidate_hash: CandidateHash, - /// The index of the chunk to fetch. + /// The validator index we are requesting from. This must be identical to the index of the + /// chunk we'll receive. For v2, this may not be the case. pub index: ValidatorIndex, } @@ -57,6 +58,15 @@ impl From<Option<ChunkResponse>> for ChunkFetchingResponse { } } +impl From<ChunkFetchingResponse> for Option<ChunkResponse> { + fn from(x: ChunkFetchingResponse) -> Self { + match x { + ChunkFetchingResponse::Chunk(c) => Some(c), + ChunkFetchingResponse::NoSuchChunk => None, + } + } +} + /// Skimmed down variant of `ErasureChunk`. /// /// Instead of transmitting a full `ErasureChunk` we transmit `ChunkResponse` in @@ -80,7 +90,7 @@ impl From<ErasureChunk> for ChunkResponse { impl ChunkResponse { /// Re-build an `ErasureChunk` from response and request. pub fn recombine_into_chunk(self, req: &ChunkFetchingRequest) -> ErasureChunk { - ErasureChunk { chunk: self.chunk, proof: self.proof, index: req.index } + ErasureChunk { chunk: self.chunk, proof: self.proof, index: req.index.into() } } } diff --git a/polkadot/node/network/protocol/src/request_response/v2.rs b/polkadot/node/network/protocol/src/request_response/v2.rs index 6b90c579237..7e1a2d98916 100644 --- a/polkadot/node/network/protocol/src/request_response/v2.rs +++ b/polkadot/node/network/protocol/src/request_response/v2.rs @@ -18,12 +18,13 @@ use parity_scale_codec::{Decode, Encode}; +use polkadot_node_primitives::ErasureChunk; use polkadot_primitives::{ CandidateHash, CommittedCandidateReceipt, Hash, Id as ParaId, PersistedValidationData, - UncheckedSignedStatement, + UncheckedSignedStatement, ValidatorIndex, }; -use super::{IsRequest, Protocol}; +use super::{v1, IsRequest, Protocol}; use crate::v2::StatementFilter; /// Request a candidate with statements. @@ -78,3 +79,60 @@ impl IsRequest for CollationFetchingRequest { type Response = CollationFetchingResponse; const PROTOCOL: Protocol = Protocol::CollationFetchingV2; } + +/// Request an availability chunk. +#[derive(Debug, Copy, Clone, Encode, Decode)] +pub struct ChunkFetchingRequest { + /// Hash of candidate we want a chunk for.
+ pub candidate_hash: CandidateHash, + /// The validator index we are requesting from. This may not be identical to the index of the + /// chunk we'll receive. It's up to the caller to decide whether they need to verify that they + /// got the chunk they were expecting. + pub index: ValidatorIndex, +} + +/// Receive a requested erasure chunk. +#[derive(Debug, Clone, Encode, Decode)] +pub enum ChunkFetchingResponse { + /// The requested chunk data. + #[codec(index = 0)] + Chunk(ErasureChunk), + /// Node was not in possession of the requested chunk. + #[codec(index = 1)] + NoSuchChunk, +} + +impl From<Option<ErasureChunk>> for ChunkFetchingResponse { + fn from(x: Option<ErasureChunk>) -> Self { + match x { + Some(c) => ChunkFetchingResponse::Chunk(c), + None => ChunkFetchingResponse::NoSuchChunk, + } + } +} + +impl From<ChunkFetchingResponse> for Option<ErasureChunk> { + fn from(x: ChunkFetchingResponse) -> Self { + match x { + ChunkFetchingResponse::Chunk(c) => Some(c), + ChunkFetchingResponse::NoSuchChunk => None, + } + } +} + +impl From<v1::ChunkFetchingRequest> for ChunkFetchingRequest { + fn from(v1::ChunkFetchingRequest { candidate_hash, index }: v1::ChunkFetchingRequest) -> Self { + Self { candidate_hash, index } + } +} + +impl From<ChunkFetchingRequest> for v1::ChunkFetchingRequest { + fn from(ChunkFetchingRequest { candidate_hash, index }: ChunkFetchingRequest) -> Self { + Self { candidate_hash, index } + } +} + +impl IsRequest for ChunkFetchingRequest { + type Response = ChunkFetchingResponse; + const PROTOCOL: Protocol = Protocol::ChunkFetchingV2; +} diff --git a/polkadot/node/overseer/src/tests.rs b/polkadot/node/overseer/src/tests.rs index 55a6bdb74ba..87484914ef9 100644 --- a/polkadot/node/overseer/src/tests.rs +++ b/polkadot/node/overseer/src/tests.rs @@ -856,6 +856,7 @@ fn test_availability_recovery_msg() -> AvailabilityRecoveryMessage { dummy_candidate_receipt(dummy_hash()), Default::default(), None, + None, sender, ) } diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index 67930f8735c..5f007bc8d67 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -30,13 +30,14 @@ use parity_scale_codec::{Decode, Encode, Error as CodecError, Input}; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use polkadot_primitives::{ - BlakeTwo256, BlockNumber, CandidateCommitments, CandidateHash, CollatorPair, + BlakeTwo256, BlockNumber, CandidateCommitments, CandidateHash, ChunkIndex, CollatorPair, CommittedCandidateReceipt, CompactStatement, CoreIndex, EncodeAs, Hash, HashT, HeadData, Id as ParaId, PersistedValidationData, SessionIndex, Signed, UncheckedSigned, ValidationCode, - ValidationCodeHash, ValidatorIndex, MAX_CODE_SIZE, MAX_POV_SIZE, + ValidationCodeHash, MAX_CODE_SIZE, MAX_POV_SIZE, }; pub use sp_consensus_babe::{ AllowedSlots as BabeAllowedSlots, BabeEpochConfiguration, Epoch as BabeEpoch, + Randomness as BabeRandomness, }; pub use polkadot_parachain_primitives::primitives::{ @@ -639,7 +640,7 @@ pub struct ErasureChunk { /// The erasure-encoded chunk of data belonging to the candidate block. pub chunk: Vec<u8>, /// The index of this erasure-encoded chunk of data. - pub index: ValidatorIndex, + pub index: ChunkIndex, /// Proof for this chunk's branch in the Merkle tree.
pub proof: Proof, } diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index f50b9770b41..7c9b9e05d62 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -915,7 +915,10 @@ pub fn new_full< let (pov_req_receiver, cfg) = IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); net_config.add_request_response_protocol(cfg); - let (chunk_req_receiver, cfg) = + let (chunk_req_v1_receiver, cfg) = + IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); + net_config.add_request_response_protocol(cfg); + let (chunk_req_v2_receiver, cfg) = IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); net_config.add_request_response_protocol(cfg); @@ -1000,7 +1003,8 @@ pub fn new_full< candidate_validation_config, availability_config: AVAILABILITY_CONFIG, pov_req_receiver, - chunk_req_receiver, + chunk_req_v1_receiver, + chunk_req_v2_receiver, statement_req_receiver, candidate_req_v2_receiver, approval_voting_config, diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index 175a77e1c5f..6f35718cd18 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -119,8 +119,10 @@ pub struct ExtendedOverseerGenArgs { pub availability_config: AvailabilityConfig, /// POV request receiver. pub pov_req_receiver: IncomingRequestReceiver, - /// Erasure chunks request receiver. - pub chunk_req_receiver: IncomingRequestReceiver, + /// Erasure chunk request v1 receiver. + pub chunk_req_v1_receiver: IncomingRequestReceiver, + /// Erasure chunk request v2 receiver. + pub chunk_req_v2_receiver: IncomingRequestReceiver, /// Receiver for incoming large statement requests. pub statement_req_receiver: IncomingRequestReceiver, /// Receiver for incoming candidate requests. 
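With both chunk-request receivers wired up, the requesting side pairs the two protocols through the `new_with_fallback` constructor restored above. A sketch of how a v2 chunk request with a v1 fallback can be assembled (assuming `Recipient`, `Requests` and `OutgoingRequest` are re-exported from `polkadot_node_network_protocol::request_response`; this is illustrative, not the subsystem's literal code):

```rust
use polkadot_node_network_protocol::request_response::{
    v1, v2, OutgoingRequest, Recipient, Requests,
};
use polkadot_primitives::{CandidateHash, ValidatorIndex};

// Build a /req_chunk/2 request that transparently degrades to /req_chunk/1
// when the peer does not support the v2 protocol.
fn make_chunk_request(
    peer: Recipient,
    candidate_hash: CandidateHash,
    index: ValidatorIndex,
) -> Requests {
    let payload = v2::ChunkFetchingRequest { candidate_hash, index };
    // `v2::ChunkFetchingRequest` is `Copy`, and the `From` impls added in this
    // patch make the two wire formats interchangeable.
    let (request, _response_future) =
        OutgoingRequest::new_with_fallback(peer, payload, v1::ChunkFetchingRequest::from(payload));
    // Real callers keep `_response_future` and await the raw bytes plus the
    // `ProtocolName` that identifies which of the two requests succeeded.
    Requests::ChunkFetching(request)
}
```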
@@ -163,7 +165,8 @@ pub fn validator_overseer_builder( candidate_validation_config, availability_config, pov_req_receiver, - chunk_req_receiver, + chunk_req_v1_receiver, + chunk_req_v2_receiver, statement_req_receiver, candidate_req_v2_receiver, approval_voting_config, @@ -226,7 +229,7 @@ where network_service.clone(), authority_discovery_service.clone(), network_bridge_metrics.clone(), - req_protocol_names, + req_protocol_names.clone(), peerset_protocol_names.clone(), notification_sinks.clone(), )) @@ -241,12 +244,18 @@ where )) .availability_distribution(AvailabilityDistributionSubsystem::new( keystore.clone(), - IncomingRequestReceivers { pov_req_receiver, chunk_req_receiver }, + IncomingRequestReceivers { + pov_req_receiver, + chunk_req_v1_receiver, + chunk_req_v2_receiver, + }, + req_protocol_names.clone(), Metrics::register(registry)?, )) - .availability_recovery(AvailabilityRecoverySubsystem::with_chunks_if_pov_large( + .availability_recovery(AvailabilityRecoverySubsystem::for_validator( fetch_chunks_threshold, available_data_req_receiver, + &req_protocol_names, Metrics::register(registry)?, )) .availability_store(AvailabilityStoreSubsystem::new( @@ -412,7 +421,7 @@ where network_service.clone(), authority_discovery_service.clone(), network_bridge_metrics.clone(), - req_protocol_names, + req_protocol_names.clone(), peerset_protocol_names.clone(), notification_sinks.clone(), )) @@ -429,6 +438,7 @@ where .availability_recovery(AvailabilityRecoverySubsystem::for_collator( None, available_data_req_receiver, + &req_protocol_names, Metrics::register(registry)?, )) .availability_store(DummySubsystem) diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml index 21eaed832c4..ebd9322e9f7 100644 --- a/polkadot/node/subsystem-bench/Cargo.toml +++ b/polkadot/node/subsystem-bench/Cargo.toml @@ -89,6 +89,7 @@ paste = "1.0.14" orchestra = { version = "0.3.5", default-features = false, features = ["futures_channel"] } pyroscope = "0.5.7" pyroscope_pprofrs = "0.2.7" +strum = { version = "0.24", features = ["derive"] } [features] default = [] diff --git a/polkadot/node/subsystem-bench/examples/availability_read.yaml b/polkadot/node/subsystem-bench/examples/availability_read.yaml index 82355b0e297..263a6988242 100644 --- a/polkadot/node/subsystem-bench/examples/availability_read.yaml +++ b/polkadot/node/subsystem-bench/examples/availability_read.yaml @@ -1,8 +1,8 @@ TestConfiguration: # Test 1 - objective: !DataAvailabilityRead - fetch_from_backers: true - n_validators: 300 + strategy: FullFromBackers + n_validators: 500 n_cores: 20 min_pov_size: 5120 max_pov_size: 5120 @@ -16,7 +16,7 @@ TestConfiguration: # Test 2 - objective: !DataAvailabilityRead - fetch_from_backers: true + strategy: FullFromBackers n_validators: 500 n_cores: 20 min_pov_size: 5120 @@ -31,7 +31,7 @@ TestConfiguration: # Test 3 - objective: !DataAvailabilityRead - fetch_from_backers: true + strategy: FullFromBackers n_validators: 1000 n_cores: 20 min_pov_size: 5120 diff --git a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs index f7d65589565..955a8fbac2e 100644 --- a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs @@ -17,12 +17,14 @@ use crate::{ availability::av_store_helpers::new_av_store, dummy_builder, - environment::{TestEnvironment, TestEnvironmentDependencies, GENESIS_HASH}, + environment::{TestEnvironment, TestEnvironmentDependencies}, 
mock::{ - av_store::{self, MockAvailabilityStore, NetworkAvailabilityState}, + av_store::{MockAvailabilityStore, NetworkAvailabilityState}, chain_api::{ChainApiState, MockChainApi}, network_bridge::{self, MockNetworkBridgeRx, MockNetworkBridgeTx}, - runtime_api::{self, MockRuntimeApi, MockRuntimeApiCoreState}, + runtime_api::{ + node_features_with_chunk_mapping_enabled, MockRuntimeApi, MockRuntimeApiCoreState, + }, AlwaysSupportsParachains, }, network::new_network, }; use colored::Colorize; use futures::{channel::oneshot, stream::FuturesUnordered, StreamExt}; + use parity_scale_codec::Encode; use polkadot_availability_bitfield_distribution::BitfieldDistribution; use polkadot_availability_distribution::{ AvailabilityDistributionSubsystem, IncomingRequestReceivers, }; -use polkadot_availability_recovery::AvailabilityRecoverySubsystem; +use polkadot_availability_recovery::{AvailabilityRecoverySubsystem, RecoveryStrategyKind}; use polkadot_node_core_av_store::AvailabilityStoreSubsystem; use polkadot_node_metrics::metrics::Metrics; use polkadot_node_network_protocol::{ - request_response::{IncomingRequest, ReqProtocolNames}, + request_response::{v1, v2, IncomingRequest}, OurView, }; use polkadot_node_subsystem::{ @@ -51,12 +54,13 @@ use polkadot_node_subsystem_types::{ Span, }; use polkadot_overseer::{metrics::Metrics as OverseerMetrics, Handle as OverseerHandle}; -use polkadot_primitives::{Block, GroupIndex, Hash}; +use polkadot_primitives::{Block, CoreIndex, GroupIndex, Hash}; use sc_network::request_responses::{IncomingRequest as RawIncomingRequest, ProtocolConfig}; +use std::{ops::Sub, sync::Arc, time::Instant}; +use strum::Display; use sc_service::SpawnTaskHandle; use serde::{Deserialize, Serialize}; -use std::{ops::Sub, sync::Arc, time::Instant}; pub use test_state::TestState; mod av_store_helpers; mod test_state; const LOG_TARGET: &str = "subsystem-bench::availability"; +#[derive(clap::ValueEnum, Clone, Copy, Debug, PartialEq, Serialize, Deserialize, Display)] +#[value(rename_all = "kebab-case")] +#[strum(serialize_all = "kebab-case")] +pub enum Strategy { + /// Regular random chunk recovery. This is also the fallback for the next strategies. + Chunks, + /// Recovery from systematic chunks. Much faster than regular chunk recovery because it avoids + /// doing the reed-solomon reconstruction. + Systematic, + /// Fetch the full availability data from backers first. Saves CPU as we don't need to + /// re-construct from chunks. Typically this is only faster if nodes have enough bandwidth. + FullFromBackers, +} + #[derive(Debug, Clone, Serialize, Deserialize, clap::Parser)] #[clap(rename_all = "kebab-case")] #[allow(missing_docs)] pub struct DataAvailabilityReadOptions { - #[clap(short, long, default_value_t = false)] - /// Turbo boost AD Read by fetching the full availability datafrom backers first. Saves CPU as - /// we don't need to re-construct from chunks. Typically this is only faster if nodes have - /// enough bandwidth.
- pub fetch_from_backers: bool, + #[clap(short, long, default_value_t = Strategy::Systematic)] + pub strategy: Strategy, } pub enum TestDataAvailability { @@ -84,7 +99,7 @@ fn build_overseer_for_availability_read( spawn_task_handle: SpawnTaskHandle, runtime_api: MockRuntimeApi, av_store: MockAvailabilityStore, - network_bridge: (MockNetworkBridgeTx, MockNetworkBridgeRx), + (network_bridge_tx, network_bridge_rx): (MockNetworkBridgeTx, MockNetworkBridgeRx), availability_recovery: AvailabilityRecoverySubsystem, dependencies: &TestEnvironmentDependencies, ) -> (Overseer, AlwaysSupportsParachains>, OverseerHandle) { @@ -95,8 +110,8 @@ fn build_overseer_for_availability_read( let builder = dummy .replace_runtime_api(|_| runtime_api) .replace_availability_store(|_| av_store) - .replace_network_bridge_tx(|_| network_bridge.0) - .replace_network_bridge_rx(|_| network_bridge.1) + .replace_network_bridge_tx(|_| network_bridge_tx) + .replace_network_bridge_rx(|_| network_bridge_rx) .replace_availability_recovery(|_| availability_recovery); let (overseer, raw_handle) = @@ -109,7 +124,7 @@ fn build_overseer_for_availability_read( fn build_overseer_for_availability_write( spawn_task_handle: SpawnTaskHandle, runtime_api: MockRuntimeApi, - network_bridge: (MockNetworkBridgeTx, MockNetworkBridgeRx), + (network_bridge_tx, network_bridge_rx): (MockNetworkBridgeTx, MockNetworkBridgeRx), availability_distribution: AvailabilityDistributionSubsystem, chain_api: MockChainApi, availability_store: AvailabilityStoreSubsystem, @@ -123,8 +138,8 @@ fn build_overseer_for_availability_write( let builder = dummy .replace_runtime_api(|_| runtime_api) .replace_availability_store(|_| availability_store) - .replace_network_bridge_tx(|_| network_bridge.0) - .replace_network_bridge_rx(|_| network_bridge.1) + .replace_network_bridge_tx(|_| network_bridge_tx) + .replace_network_bridge_rx(|_| network_bridge_rx) .replace_chain_api(|_| chain_api) .replace_bitfield_distribution(|_| bitfield_distribution) // This is needed to test own chunk recovery for `n_cores`. @@ -142,10 +157,14 @@ pub fn prepare_test( with_prometheus_endpoint: bool, ) -> (TestEnvironment, Vec) { let dependencies = TestEnvironmentDependencies::default(); + let availability_state = NetworkAvailabilityState { candidate_hashes: state.candidate_hashes.clone(), + candidate_hash_to_core_index: state.candidate_hash_to_core_index.clone(), available_data: state.available_data.clone(), chunks: state.chunks.clone(), + chunk_indices: state.chunk_indices.clone(), + req_protocol_names: state.req_protocol_names.clone(), }; let mut req_cfgs = Vec::new(); @@ -153,20 +172,31 @@ pub fn prepare_test( let (collation_req_receiver, collation_req_cfg) = IncomingRequest::get_config_receiver::< Block, sc_network::NetworkWorker, - >(&ReqProtocolNames::new(GENESIS_HASH, None)); + >(&state.req_protocol_names); req_cfgs.push(collation_req_cfg); let (pov_req_receiver, pov_req_cfg) = IncomingRequest::get_config_receiver::< Block, sc_network::NetworkWorker, - >(&ReqProtocolNames::new(GENESIS_HASH, None)); - - let (chunk_req_receiver, chunk_req_cfg) = IncomingRequest::get_config_receiver::< - Block, - sc_network::NetworkWorker, - >(&ReqProtocolNames::new(GENESIS_HASH, None)); + >(&state.req_protocol_names); req_cfgs.push(pov_req_cfg); + let (chunk_req_v1_receiver, chunk_req_v1_cfg) = + IncomingRequest::::get_config_receiver::< + Block, + sc_network::NetworkWorker, + >(&state.req_protocol_names); + + // We won't use v1 chunk fetching requests, but we need to keep the inbound queue alive. 
+ // Otherwise, av-distribution subsystem will terminate. + std::mem::forget(chunk_req_v1_cfg); + + let (chunk_req_v2_receiver, chunk_req_v2_cfg) = + IncomingRequest::::get_config_receiver::< + Block, + sc_network::NetworkWorker, + >(&state.req_protocol_names); + let (network, network_interface, network_receiver) = new_network( &state.config, &dependencies, @@ -180,9 +210,9 @@ pub fn prepare_test( state.test_authorities.clone(), ); let network_bridge_rx = - network_bridge::MockNetworkBridgeRx::new(network_receiver, Some(chunk_req_cfg)); + network_bridge::MockNetworkBridgeRx::new(network_receiver, Some(chunk_req_v2_cfg)); - let runtime_api = runtime_api::MockRuntimeApi::new( + let runtime_api = MockRuntimeApi::new( state.config.clone(), state.test_authorities.clone(), state.candidate_receipts.clone(), @@ -194,24 +224,34 @@ pub fn prepare_test( let (overseer, overseer_handle) = match &mode { TestDataAvailability::Read(options) => { - let use_fast_path = options.fetch_from_backers; - - let subsystem = if use_fast_path { - AvailabilityRecoverySubsystem::with_fast_path( + let subsystem = match options.strategy { + Strategy::FullFromBackers => + AvailabilityRecoverySubsystem::with_recovery_strategy_kind( + collation_req_receiver, + &state.req_protocol_names, + Metrics::try_register(&dependencies.registry).unwrap(), + RecoveryStrategyKind::BackersFirstAlways, + ), + Strategy::Chunks => AvailabilityRecoverySubsystem::with_recovery_strategy_kind( collation_req_receiver, + &state.req_protocol_names, Metrics::try_register(&dependencies.registry).unwrap(), - ) - } else { - AvailabilityRecoverySubsystem::with_chunks_only( + RecoveryStrategyKind::ChunksAlways, + ), + Strategy::Systematic => AvailabilityRecoverySubsystem::with_recovery_strategy_kind( collation_req_receiver, + &state.req_protocol_names, Metrics::try_register(&dependencies.registry).unwrap(), - ) + RecoveryStrategyKind::SystematicChunks, + ), }; // Use a mocked av-store. 
- let av_store = av_store::MockAvailabilityStore::new( + let av_store = MockAvailabilityStore::new( state.chunks.clone(), + state.chunk_indices.clone(), state.candidate_hashes.clone(), + state.candidate_hash_to_core_index.clone(), ); build_overseer_for_availability_read( @@ -226,7 +266,12 @@ pub fn prepare_test( TestDataAvailability::Write => { let availability_distribution = AvailabilityDistributionSubsystem::new( state.test_authorities.keyring.keystore(), - IncomingRequestReceivers { pov_req_receiver, chunk_req_receiver }, + IncomingRequestReceivers { + pov_req_receiver, + chunk_req_v1_receiver, + chunk_req_v2_receiver, + }, + state.req_protocol_names.clone(), Metrics::try_register(&dependencies.registry).unwrap(), ); @@ -296,6 +341,7 @@ pub async fn benchmark_availability_read( Some(GroupIndex( candidate_num as u32 % (std::cmp::max(5, config.n_cores) / 5) as u32, )), + Some(*state.candidate_hash_to_core_index.get(&candidate.hash()).unwrap()), tx, ), ); @@ -341,7 +387,7 @@ pub async fn benchmark_availability_write( env.metrics().set_n_cores(config.n_cores); gum::info!(target: LOG_TARGET, "Seeding availability store with candidates ..."); - for backed_candidate in state.backed_candidates.clone() { + for (core_index, backed_candidate) in state.backed_candidates.clone().into_iter().enumerate() { let candidate_index = *state.candidate_hashes.get(&backed_candidate.hash()).unwrap(); let available_data = state.available_data[candidate_index].clone(); let (tx, rx) = oneshot::channel(); @@ -352,6 +398,8 @@ pub async fn benchmark_availability_write( available_data, expected_erasure_root: backed_candidate.descriptor().erasure_root, tx, + core_index: CoreIndex(core_index as u32), + node_features: node_features_with_chunk_mapping_enabled(), }, )) .await; diff --git a/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs b/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs index c328ffedf91..5d443734bb3 100644 --- a/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs @@ -14,22 +14,28 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-use crate::configuration::{TestAuthorities, TestConfiguration}; +use crate::{ + configuration::{TestAuthorities, TestConfiguration}, + environment::GENESIS_HASH, + mock::runtime_api::node_features_with_chunk_mapping_enabled, +}; use bitvec::bitvec; use colored::Colorize; use itertools::Itertools; use parity_scale_codec::Encode; use polkadot_node_network_protocol::{ - request_response::v1::ChunkFetchingRequest, Versioned, VersionedValidationProtocol, + request_response::{v2::ChunkFetchingRequest, ReqProtocolNames}, + Versioned, VersionedValidationProtocol, }; use polkadot_node_primitives::{AvailableData, BlockData, ErasureChunk, PoV}; use polkadot_node_subsystem_test_helpers::{ derive_erasure_chunks_with_proofs_and_root, mock::new_block_import_info, }; +use polkadot_node_subsystem_util::availability_chunks::availability_chunk_indices; use polkadot_overseer::BlockInfo; use polkadot_primitives::{ - AvailabilityBitfield, BlockNumber, CandidateHash, CandidateReceipt, Hash, HeadData, Header, - PersistedValidationData, Signed, SigningContext, ValidatorIndex, + AvailabilityBitfield, BlockNumber, CandidateHash, CandidateReceipt, ChunkIndex, CoreIndex, + Hash, HeadData, Header, PersistedValidationData, Signed, SigningContext, ValidatorIndex, }; use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash}; use sp_core::H256; @@ -49,14 +55,20 @@ pub struct TestState { pub pov_size_to_candidate: HashMap<usize, usize>, // Map from generated candidate hashes to candidate index in `available_data` and `chunks`. pub candidate_hashes: HashMap<CandidateHash, usize>, + // Map from candidate hash to occupied core index. + pub candidate_hash_to_core_index: HashMap<CandidateHash, CoreIndex>, // Per candidate index receipts. pub candidate_receipt_templates: Vec<CandidateReceipt>, // Per candidate index `AvailableData` pub available_data: Vec<AvailableData>, - // Per candiadte index chunks + // Per candidate index chunks pub chunks: Vec<Vec<ErasureChunk>>, + // Per-core ValidatorIndex -> ChunkIndex mapping + pub chunk_indices: Vec<Vec<ChunkIndex>>, // Per relay chain block - candidate backed by our backing group pub backed_candidates: Vec<CandidateReceipt>, + // Request protocol names + pub req_protocol_names: ReqProtocolNames, // Relay chain block infos pub block_infos: Vec<BlockInfo>, // Chunk fetching requests for backed candidates @@ -89,6 +101,9 @@ impl TestState { candidate_receipts: Default::default(), block_headers: Default::default(), test_authorities: config.generate_authorities(), + req_protocol_names: ReqProtocolNames::new(GENESIS_HASH, None), + chunk_indices: Default::default(), + candidate_hash_to_core_index: Default::default(), }; // we use it for all candidates. @@ -99,6 +114,17 @@ impl TestState { relay_parent_storage_root: Default::default(), }; + test_state.chunk_indices = (0..config.n_cores) + .map(|core_index| { + availability_chunk_indices( + Some(&node_features_with_chunk_mapping_enabled()), + config.n_validators, + CoreIndex(core_index as u32), + ) + .unwrap() + }) + .collect(); + // For each unique pov we create a candidate receipt.
for (index, pov_size) in config.pov_sizes().iter().cloned().unique().enumerate() { gum::info!(target: LOG_TARGET, index, pov_size, "{}", "Generating template candidate".bright_blue()); @@ -167,6 +193,11 @@ impl TestState { // Store the new candidate in the state test_state.candidate_hashes.insert(candidate_receipt.hash(), candidate_index); + let core_index = (index % config.n_cores) as u32; + test_state + .candidate_hash_to_core_index + .insert(candidate_receipt.hash(), core_index.into()); + gum::debug!(target: LOG_TARGET, candidate_hash = ?candidate_receipt.hash(), "new candidate"); candidate_receipt diff --git a/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs index a035bf01897..14ec4ccb4c3 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs @@ -20,7 +20,7 @@ use crate::network::{HandleNetworkMessage, NetworkMessage}; use futures::{channel::oneshot, FutureExt}; use parity_scale_codec::Encode; use polkadot_node_network_protocol::request_response::{ - v1::{AvailableDataFetchingResponse, ChunkFetchingResponse, ChunkResponse}, + v1::AvailableDataFetchingResponse, v2::ChunkFetchingResponse, Protocol, ReqProtocolNames, Requests, }; use polkadot_node_primitives::{AvailableData, ErasureChunk}; @@ -28,13 +28,14 @@ use polkadot_node_subsystem::{ messages::AvailabilityStoreMessage, overseer, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_types::OverseerSignal; -use polkadot_primitives::CandidateHash; -use sc_network::ProtocolName; +use polkadot_primitives::{CandidateHash, ChunkIndex, CoreIndex, ValidatorIndex}; use std::collections::HashMap; pub struct AvailabilityStoreState { candidate_hashes: HashMap, chunks: Vec>, + chunk_indices: Vec>, + candidate_hash_to_core_index: HashMap, } const LOG_TARGET: &str = "subsystem-bench::av-store-mock"; @@ -43,9 +44,12 @@ const LOG_TARGET: &str = "subsystem-bench::av-store-mock"; /// used in a test. #[derive(Clone)] pub struct NetworkAvailabilityState { + pub req_protocol_names: ReqProtocolNames, pub candidate_hashes: HashMap, pub available_data: Vec, pub chunks: Vec>, + pub chunk_indices: Vec>, + pub candidate_hash_to_core_index: HashMap, } // Implement access to the state. 
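The hunk below teaches the mock network to answer `Requests::ChunkFetching` by translating the requested `ValidatorIndex` through the per-core mapping before indexing into the candidate's chunks. Reduced to its essentials, the lookup amounts to this (a sketch with simplified types, not the mock's literal code):

```rust
// Sketch: validator index -> mapped chunk index -> chunk bytes.
fn lookup_chunk(
    candidate_chunks: &[Vec<u8>],    // one candidate's chunks, ordered by ChunkIndex
    per_core_mapping: &[Vec<usize>], // per core: ValidatorIndex -> ChunkIndex
    core_index: usize,
    validator_index: usize,
) -> Vec<u8> {
    let chunk_index = per_core_mapping[core_index][validator_index];
    candidate_chunks[chunk_index].clone()
}

fn main() {
    let chunks = vec![vec![0u8], vec![1u8], vec![2u8]];
    // Hypothetical mapping for a single core: a rotation by one.
    let mapping = vec![vec![1usize, 2, 0]];
    // Validator 0 serves chunk 1 under this mapping.
    assert_eq!(lookup_chunk(&chunks, &mapping, 0, 0), vec![1u8]);
}
```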
@@ -58,7 +62,7 @@ impl HandleNetworkMessage for NetworkAvailabilityState { ) -> Option { match message { NetworkMessage::RequestFromNode(peer, request) => match request { - Requests::ChunkFetchingV1(outgoing_request) => { + Requests::ChunkFetching(outgoing_request) => { gum::debug!(target: LOG_TARGET, request = ?outgoing_request, "Received `RequestFromNode`"); let validator_index: usize = outgoing_request.payload.index.0 as usize; let candidate_hash = outgoing_request.payload.candidate_hash; @@ -69,11 +73,22 @@ impl HandleNetworkMessage for NetworkAvailabilityState { .expect("candidate was generated previously; qed"); gum::warn!(target: LOG_TARGET, ?candidate_hash, candidate_index, "Candidate mapped to index"); - let chunk: ChunkResponse = - self.chunks.get(*candidate_index).unwrap()[validator_index].clone().into(); + let candidate_chunks = self.chunks.get(*candidate_index).unwrap(); + let chunk_indices = self + .chunk_indices + .get( + self.candidate_hash_to_core_index.get(&candidate_hash).unwrap().0 + as usize, + ) + .unwrap(); + + let chunk = candidate_chunks + .get(chunk_indices.get(validator_index).unwrap().0 as usize) + .unwrap(); + let response = Ok(( - ChunkFetchingResponse::from(Some(chunk)).encode(), - ProtocolName::Static("dummy"), + ChunkFetchingResponse::from(Some(chunk.clone())).encode(), + self.req_protocol_names.get_name(Protocol::ChunkFetchingV2), )); if let Err(err) = outgoing_request.pending_response.send(response) { @@ -94,7 +109,7 @@ impl HandleNetworkMessage for NetworkAvailabilityState { let response = Ok(( AvailableDataFetchingResponse::from(Some(available_data)).encode(), - ProtocolName::Static("dummy"), + self.req_protocol_names.get_name(Protocol::AvailableDataFetchingV1), )); outgoing_request .pending_response @@ -119,16 +134,25 @@ pub struct MockAvailabilityStore { impl MockAvailabilityStore { pub fn new( chunks: Vec>, + chunk_indices: Vec>, candidate_hashes: HashMap, + candidate_hash_to_core_index: HashMap, ) -> MockAvailabilityStore { - Self { state: AvailabilityStoreState { chunks, candidate_hashes } } + Self { + state: AvailabilityStoreState { + chunks, + candidate_hashes, + chunk_indices, + candidate_hash_to_core_index, + }, + } } async fn respond_to_query_all_request( &self, candidate_hash: CandidateHash, - send_chunk: impl Fn(usize) -> bool, - tx: oneshot::Sender>, + send_chunk: impl Fn(ValidatorIndex) -> bool, + tx: oneshot::Sender>, ) { let candidate_index = self .state @@ -137,15 +161,27 @@ impl MockAvailabilityStore { .expect("candidate was generated previously; qed"); gum::debug!(target: LOG_TARGET, ?candidate_hash, candidate_index, "Candidate mapped to index"); - let v = self - .state - .chunks - .get(*candidate_index) - .unwrap() - .iter() - .filter(|c| send_chunk(c.index.0 as usize)) - .cloned() - .collect(); + let n_validators = self.state.chunks[0].len(); + let candidate_chunks = self.state.chunks.get(*candidate_index).unwrap(); + let core_index = self.state.candidate_hash_to_core_index.get(&candidate_hash).unwrap(); + // We'll likely only send our chunk, so use capacity 1. 
+ let mut v = Vec::with_capacity(1); + + for validator_index in 0..n_validators { + if !send_chunk(ValidatorIndex(validator_index as u32)) { + continue; + } + let chunk_index = self + .state + .chunk_indices + .get(core_index.0 as usize) + .unwrap() + .get(validator_index) + .unwrap(); + + let chunk = candidate_chunks.get(chunk_index.0 as usize).unwrap().clone(); + v.push((ValidatorIndex(validator_index as u32), chunk.clone())); + } let _ = tx.send(v); } @@ -182,8 +218,12 @@ impl MockAvailabilityStore { AvailabilityStoreMessage::QueryAllChunks(candidate_hash, tx) => { // We always have our own chunk. gum::debug!(target: LOG_TARGET, candidate_hash = ?candidate_hash, "Responding to QueryAllChunks"); - self.respond_to_query_all_request(candidate_hash, |index| index == 0, tx) - .await; + self.respond_to_query_all_request( + candidate_hash, + |index| index == 0.into(), + tx, + ) + .await; }, AvailabilityStoreMessage::QueryChunkSize(candidate_hash, tx) => { gum::debug!(target: LOG_TARGET, candidate_hash = ?candidate_hash, "Responding to QueryChunkSize"); @@ -195,12 +235,29 @@ impl MockAvailabilityStore { .expect("candidate was generated previously; qed"); gum::debug!(target: LOG_TARGET, ?candidate_hash, candidate_index, "Candidate mapped to index"); - let chunk_size = - self.state.chunks.get(*candidate_index).unwrap()[0].encoded_size(); + let chunk_size = self + .state + .chunks + .get(*candidate_index) + .unwrap() + .first() + .unwrap() + .encoded_size(); let _ = tx.send(Some(chunk_size)); }, - AvailabilityStoreMessage::StoreChunk { candidate_hash, chunk, tx } => { - gum::debug!(target: LOG_TARGET, chunk_index = ?chunk.index ,candidate_hash = ?candidate_hash, "Responding to StoreChunk"); + AvailabilityStoreMessage::StoreChunk { + candidate_hash, + chunk, + tx, + validator_index, + } => { + gum::debug!( + target: LOG_TARGET, + chunk_index = ?chunk.index, + validator_index = ?validator_index, + candidate_hash = ?candidate_hash, + "Responding to StoreChunk" + ); let _ = tx.send(Ok(())); }, _ => { diff --git a/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs index 10508f456a4..d70953926d1 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs @@ -37,7 +37,7 @@ use sc_network::{request_responses::ProtocolConfig, RequestFailure}; const LOG_TARGET: &str = "subsystem-bench::network-bridge"; const ALLOWED_PROTOCOLS: &[&str] = &[ - "/ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff/req_chunk/1", + "/ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff/req_chunk/2", "/ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff/req_attested_candidate/2", ]; diff --git a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs index 9788a1123ec..be9dbd55cb6 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs @@ -26,9 +26,9 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_types::OverseerSignal; use polkadot_primitives::{ - AsyncBackingParams, CandidateEvent, CandidateReceipt, CoreState, GroupIndex, GroupRotationInfo, - IndexedVec, NodeFeatures, OccupiedCore, ScheduledCore, SessionIndex, SessionInfo, - ValidatorIndex, + node_features, AsyncBackingParams, CandidateEvent, CandidateReceipt, CoreState, GroupIndex, + GroupRotationInfo, IndexedVec, NodeFeatures, 
OccupiedCore, ScheduledCore, SessionIndex,
+ SessionInfo, ValidatorIndex,
 };
 use sp_consensus_babe::Epoch as BabeEpoch;
 use sp_core::H256;
@@ -41,6 +41,8 @@ const LOG_TARGET: &str = "subsystem-bench::runtime-api-mock";
 pub struct RuntimeApiState {
 // All authorities in the test,
 authorities: TestAuthorities,
+ // Node features state in the runtime
+ node_features: NodeFeatures,
 // Candidate hashes per block
 candidate_hashes: HashMap>,
 // Included candidates per block
@@ -76,6 +78,9 @@ impl MockRuntimeApi {
 session_index: SessionIndex,
 core_state: MockRuntimeApiCoreState,
 ) -> MockRuntimeApi {
+ // Enable chunk mapping feature to make systematic av-recovery possible.
+ let node_features = node_features_with_chunk_mapping_enabled();
+
 Self {
 state: RuntimeApiState {
 authorities,
@@ -83,6 +88,7 @@ impl MockRuntimeApi {
 included_candidates,
 babe_epoch,
 session_index,
+ node_features,
 },
 config,
 core_state,
@@ -168,15 +174,15 @@ impl MockRuntimeApi {
 },
 RuntimeApiMessage::Request(
 _block_hash,
- RuntimeApiRequest::SessionExecutorParams(_session_index, sender),
+ RuntimeApiRequest::NodeFeatures(_session_index, sender),
 ) => {
- let _ = sender.send(Ok(Some(Default::default())));
+ let _ = sender.send(Ok(self.state.node_features.clone()));
 },
 RuntimeApiMessage::Request(
- _request,
- RuntimeApiRequest::NodeFeatures(_session_index, sender),
+ _block_hash,
+ RuntimeApiRequest::SessionExecutorParams(_session_index, sender),
 ) => {
- let _ = sender.send(Ok(NodeFeatures::EMPTY));
+ let _ = sender.send(Ok(Some(Default::default())));
 },
 RuntimeApiMessage::Request(
 _block_hash,
@@ -292,3 +298,10 @@ impl MockRuntimeApi {
 }
 }
 }
+
+pub fn node_features_with_chunk_mapping_enabled() -> NodeFeatures {
+ let mut node_features = NodeFeatures::new();
+ node_features.resize(node_features::FeatureIndex::AvailabilityChunkMapping as usize + 1, false);
+ node_features.set(node_features::FeatureIndex::AvailabilityChunkMapping as u8 as usize, true);
+ node_features
+}
diff --git a/polkadot/node/subsystem-bench/src/lib/network.rs b/polkadot/node/subsystem-bench/src/lib/network.rs
index 9686f456b9e..775f881eaad 100644
--- a/polkadot/node/subsystem-bench/src/lib/network.rs
+++ b/polkadot/node/subsystem-bench/src/lib/network.rs
@@ -1016,7 +1016,7 @@ pub trait RequestExt {
 impl RequestExt for Requests {
 fn authority_id(&self) -> Option<&AuthorityDiscoveryId> {
 match self {
- Requests::ChunkFetchingV1(request) => {
+ Requests::ChunkFetching(request) => {
 if let Recipient::Authority(authority_id) = &request.peer {
 Some(authority_id)
 } else {
@@ -1052,7 +1052,7 @@ impl RequestExt for Requests {
 fn into_response_sender(self) -> ResponseSender {
 match self {
- Requests::ChunkFetchingV1(outgoing_request) => outgoing_request.pending_response,
+ Requests::ChunkFetching(outgoing_request) => outgoing_request.pending_response,
 Requests::AvailableDataFetchingV1(outgoing_request) =>
 outgoing_request.pending_response,
 _ => unimplemented!("unsupported request type"),
@@ -1062,7 +1062,7 @@
 /// Swaps the `ResponseSender` and returns the previous value.
fn swap_response_sender(&mut self, new_sender: ResponseSender) -> ResponseSender { match self { - Requests::ChunkFetchingV1(outgoing_request) => + Requests::ChunkFetching(outgoing_request) => std::mem::replace(&mut outgoing_request.pending_response, new_sender), Requests::AvailableDataFetchingV1(outgoing_request) => std::mem::replace(&mut outgoing_request.pending_response, new_sender), @@ -1075,7 +1075,7 @@ impl RequestExt for Requests { /// Returns the size in bytes of the request payload. fn size(&self) -> usize { match self { - Requests::ChunkFetchingV1(outgoing_request) => outgoing_request.payload.encoded_size(), + Requests::ChunkFetching(outgoing_request) => outgoing_request.payload.encoded_size(), Requests::AvailableDataFetchingV1(outgoing_request) => outgoing_request.payload.encoded_size(), Requests::AttestedCandidateV2(outgoing_request) => diff --git a/polkadot/node/subsystem-test-helpers/src/lib.rs b/polkadot/node/subsystem-test-helpers/src/lib.rs index 6c1ac86c450..375121c3746 100644 --- a/polkadot/node/subsystem-test-helpers/src/lib.rs +++ b/polkadot/node/subsystem-test-helpers/src/lib.rs @@ -25,7 +25,7 @@ use polkadot_node_subsystem::{ SubsystemError, SubsystemResult, TrySendError, }; use polkadot_node_subsystem_util::TimeoutExt; -use polkadot_primitives::{Hash, ValidatorIndex}; +use polkadot_primitives::{ChunkIndex, Hash}; use futures::{channel::mpsc, poll, prelude::*}; use parking_lot::Mutex; @@ -487,7 +487,7 @@ pub fn derive_erasure_chunks_with_proofs_and_root( .enumerate() .map(|(index, (proof, chunk))| ErasureChunk { chunk: chunk.to_vec(), - index: ValidatorIndex(index as _), + index: ChunkIndex(index as _), proof: Proof::try_from(proof).unwrap(), }) .collect::>(); diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml index 93dd43c5dbf..e03fc60a1fd 100644 --- a/polkadot/node/subsystem-types/Cargo.toml +++ b/polkadot/node/subsystem-types/Cargo.toml @@ -11,6 +11,7 @@ workspace = true [dependencies] derive_more = "0.99.17" +fatality = "0.1.1" futures = "0.3.30" polkadot-primitives = { path = "../../primitives" } polkadot-node-primitives = { path = "../primitives" } diff --git a/polkadot/node/subsystem-types/src/errors.rs b/polkadot/node/subsystem-types/src/errors.rs index 44136362a69..b8e70641243 100644 --- a/polkadot/node/subsystem-types/src/errors.rs +++ b/polkadot/node/subsystem-types/src/errors.rs @@ -18,6 +18,7 @@ use crate::JaegerError; use ::orchestra::OrchestraError as OverseerError; +use fatality::fatality; /// A description of an error causing the runtime API request to be unservable. #[derive(thiserror::Error, Debug, Clone)] @@ -68,32 +69,21 @@ impl core::fmt::Display for ChainApiError { impl std::error::Error for ChainApiError {} /// An error that may happen during Availability Recovery process. -#[derive(PartialEq, Debug, Clone)] +#[derive(PartialEq, Clone)] +#[fatality(splitable)] +#[allow(missing_docs)] pub enum RecoveryError { - /// A chunk is recovered but is invalid. + #[error("Invalid data")] Invalid, - /// A requested chunk is unavailable. + #[error("Data is unavailable")] Unavailable, - /// Erasure task channel closed, usually means node is shutting down. 
+ #[fatal] + #[error("Erasure task channel closed")] ChannelClosed, } -impl std::fmt::Display for RecoveryError { - fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> { - let msg = match self { - RecoveryError::Invalid => "Invalid", - RecoveryError::Unavailable => "Unavailable", - RecoveryError::ChannelClosed => "ChannelClosed", - }; - - write!(f, "{}", msg) - } -} - -impl std::error::Error for RecoveryError {} - /// An error type that describes faults that may happen /// /// These are: diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 2a54b3aed30..722a97989bc 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -480,6 +480,8 @@ pub enum AvailabilityRecoveryMessage { CandidateReceipt, SessionIndex, Option, // Optional backing group to request from first. + Option, /* A `CoreIndex` needs to be specified for the recovery process to + * prefer systematic chunk recovery. */ oneshot::Sender>, ), } @@ -515,7 +517,7 @@ pub enum AvailabilityStoreMessage { QueryChunkSize(CandidateHash, oneshot::Sender>), /// Query all chunks that we have for the given candidate hash. - QueryAllChunks(CandidateHash, oneshot::Sender>), + QueryAllChunks(CandidateHash, oneshot::Sender>), /// Query whether an `ErasureChunk` exists within the AV Store. /// @@ -530,6 +532,8 @@ pub enum AvailabilityStoreMessage { StoreChunk { /// A hash of the candidate this chunk belongs to. candidate_hash: CandidateHash, + /// Validator index. May not be equal to the chunk index. + validator_index: ValidatorIndex, /// The chunk itself. chunk: ErasureChunk, /// Sending side of the channel to send result to. @@ -549,6 +553,11 @@ pub enum AvailabilityStoreMessage { available_data: AvailableData, /// Erasure root we expect to get after chunking. expected_erasure_root: Hash, + /// Core index where the candidate was backed. + core_index: CoreIndex, + /// Node features at the candidate relay parent. Used for computing the validator->chunk + /// mapping. + node_features: NodeFeatures, /// Sending side of the channel to send result to. tx: oneshot::Sender>, }, diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml index 219ea4d3f57..9259ca94f07 100644 --- a/polkadot/node/subsystem-util/Cargo.toml +++ b/polkadot/node/subsystem-util/Cargo.toml @@ -24,6 +24,7 @@ gum = { package = "tracing-gum", path = "../gum" } derive_more = "0.99.17" schnellru = "0.2.1" +erasure-coding = { package = "polkadot-erasure-coding", path = "../../erasure-coding" } polkadot-node-subsystem = { path = "../subsystem" } polkadot-node-subsystem-types = { path = "../subsystem-types" } polkadot-node-jaeger = { path = "../jaeger" } diff --git a/polkadot/node/subsystem-util/src/availability_chunks.rs b/polkadot/node/subsystem-util/src/availability_chunks.rs new file mode 100644 index 00000000000..45168e4512e --- /dev/null +++ b/polkadot/node/subsystem-util/src/availability_chunks.rs @@ -0,0 +1,227 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use erasure_coding::systematic_recovery_threshold; +use polkadot_primitives::{node_features, ChunkIndex, CoreIndex, NodeFeatures, ValidatorIndex}; + +/// Compute the per-validator availability chunk index. +/// WARNING: THIS FUNCTION IS CRITICAL TO PARACHAIN CONSENSUS. +/// Any modification to the output of the function needs to be coordinated via the runtime. +/// It's best to use minimal/no external dependencies. +pub fn availability_chunk_index( + maybe_node_features: Option<&NodeFeatures>, + n_validators: usize, + core_index: CoreIndex, + validator_index: ValidatorIndex, +) -> Result { + if let Some(features) = maybe_node_features { + if let Some(&true) = features + .get(usize::from(node_features::FeatureIndex::AvailabilityChunkMapping as u8)) + .as_deref() + { + let systematic_threshold = systematic_recovery_threshold(n_validators)? as u32; + let core_start_pos = core_index.0 * systematic_threshold; + + return Ok(ChunkIndex((core_start_pos + validator_index.0) % n_validators as u32)) + } + } + + Ok(validator_index.into()) +} + +/// Compute the per-core availability chunk indices. Returns a Vec which maps ValidatorIndex to +/// ChunkIndex for a given availability core index +/// WARNING: THIS FUNCTION IS CRITICAL TO PARACHAIN CONSENSUS. +/// Any modification to the output of the function needs to be coordinated via the +/// runtime. It's best to use minimal/no external dependencies. +pub fn availability_chunk_indices( + maybe_node_features: Option<&NodeFeatures>, + n_validators: usize, + core_index: CoreIndex, +) -> Result, erasure_coding::Error> { + let identity = (0..n_validators).map(|index| ChunkIndex(index as u32)); + if let Some(features) = maybe_node_features { + if let Some(&true) = features + .get(usize::from(node_features::FeatureIndex::AvailabilityChunkMapping as u8)) + .as_deref() + { + let systematic_threshold = systematic_recovery_threshold(n_validators)? as u32; + let core_start_pos = core_index.0 * systematic_threshold; + + return Ok(identity + .into_iter() + .cycle() + .skip(core_start_pos as usize) + .take(n_validators) + .collect()) + } + } + + Ok(identity.collect()) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::collections::HashSet; + + pub fn node_features_with_mapping_enabled() -> NodeFeatures { + let mut node_features = NodeFeatures::new(); + node_features + .resize(node_features::FeatureIndex::AvailabilityChunkMapping as usize + 1, false); + node_features + .set(node_features::FeatureIndex::AvailabilityChunkMapping as u8 as usize, true); + node_features + } + + pub fn node_features_with_other_bits_enabled() -> NodeFeatures { + let mut node_features = NodeFeatures::new(); + node_features.resize(node_features::FeatureIndex::FirstUnassigned as usize + 1, true); + node_features + .set(node_features::FeatureIndex::AvailabilityChunkMapping as u8 as usize, false); + node_features + } + + #[test] + fn test_availability_chunk_indices() { + let n_validators = 20u32; + let n_cores = 15u32; + + // If the mapping feature is not enabled, it should always be the identity vector. 
+ { + for node_features in + [None, Some(NodeFeatures::EMPTY), Some(node_features_with_other_bits_enabled())] + { + for core_index in 0..n_cores { + let indices = availability_chunk_indices( + node_features.as_ref(), + n_validators as usize, + CoreIndex(core_index), + ) + .unwrap(); + + for validator_index in 0..n_validators { + assert_eq!( + indices[validator_index as usize], + availability_chunk_index( + node_features.as_ref(), + n_validators as usize, + CoreIndex(core_index), + ValidatorIndex(validator_index) + ) + .unwrap() + ) + } + + assert_eq!( + indices, + (0..n_validators).map(|i| ChunkIndex(i)).collect::>() + ); + } + } + } + + // Test when mapping feature is enabled. + { + let node_features = node_features_with_mapping_enabled(); + let mut previous_indices = None; + + for core_index in 0..n_cores { + let indices = availability_chunk_indices( + Some(&node_features), + n_validators as usize, + CoreIndex(core_index), + ) + .unwrap(); + + for validator_index in 0..n_validators { + assert_eq!( + indices[validator_index as usize], + availability_chunk_index( + Some(&node_features), + n_validators as usize, + CoreIndex(core_index), + ValidatorIndex(validator_index) + ) + .unwrap() + ) + } + + // Check that it's not equal to the previous core's indices. + if let Some(previous_indices) = previous_indices { + assert_ne!(previous_indices, indices); + } + + previous_indices = Some(indices.clone()); + + // Check that it's indeed a permutation. + assert_eq!( + (0..n_validators).map(|i| ChunkIndex(i)).collect::>(), + indices.into_iter().collect::>() + ); + } + } + } + + #[test] + // This is just a dummy test that checks the mapping against some hardcoded outputs, to prevent + // accidental changes to the algorithms. + fn prevent_changes_to_mapping() { + let n_validators = 7; + let node_features = node_features_with_mapping_enabled(); + + assert_eq!( + availability_chunk_indices(Some(&node_features), n_validators, CoreIndex(0)) + .unwrap() + .into_iter() + .map(|i| i.0) + .collect::>(), + vec![0, 1, 2, 3, 4, 5, 6] + ); + assert_eq!( + availability_chunk_indices(Some(&node_features), n_validators, CoreIndex(1)) + .unwrap() + .into_iter() + .map(|i| i.0) + .collect::>(), + vec![2, 3, 4, 5, 6, 0, 1] + ); + assert_eq!( + availability_chunk_indices(Some(&node_features), n_validators, CoreIndex(2)) + .unwrap() + .into_iter() + .map(|i| i.0) + .collect::>(), + vec![4, 5, 6, 0, 1, 2, 3] + ); + assert_eq!( + availability_chunk_indices(Some(&node_features), n_validators, CoreIndex(3)) + .unwrap() + .into_iter() + .map(|i| i.0) + .collect::>(), + vec![6, 0, 1, 2, 3, 4, 5] + ); + assert_eq!( + availability_chunk_indices(Some(&node_features), n_validators, CoreIndex(4)) + .unwrap() + .into_iter() + .map(|i| i.0) + .collect::>(), + vec![1, 2, 3, 4, 5, 6, 0] + ); + } +} diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs index b93818070a1..d371b699b9e 100644 --- a/polkadot/node/subsystem-util/src/lib.rs +++ b/polkadot/node/subsystem-util/src/lib.rs @@ -25,17 +25,15 @@ #![warn(missing_docs)] +pub use overseer::{ + gen::{OrchestraError as OverseerError, Timeout}, + Subsystem, TimeoutExt, +}; use polkadot_node_subsystem::{ errors::{RuntimeApiError, SubsystemError}, messages::{RuntimeApiMessage, RuntimeApiRequest, RuntimeApiSender}, overseer, SubsystemSender, }; -use polkadot_primitives::{async_backing::BackingState, slashing, CoreIndex, ExecutorParams}; - -pub use overseer::{ - gen::{OrchestraError as OverseerError, Timeout}, - Subsystem, TimeoutExt, -}; pub use 
polkadot_node_metrics::{metrics, Metronome}; @@ -43,11 +41,12 @@ use futures::channel::{mpsc, oneshot}; use parity_scale_codec::Encode; use polkadot_primitives::{ - AsyncBackingParams, AuthorityDiscoveryId, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, EncodeAs, GroupIndex, GroupRotationInfo, Hash, - Id as ParaId, OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, - SessionIndex, SessionInfo, Signed, SigningContext, ValidationCode, ValidationCodeHash, - ValidatorId, ValidatorIndex, ValidatorSignature, + async_backing::BackingState, slashing, AsyncBackingParams, AuthorityDiscoveryId, + CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, CoreState, EncodeAs, + ExecutorParams, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, OccupiedCoreAssumption, + PersistedValidationData, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, + SigningContext, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, + ValidatorSignature, }; pub use rand; use sp_application_crypto::AppCrypto; @@ -60,17 +59,18 @@ use std::{ use thiserror::Error; use vstaging::get_disabled_validators_with_fallback; +pub use determine_new_blocks::determine_new_blocks; pub use metered; pub use polkadot_node_network_protocol::MIN_GOSSIP_PEERS; -pub use determine_new_blocks::determine_new_blocks; - /// These reexports are required so that external crates can use the `delegated_subsystem` macro /// properly. pub mod reexports { pub use polkadot_overseer::gen::{SpawnedSubsystem, Spawner, Subsystem, SubsystemContext}; } +/// Helpers for the validator->chunk index mapping. +pub mod availability_chunks; /// A utility for managing the implicit view of the relay-chain derived from active /// leaves and the minimum allowed relay-parents that parachain candidates can have /// and be backed in those leaves' children. diff --git a/polkadot/node/subsystem-util/src/runtime/error.rs b/polkadot/node/subsystem-util/src/runtime/error.rs index 8751693b078..1111b119e95 100644 --- a/polkadot/node/subsystem-util/src/runtime/error.rs +++ b/polkadot/node/subsystem-util/src/runtime/error.rs @@ -28,7 +28,7 @@ pub enum Error { /// Runtime API subsystem is down, which means we're shutting down. #[fatal] #[error("Runtime request got canceled")] - RuntimeRequestCanceled(oneshot::Canceled), + RuntimeRequestCanceled(#[from] oneshot::Canceled), /// Some request to the runtime failed. /// For example if we prune a block we're requesting info about. 
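The `availability_chunks` module declared above (its full source appears earlier in this patch) boils down to a cyclic shift of the identity permutation by `core_index * systematic_threshold`. Here is a standalone sketch of just that shift, with the threshold passed in as an assumed input rather than computed from the erasure-coding parameters as the real `availability_chunk_indices` does:

```rust
// Standalone illustration of the shift; the real helper lives in
// `polkadot_node_subsystem_util::availability_chunks` and derives the
// threshold via `erasure_coding::systematic_recovery_threshold`.
fn shifted_chunk_indices(n_validators: u32, core_index: u32, systematic_threshold: u32) -> Vec<u32> {
    let start = core_index * systematic_threshold;
    (0..n_validators).map(|v| (start + v) % n_validators).collect()
}

fn main() {
    // Matches the hardcoded vectors in `prevent_changes_to_mapping` above,
    // where 7 validators correspond to a shift of 2 per core.
    assert_eq!(shifted_chunk_indices(7, 1, 2), vec![2, 3, 4, 5, 6, 0, 1]);
    assert_eq!(shifted_chunk_indices(7, 3, 2), vec![6, 0, 1, 2, 3, 4, 5]);
    assert_eq!(shifted_chunk_indices(7, 4, 2), vec![1, 2, 3, 4, 5, 6, 0]);
}
```

Because each core starts its rotation at a different offset, the validators holding the systematic chunks differ per core, which spreads the systematic-recovery load across the validator set.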
diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index 714384b32e3..214c58a8e88 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -31,8 +31,8 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_types::UnpinHandle; use polkadot_primitives::{ node_features::FeatureIndex, slashing, AsyncBackingParams, CandidateEvent, CandidateHash, - CoreState, EncodeAs, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, IndexedVec, - NodeFeatures, OccupiedCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, + CoreIndex, CoreState, EncodeAs, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, + IndexedVec, NodeFeatures, OccupiedCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, SigningContext, UncheckedSigned, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, LEGACY_MIN_BACKING_VOTES, }; @@ -348,7 +348,7 @@ where pub async fn get_occupied_cores( sender: &mut Sender, relay_parent: Hash, -) -> Result> +) -> Result> where Sender: overseer::SubsystemSender, { @@ -356,9 +356,10 @@ where Ok(cores .into_iter() - .filter_map(|core_state| { + .enumerate() + .filter_map(|(core_index, core_state)| { if let CoreState::Occupied(occupied) = core_state { - Some(occupied) + Some((CoreIndex(core_index as u32), occupied)) } else { None } diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs index 01f393086a6..061794ca06d 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -41,26 +41,26 @@ pub use v7::{ ApprovalVotingParams, AssignmentId, AsyncBackingParams, AuthorityDiscoveryId, AvailabilityBitfield, BackedCandidate, Balance, BlakeTwo256, Block, BlockId, BlockNumber, CandidateCommitments, CandidateDescriptor, CandidateEvent, CandidateHash, CandidateIndex, - CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CollatorId, - CollatorSignature, CommittedCandidateReceipt, CompactStatement, ConsensusLog, CoreIndex, - CoreState, DisputeState, DisputeStatement, DisputeStatementSet, DownwardMessage, EncodeAs, - ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash, ExecutorParamsPrepHash, - ExplicitDisputeStatement, GroupIndex, GroupRotationInfo, Hash, HashT, HeadData, Header, - HorizontalMessages, HrmpChannelId, Id, InboundDownwardMessage, InboundHrmpMessage, IndexedVec, - InherentData, InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, NodeFeatures, - Nonce, OccupiedCore, OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, - ParathreadEntry, PersistedValidationData, PvfCheckStatement, PvfExecKind, PvfPrepKind, - RuntimeMetricLabel, RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels, - RuntimeMetricOp, RuntimeMetricUpdate, ScheduledCore, ScrapedOnChainVotes, SessionIndex, - SessionInfo, Signature, Signed, SignedAvailabilityBitfield, SignedAvailabilityBitfields, - SignedStatement, SigningContext, Slot, UncheckedSigned, UncheckedSignedAvailabilityBitfield, - UncheckedSignedAvailabilityBitfields, UncheckedSignedStatement, UpgradeGoAhead, - UpgradeRestriction, UpwardMessage, ValidDisputeStatementKind, ValidationCode, - ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, - ValidityError, ASSIGNMENT_KEY_TYPE_ID, LEGACY_MIN_BACKING_VOTES, LOWEST_PUBLIC_ID, - MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, MIN_CODE_SIZE, - ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE, 
PARACHAINS_INHERENT_IDENTIFIER, - PARACHAIN_KEY_TYPE_ID, + CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, ChunkIndex, + CollatorId, CollatorSignature, CommittedCandidateReceipt, CompactStatement, ConsensusLog, + CoreIndex, CoreState, DisputeState, DisputeStatement, DisputeStatementSet, DownwardMessage, + EncodeAs, ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash, + ExecutorParamsPrepHash, ExplicitDisputeStatement, GroupIndex, GroupRotationInfo, Hash, HashT, + HeadData, Header, HorizontalMessages, HrmpChannelId, Id, InboundDownwardMessage, + InboundHrmpMessage, IndexedVec, InherentData, InvalidDisputeStatementKind, Moment, + MultiDisputeStatementSet, NodeFeatures, Nonce, OccupiedCore, OccupiedCoreAssumption, + OutboundHrmpMessage, ParathreadClaim, ParathreadEntry, PersistedValidationData, + PvfCheckStatement, PvfExecKind, PvfPrepKind, RuntimeMetricLabel, RuntimeMetricLabelValue, + RuntimeMetricLabelValues, RuntimeMetricLabels, RuntimeMetricOp, RuntimeMetricUpdate, + ScheduledCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signature, Signed, + SignedAvailabilityBitfield, SignedAvailabilityBitfields, SignedStatement, SigningContext, Slot, + UncheckedSigned, UncheckedSignedAvailabilityBitfield, UncheckedSignedAvailabilityBitfields, + UncheckedSignedStatement, UpgradeGoAhead, UpgradeRestriction, UpwardMessage, + ValidDisputeStatementKind, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, + ValidatorSignature, ValidityAttestation, ValidityError, ASSIGNMENT_KEY_TYPE_ID, + LEGACY_MIN_BACKING_VOTES, LOWEST_PUBLIC_ID, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, + MIN_CODE_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE, + PARACHAINS_INHERENT_IDENTIFIER, PARACHAIN_KEY_TYPE_ID, }; #[cfg(feature = "std")] diff --git a/polkadot/primitives/src/v7/mod.rs b/polkadot/primitives/src/v7/mod.rs index 8a059408496..fb8406aece6 100644 --- a/polkadot/primitives/src/v7/mod.rs +++ b/polkadot/primitives/src/v7/mod.rs @@ -117,6 +117,34 @@ pub trait TypeIndex { #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash))] pub struct ValidatorIndex(pub u32); +/// Index of an availability chunk. +/// +/// The underlying type is identical to `ValidatorIndex`, because +/// the number of chunks will always be equal to the number of validators. +/// However, the chunk index held by a validator may not always be equal to its `ValidatorIndex`, so +/// we use a separate type to make code easier to read. +#[derive(Eq, Ord, PartialEq, PartialOrd, Copy, Clone, Encode, Decode, TypeInfo, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash))] +pub struct ChunkIndex(pub u32); + +impl From for ValidatorIndex { + fn from(c_index: ChunkIndex) -> Self { + ValidatorIndex(c_index.0) + } +} + +impl From for ChunkIndex { + fn from(v_index: ValidatorIndex) -> Self { + ChunkIndex(v_index.0) + } +} + +impl From for ChunkIndex { + fn from(n: u32) -> Self { + ChunkIndex(n) + } +} + // We should really get https://github.com/paritytech/polkadot/issues/2403 going .. impl From for ValidatorIndex { fn from(n: u32) -> Self { @@ -1787,6 +1815,14 @@ where self.0.get(index.type_index()) } + /// Returns a mutable reference to an element indexed using `K`. + pub fn get_mut(&mut self, index: K) -> Option<&mut V> + where + K: TypeIndex, + { + self.0.get_mut(index.type_index()) + } + /// Returns number of elements in vector. 
pub fn len(&self) -> usize {
 self.0.len()
@@ -1989,6 +2025,7 @@ pub mod node_features {
 /// A feature index used to identify a bit into the node_features array stored
 /// in the HostConfiguration.
 #[repr(u8)]
+ #[derive(Clone, Copy)]
 pub enum FeatureIndex {
 /// Tells if tranche0 assignments could be sent in a single certificate.
 /// Reserved for: ``
 /// The value stored there represents the assumed core index where the candidates
 /// are backed. This is needed for the elastic scaling MVP.
 ElasticScalingMVP = 1,
+ /// Tells if the chunk mapping feature is enabled.
+ /// Enables the implementation of
+ /// [RFC-47](https://github.com/polkadot-fellows/RFCs/blob/main/text/0047-assignment-of-availability-chunks.md).
+ /// Must not be enabled unless all validators and collators have stopped using `req_chunk`
+ /// protocol version 1. If it is enabled, validators can start systematic chunk recovery.
+ AvailabilityChunkMapping = 2,
 /// First unassigned feature bit.
 /// Every time a new feature flag is assigned it should take this value,
 /// and this should be incremented.
- FirstUnassigned = 2,
+ FirstUnassigned = 3,
 }
 }
diff --git a/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md
index 345b3d2e697..9b4082c49e2 100644
--- a/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md
+++ b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md
@@ -396,7 +396,7 @@ On receiving an `ApprovedAncestor(Hash, BlockNumber, response_channel)`:
 * Requires `(SessionIndex, SessionInfo, CandidateReceipt, ValidatorIndex, backing_group, block_hash, candidate_index)`
 * Extract the public key of the `ValidatorIndex` from the `SessionInfo` for the session.
 * Issue an `AvailabilityRecoveryMessage::RecoverAvailableData(candidate, session_index, Some(backing_group),
- response_sender)`
+Some(core_index), response_sender)`
 * Load the historical validation code of the parachain by dispatching a
 `RuntimeApiRequest::ValidationCodeByHash(descriptor.validation_code_hash)` against the state of `block_hash`.
 * Spawn a background task with a clone of `background_tx`
diff --git a/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md b/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md
index c57c4589244..5b756080bec 100644
--- a/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md
+++ b/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md
@@ -1,84 +1,108 @@
 # Availability Recovery
 
-This subsystem is the inverse of the [Availability Distribution](availability-distribution.md) subsystem: validators
-will serve the availability chunks kept in the availability store to nodes who connect to them. And the subsystem will
-also implement the other side: the logic for nodes to connect to validators, request availability pieces, and
-reconstruct the `AvailableData`.
+This subsystem is responsible for recovering the data made available via the
+[Availability Distribution](availability-distribution.md) subsystem, necessary for candidate validation during the
+approval/disputes processes. Additionally, it is also used by collators to recover PoVs in adversarial scenarios
+where the other collators of the para are censoring blocks.
 
-This version of the availability recovery subsystem is based off of direct connections to validators.
In order to
-recover any given `AvailableData`, we must recover at least `f + 1` pieces from validators of the session. Thus, we will
-connect to and query randomly chosen validators until we have received `f + 1` pieces.
+According to the Polkadot protocol, in order to recover any given `AvailableData`, we generally must recover at least
+`f + 1` pieces from validators of the session. Thus, we should connect to and query randomly chosen validators until we
+have received `f + 1` pieces.
+
+In practice, there are various optimisations implemented in this subsystem which avoid querying all chunks from
+different validators and/or avoid doing the chunk reconstruction altogether.
 
 ## Protocol
 
-`PeerSet`: `Validation`
+This version of the availability recovery subsystem is based only on request-response network protocols.
 
 Input:
 
-* `NetworkBridgeUpdate(update)`
-* `AvailabilityRecoveryMessage::RecoverAvailableData(candidate, session, backing_group, response)`
+* `AvailabilityRecoveryMessage::RecoverAvailableData(candidate, session, backing_group, core_index, response)`
 
 Output:
 
-* `NetworkBridge::SendValidationMessage`
-* `NetworkBridge::ReportPeer`
-* `AvailabilityStore::QueryChunk`
+* `NetworkBridgeMessage::SendRequests`
+* `AvailabilityStoreMessage::QueryAllChunks`
+* `AvailabilityStoreMessage::QueryAvailableData`
+* `AvailabilityStoreMessage::QueryChunkSize`
+
 
 ## Functionality
 
-We hold a state which tracks the currently ongoing recovery tasks, as well as which request IDs correspond to which
-task. A recovery task is a structure encapsulating all recovery tasks with the network necessary to recover the
-available data in respect to one candidate.
+We hold a state which tracks the currently ongoing recovery tasks. A `RecoveryTask` is a structure encapsulating all
+network tasks needed in order to recover the available data with respect to a candidate.
+
+Each `RecoveryTask` has a collection of ordered recovery strategies to try.
 
 ```rust
+/// Subsystem state.
 struct State {
-	/// Each recovery is implemented as an independent async task, and the handles only supply information about the result.
-	ongoing_recoveries: FuturesUnordered,
-	/// A recent block hash for which state should be available.
-	live_block_hash: Hash,
-	// An LRU cache of recently recovered data.
-	availability_lru: LruMap>,
+	/// Each recovery task is implemented as its own async task,
+	/// and these handles are for communicating with them.
+	ongoing_recoveries: FuturesUnordered,
+	/// A recent block hash for which state should be available.
+	live_block: (BlockNumber, Hash),
+	/// An LRU cache of recently recovered data.
+	availability_lru: LruMap,
+	/// Cached runtime info.
+	runtime_info: RuntimeInfo,
 }
 
-/// This is a future, which concludes either when a response is received from the recovery tasks,
-/// or all the `awaiting` channels have closed.
-struct RecoveryHandle {
-	candidate_hash: CandidateHash,
-	interaction_response: RemoteHandle,
-	awaiting: Vec>>,
-}
-
-struct Unavailable;
-struct Concluded(CandidateHash, Result);
-
-struct RecoveryTaskParams {
-	validator_authority_keys: Vec,
-	validators: Vec,
-	// The number of pieces needed.
-	threshold: usize,
-	candidate_hash: Hash,
-	erasure_root: Hash,
+struct RecoveryParams {
+	/// Discovery ids of `validators`.
+	pub validator_authority_keys: Vec,
+	/// Number of validators.
+	pub n_validators: usize,
+	/// The number of regular chunks needed.
+	pub threshold: usize,
+	/// The number of systematic chunks needed.
+ pub systematic_threshold: usize, + /// A hash of the relevant candidate. + pub candidate_hash: CandidateHash, + /// The root of the erasure encoding of the candidate. + pub erasure_root: Hash, + /// Metrics to report. + pub metrics: Metrics, + /// Do not request data from availability-store. Useful for collators. + pub bypass_availability_store: bool, + /// The type of check to perform after available data was recovered. + pub post_recovery_check: PostRecoveryCheck, + /// The blake2-256 hash of the PoV. + pub pov_hash: Hash, + /// Protocol name for ChunkFetchingV1. + pub req_v1_protocol_name: ProtocolName, + /// Protocol name for ChunkFetchingV2. + pub req_v2_protocol_name: ProtocolName, + /// Whether or not chunk mapping is enabled. + pub chunk_mapping_enabled: bool, + /// Channel to the erasure task handler. + pub erasure_task_tx: mpsc::Sender, } -enum RecoveryTask { - RequestFromBackers { - // a random shuffling of the validators from the backing group which indicates the order - // in which we connect to them and request the chunk. - shuffled_backers: Vec, - } - RequestChunksFromValidators { - // a random shuffling of the validators which indicates the order in which we connect to the validators and - // request the chunk from them. - shuffling: Vec, - received_chunks: Map, - requesting_chunks: FuturesUnordered>, - } +pub struct RecoveryTask { + sender: Sender, + params: RecoveryParams, + strategies: VecDeque>>, + state: task::State, } -struct RecoveryTask { - to_subsystems: SubsystemSender, - params: RecoveryTaskParams, - source: Source, +#[async_trait::async_trait] +/// Common trait for runnable recovery strategies. +pub trait RecoveryStrategy: Send { + /// Main entry point of the strategy. + async fn run( + mut self: Box, + state: &mut task::State, + sender: &mut Sender, + common_params: &RecoveryParams, + ) -> Result; + + /// Return the name of the strategy for logging purposes. + fn display_name(&self) -> &'static str; + + /// Return the strategy type for use as a metric label. + fn strategy_type(&self) -> &'static str; } ``` @@ -90,68 +114,71 @@ Ignore `BlockFinalized` signals. On `Conclude`, shut down the subsystem. -#### `AvailabilityRecoveryMessage::RecoverAvailableData(receipt, session, Option, response)` +#### `AvailabilityRecoveryMessage::RecoverAvailableData(...)` -1. Check the `availability_lru` for the candidate and return the data if so. -1. Check if there is already an recovery handle for the request. If so, add the response handle to it. +1. Check the `availability_lru` for the candidate and return the data if present. +1. Check if there is already a recovery handle for the request. If so, add the response handle to it. 1. Otherwise, load the session info for the given session under the state of `live_block_hash`, and initiate a recovery - task with *`launch_recovery_task`*. Add a recovery handle to the state and add the response channel to it. + task with `launch_recovery_task`. Add a recovery handle to the state and add the response channel to it. 1. If the session info is not available, return `RecoveryError::Unavailable` on the response channel. ### Recovery logic -#### `launch_recovery_task(session_index, session_info, candidate_receipt, candidate_hash, Option)` +#### `handle_recover(...) -> Result<()>` -1. Compute the threshold from the session info. It should be `f + 1`, where `n = 3f + k`, where `k in {1, 2, 3}`, and - `n` is the number of validators. -1. 
Set the various fields of `RecoveryParams` based on the validator lists in `session_info` and information about the
-   candidate.
-1. If the `backing_group_index` is `Some`, start in the `RequestFromBackers` phase with a shuffling of the backing group
-   validator indices and a `None` requesting value.
-1. Otherwise, start in the `RequestChunksFromValidators` source with `received_chunks`,`requesting_chunks`, and
-   `next_shuffling` all empty.
-1. Set the `to_subsystems` sender to be equal to a clone of the `SubsystemContext`'s sender.
-1. Initialize `received_chunks` to an empty set, as well as `requesting_chunks`.
+Instantiate the appropriate `RecoveryStrategy` instances, based on the subsystem configuration, params and session info.
+Call `launch_recovery_task()`.
 
-Launch the source as a background task running `run(recovery_task)`.
+#### `launch_recovery_task(state, ctx, response_sender, recovery_strategies, params) -> Result<()>`
 
-#### `run(recovery_task) -> Result`
+Create the `RecoveryTask` and launch it as a background task running `recovery_task.run()`.
 
-```rust
-// How many parallel requests to have going at once.
-const N_PARALLEL: usize = 50;
-```
+#### `recovery_task.run(mut self) -> Result`
+
+* Loop:
+  * Pop a strategy from the queue. If none are left, return `RecoveryError::Unavailable`.
+  * Run the strategy.
+  * If the strategy returned successfully or returned `RecoveryError::Invalid`, break the loop.
+
+### Recovery strategies
+
+#### `FetchFull`
+
+This strategy tries requesting the full available data from the validators in the backing group to
+which the node is already connected. They are tried one by one in a random order.
+It is very performant if there's enough network bandwidth and the backing group is not overloaded.
+The costly reed-solomon reconstruction is not needed.
+
+#### `FetchSystematicChunks`
+
+Very similar to `FetchChunks` below but requests from the validators that hold the systematic chunks, so that we avoid
+reed-solomon reconstruction. Only possible if `node_features::FeatureIndex::AvailabilityChunkMapping` is enabled and
+the `core_index` is supplied (currently only for recoveries triggered by approval voting).
+
+More info in
+[RFC-47](https://github.com/polkadot-fellows/RFCs/blob/main/text/0047-assignment-of-availability-chunks.md).
+
+#### `FetchChunks`
+
+The least performant strategy but also the most comprehensive one. It's the only one that cannot fail under the
+Byzantine threshold assumption, so it's always added as the last one in the `recovery_strategies` queue.
+
+Performs parallel chunk requests to validators. Once enough chunks have been received, the data is reconstructed.
+In the worst case, all validators will be tried.
+
+### Default recovery strategy configuration
+
+#### For validators
+
+If the estimated available data size is smaller than a configured constant (currently 1 MiB for Polkadot or 4 MiB for
+other networks), try doing `FetchFull` first.
+Next, if the preconditions described in `FetchSystematicChunks` above are met, try systematic recovery.
+As a last resort, do `FetchChunks`.
+
+#### For collators
+
+Collators currently only use `FetchChunks`, as they only attempt recoveries in rare scenarios.
 
-* Request `AvailabilityStoreMessage::QueryAvailableData`. If it exists, return that.
-* If the task contains `RequestFromBackers`
-  * Loop:
-    * If the `requesting_pov` is `Some`, poll for updates on it. If it concludes, set `requesting_pov` to `None`.
-    * If the `requesting_pov` is `None`, take the next backer off the `shuffled_backers`.
-      * If the backer is `Some`, issue a `NetworkBridgeMessage::Requests` with a network request for the
-        `AvailableData` and wait for the response.
-        * If it concludes with a `None` result, return to beginning.
-        * If it concludes with available data, attempt a re-encoding.
-          * If it has the correct erasure-root, break and issue a `Ok(available_data)`.
-          * If it has an incorrect erasure-root, return to beginning.
-        * Send the result to each member of `awaiting`.
-      * If the backer is `None`, set the source to `RequestChunksFromValidators` with a random shuffling of validators
-        and empty `received_chunks`, and `requesting_chunks` and break the loop.
-
-* If the task contains `RequestChunksFromValidators`:
-  * Request `AvailabilityStoreMessage::QueryAllChunks`. For each chunk that exists, add it to `received_chunks` and
-    remote the validator from `shuffling`.
-  * Loop:
-    * If `received_chunks + requesting_chunks + shuffling` lengths are less than the threshold, break and return
-      `Err(Unavailable)`.
-    * Poll for new updates from `requesting_chunks`. Check merkle proofs of any received chunks. If the request simply
-      fails due to network issues, insert into the front of `shuffling` to be retried.
-    * If `received_chunks` has more than `threshold` entries, attempt to recover the data.
-      * If that fails, return `Err(RecoveryError::Invalid)`
-      * If correct:
-        * If re-encoding produces an incorrect erasure-root, break and issue a `Err(RecoveryError::Invalid)`.
-        * break and issue `Ok(available_data)`
-      * Send the result to each member of `awaiting`.
-    * While there are fewer than `N_PARALLEL` entries in `requesting_chunks`,
-      * Pop the next item from `shuffling`. If it's empty and `requesting_chunks` is empty, return
-        `Err(RecoveryError::Unavailable)`.
-      * Issue a `NetworkBridgeMessage::Requests` and wait for the response in `requesting_chunks`.
+Moreover, the recovery task is specially configured to not attempt requesting data from the local availability-store
+(because it doesn't exist) and to not re-encode the data after a successful recovery (because it's an expensive check
+that is not needed; checking the pov_hash is enough for collators).
diff --git a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md
index e011afb9708..c82d89d2d87 100644
--- a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md
+++ b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md
@@ -238,6 +238,9 @@ enum AvailabilityRecoveryMessage {
 CandidateReceipt,
 SessionIndex,
 Option, // Backing validator group to request the data directly from.
+ Option, /* A `CoreIndex` needs to be specified for the recovery process to
+ * prefer systematic chunk recovery. This is the core that the candidate
+ * was occupying while pending availability.
*/ ResponseChannel>, ), } diff --git a/polkadot/zombienet_tests/functional/0013-enable-node-feature.js b/polkadot/zombienet_tests/functional/0013-enable-node-feature.js new file mode 100644 index 00000000000..5fe2e38dad7 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0013-enable-node-feature.js @@ -0,0 +1,35 @@ +async function run(nodeName, networkInfo, index) { + const { wsUri, userDefinedTypes } = networkInfo.nodesByName[nodeName]; + const api = await zombie.connect(wsUri, userDefinedTypes); + + await zombie.util.cryptoWaitReady(); + + // account to submit tx + const keyring = new zombie.Keyring({ type: "sr25519" }); + const alice = keyring.addFromUri("//Alice"); + + await new Promise(async (resolve, reject) => { + const unsub = await api.tx.sudo + .sudo(api.tx.configuration.setNodeFeature(Number(index), true)) + .signAndSend(alice, ({ status, isError }) => { + if (status.isInBlock) { + console.log( + `Transaction included at blockhash ${status.asInBlock}`, + ); + } else if (status.isFinalized) { + console.log( + `Transaction finalized at blockHash ${status.asFinalized}`, + ); + unsub(); + return resolve(); + } else if (isError) { + console.log(`Transaction error`); + reject(`Transaction error`); + } + }); + }); + + return 0; +} + +module.exports = { run }; diff --git a/polkadot/zombienet_tests/functional/0013-systematic-chunk-recovery.toml b/polkadot/zombienet_tests/functional/0013-systematic-chunk-recovery.toml new file mode 100644 index 00000000000..67925a3d3a7 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0013-systematic-chunk-recovery.toml @@ -0,0 +1,46 @@ +[settings] +timeout = 1000 +bootnode = true + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 2 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config] + needed_approvals = 4 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +default_command = "polkadot" + +[relaychain.default_resources] +limits = { memory = "4G", cpu = "2" } +requests = { memory = "2G", cpu = "1" } + + [[relaychain.nodes]] + name = "alice" + validator = "true" + + [[relaychain.node_groups]] + name = "validator" + count = 3 + args = ["-lparachain=debug,parachain::availability-recovery=trace,parachain::availability-distribution=trace"] + +{% for id in range(2000,2002) %} +[[parachains]] +id = {{id}} +addToGenesis = true +cumulus_based = true +chain = "glutton-westend-local-{{id}}" + [parachains.genesis.runtimeGenesis.patch.glutton] + compute = "50000000" + storage = "2500000000" + trashDataCount = 5120 + + [parachains.collator] + name = "collator" + image = "{{CUMULUS_IMAGE}}" + command = "polkadot-parachain" + args = ["-lparachain=debug"] + +{% endfor %} diff --git a/polkadot/zombienet_tests/functional/0013-systematic-chunk-recovery.zndsl b/polkadot/zombienet_tests/functional/0013-systematic-chunk-recovery.zndsl new file mode 100644 index 00000000000..e9e5a429e2a --- /dev/null +++ b/polkadot/zombienet_tests/functional/0013-systematic-chunk-recovery.zndsl @@ -0,0 +1,43 @@ +Description: Systematic chunk recovery is used if the chunk mapping feature is enabled. +Network: ./0013-systematic-chunk-recovery.toml +Creds: config + +# Check authority status. +alice: reports node_roles is 4 +validator: reports node_roles is 4 + +# Ensure parachains are registered. 
+validator: parachain 2000 is registered within 60 seconds +validator: parachain 2001 is registered within 60 seconds + +# Ensure parachains made progress and approval checking works. +validator: parachain 2000 block height is at least 15 within 600 seconds +validator: parachain 2001 block height is at least 15 within 600 seconds + +validator: reports substrate_block_height{status="finalized"} is at least 30 within 400 seconds + +validator: reports polkadot_parachain_approval_checking_finality_lag < 3 + +validator: reports polkadot_parachain_approvals_no_shows_total < 3 within 100 seconds + +# Ensure we used regular chunk recovery and that there are no failed recoveries. +validator: count of log lines containing "Data recovery from chunks complete" is at least 10 within 300 seconds +validator: count of log lines containing "Data recovery from systematic chunks complete" is 0 within 10 seconds +validator: count of log lines containing "Data recovery from systematic chunks is not possible" is 0 within 10 seconds +validator: count of log lines containing "Data recovery from chunks is not possible" is 0 within 10 seconds +validator: reports polkadot_parachain_availability_recovery_recoveries_finished{result="failure"} is 0 within 10 seconds + +# Enable the chunk mapping feature +alice: js-script ./0013-enable-node-feature.js with "2" return is 0 within 600 seconds + +validator: reports substrate_block_height{status="finalized"} is at least 60 within 400 seconds + +validator: reports polkadot_parachain_approval_checking_finality_lag < 3 + +validator: reports polkadot_parachain_approvals_no_shows_total < 3 within 100 seconds + +# Ensure we used systematic chunk recovery and that there are no failed recoveries. +validator: count of log lines containing "Data recovery from systematic chunks complete" is at least 10 within 300 seconds +validator: count of log lines containing "Data recovery from systematic chunks is not possible" is 0 within 10 seconds +validator: count of log lines containing "Data recovery from chunks is not possible" is 0 within 10 seconds +validator: reports polkadot_parachain_availability_recovery_recoveries_finished{result="failure"} is 0 within 10 seconds diff --git a/polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.toml b/polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.toml new file mode 100644 index 00000000000..881abab64fd --- /dev/null +++ b/polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.toml @@ -0,0 +1,48 @@ +[settings] +timeout = 1000 +bootnode = true + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 2 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config] + needed_approvals = 4 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +default_command = "polkadot" + +[relaychain.default_resources] +limits = { memory = "4G", cpu = "2" } +requests = { memory = "2G", cpu = "1" } + + [[relaychain.node_groups]] + # Use an image that doesn't speak /req_chunk/2 protocol. 
+ image = "{{POLKADOT_IMAGE}}:master-bde0bbe5" + name = "old" + count = 2 + args = ["-lparachain=debug,parachain::availability-recovery=trace,parachain::availability-distribution=trace"] + + [[relaychain.node_groups]] + name = "new" + count = 2 + args = ["-lparachain=debug,parachain::availability-recovery=trace,parachain::availability-distribution=trace,sub-libp2p=trace"] + +{% for id in range(2000,2002) %} +[[parachains]] +id = {{id}} +addToGenesis = true +cumulus_based = true +chain = "glutton-westend-local-{{id}}" + [parachains.genesis.runtimeGenesis.patch.glutton] + compute = "50000000" + storage = "2500000000" + trashDataCount = 5120 + + [parachains.collator] + name = "collator" + image = "{{CUMULUS_IMAGE}}" + args = ["-lparachain=debug"] + +{% endfor %} diff --git a/polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.zndsl b/polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.zndsl new file mode 100644 index 00000000000..2ac5012db66 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.zndsl @@ -0,0 +1,53 @@ +Description: Validators preserve backwards compatibility with peers speaking an older version of the /req_chunk protocol +Network: ./0014-chunk-fetching-network-compatibility.toml +Creds: config + +# Check authority status. +new: reports node_roles is 4 +old: reports node_roles is 4 + +# Ensure parachains are registered. +new: parachain 2000 is registered within 60 seconds +old: parachain 2000 is registered within 60 seconds +old: parachain 2001 is registered within 60 seconds +new: parachain 2001 is registered within 60 seconds + +# Ensure parachains made progress and approval checking works. +new: parachain 2000 block height is at least 15 within 600 seconds +old: parachain 2000 block height is at least 15 within 600 seconds +new: parachain 2001 block height is at least 15 within 600 seconds +old: parachain 2001 block height is at least 15 within 600 seconds + +new: reports substrate_block_height{status="finalized"} is at least 30 within 400 seconds +old: reports substrate_block_height{status="finalized"} is at least 30 within 400 seconds + +new: reports polkadot_parachain_approval_checking_finality_lag < 3 +old: reports polkadot_parachain_approval_checking_finality_lag < 3 + +new: reports polkadot_parachain_approvals_no_shows_total < 3 within 10 seconds +old: reports polkadot_parachain_approvals_no_shows_total < 3 within 10 seconds + +# Ensure that there are no failed recoveries. +new: count of log lines containing "Data recovery from chunks complete" is at least 10 within 300 seconds +old: count of log lines containing "Data recovery from chunks complete" is at least 10 within 300 seconds +new: count of log lines containing "Data recovery from chunks is not possible" is 0 within 10 seconds +old: count of log lines containing "Data recovery from chunks is not possible" is 0 within 10 seconds +new: reports polkadot_parachain_availability_recovery_recoveries_finished{result="failure"} is 0 within 10 seconds +old: reports polkadot_parachain_availability_recovery_recoveries_finished{result="failure"} is 0 within 10 seconds + +# Ensure we used the fallback network request. +new: log line contains "Trying the fallback protocol" within 100 seconds + +# Ensure systematic recovery was not used. 
+old: count of log lines containing "Data recovery from systematic chunks complete" is 0 within 10 seconds +new: count of log lines containing "Data recovery from systematic chunks complete" is 0 within 10 seconds + +# Ensure availability-distribution worked fine +new: reports polkadot_parachain_fetched_chunks_total{success="succeeded"} is at least 10 within 400 seconds +old: reports polkadot_parachain_fetched_chunks_total{success="succeeded"} is at least 10 within 400 seconds + +new: reports polkadot_parachain_fetched_chunks_total{success="failed"} is 0 within 10 seconds +old: reports polkadot_parachain_fetched_chunks_total{success="failed"} is 0 within 10 seconds + +new: reports polkadot_parachain_fetched_chunks_total{success="not-found"} is 0 within 10 seconds +old: reports polkadot_parachain_fetched_chunks_total{success="not-found"} is 0 within 10 seconds diff --git a/prdoc/pr_1644.prdoc b/prdoc/pr_1644.prdoc new file mode 100644 index 00000000000..cc43847fa09 --- /dev/null +++ b/prdoc/pr_1644.prdoc @@ -0,0 +1,59 @@ +title: Add availability-recovery from systematic chunks + +doc: + - audience: Node Operator + description: | + Implements https://github.com/polkadot-fellows/RFCs/pull/47. This optimisation is guarded by a configuration bit in + the runtime and will only be enabled once a supermajority of the validators have upgraded to this version. + It's strongly advised to upgrade to this version. + - audience: Node Dev + description: | + Implements https://github.com/polkadot-fellows/RFCs/pull/47 and adds the logic for availability recovery from systematic chunks. + The /req_chunk/1 req-response protocol is now considered deprecated in favour of /req_chunk/2. Systematic recovery is guarded + by a configuration bit in the runtime (bit with index 2 of the node_features field from the HostConfiguration) + and must not be enabled until all (or almost all) validators have upgraded to the node version that includes + this PR. 
+
+crates:
+ - name: sc-network
+ bump: minor
+ - name: polkadot-primitives
+ bump: minor
+ - name: cumulus-client-pov-recovery
+ bump: none
+ - name: polkadot-overseer
+ bump: none
+ - name: polkadot-node-primitives
+ bump: major
+ - name: polkadot-erasure-coding
+ bump: major
+ - name: polkadot-node-jaeger
+ bump: major
+ - name: polkadot-node-subsystem-types
+ bump: major
+ - name: polkadot-node-network-protocol
+ bump: major
+ - name: polkadot-service
+ bump: major
+ - name: polkadot-node-subsystem-util
+ bump: major
+ - name: polkadot-availability-distribution
+ bump: major
+ - name: polkadot-availability-recovery
+ bump: major
+ - name: polkadot-node-core-approval-voting
+ bump: minor
+ - name: polkadot-node-core-av-store
+ bump: major
+ - name: polkadot-network-bridge
+ bump: minor
+ - name: polkadot-node-core-backing
+ bump: none
+ - name: polkadot-node-core-bitfield-signing
+ bump: none
+ - name: polkadot-node-core-dispute-coordinator
+ bump: none
+ - name: cumulus-relay-chain-minimal-node
+ bump: minor
+ - name: polkadot
+ bump: minor
diff --git a/substrate/client/network/src/service.rs b/substrate/client/network/src/service.rs
index 1aaa63191a8..27de12bc1ec 100644
--- a/substrate/client/network/src/service.rs
+++ b/substrate/client/network/src/service.rs
@@ -592,7 +592,7 @@ where
 crate::MAX_CONNECTIONS_ESTABLISHED_INCOMING,
 )),
 )
- .substream_upgrade_protocol_override(upgrade::Version::V1Lazy)
+ .substream_upgrade_protocol_override(upgrade::Version::V1)
 .notify_handler_buffer_size(NonZeroUsize::new(32).expect("32 != 0; qed"))
 // NOTE: 24 is somewhat arbitrary and should be tuned in the future if necessary.
 // See
-- GitLab


From 3bf283ff22224e7713cf0c1b9878e9137dc6dbf7 Mon Sep 17 00:00:00 2001
From: Andrei Eres
Date: Tue, 28 May 2024 10:51:40 +0200
Subject: [PATCH 076/106] [subsystem-bench] Remove redundant benchmark_name
 param (#4540)

Fixes https://github.com/paritytech/polkadot-sdk/issues/3601

Since we print benchmark results manually, we don't need to save
benchmark_name anywhere; it's better to just put the name inside `println!`.
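For illustration, the change to each call site amounts to roughly the
following (a condensed sketch assembled from the diffs below; `env`, `state`
and `benchmark_name` are the pre-existing variables in the CLI runner):

```rust
// Before: the name was threaded through every helper, only to be stored
// in BenchmarkUsage and printed by its Display impl.
let usage = env.runtime().block_on(bench_approvals("approvals_throughput", &mut env, state));
println!("{}", usage);

// After: the helpers drop the parameter and the caller prints the name once.
let usage = env.runtime().block_on(bench_approvals(&mut env, state));
println!("\n{}\n{}", benchmark_name.purple(), usage);
```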
--- .../approval-voting-regression-bench.rs | 2 +- ...ilability-distribution-regression-bench.rs | 6 +--- .../availability-recovery-regression-bench.rs | 6 +--- ...statement-distribution-regression-bench.rs | 6 +--- .../src/cli/subsystem-bench.rs | 29 +++++-------------- .../subsystem-bench/src/lib/approval/mod.rs | 6 ++-- .../src/lib/availability/mod.rs | 13 ++++----- .../subsystem-bench/src/lib/environment.rs | 7 +---- .../subsystem-bench/src/lib/statement/mod.rs | 3 +- .../node/subsystem-bench/src/lib/usage.rs | 23 ++++----------- 10 files changed, 27 insertions(+), 74 deletions(-) diff --git a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs index 280b8c53f7d..687063dd0eb 100644 --- a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs +++ b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs @@ -61,7 +61,7 @@ fn main() -> Result<(), String> { print!("\r[{}{}]", "#".repeat(n), "_".repeat(BENCH_COUNT - n)); std::io::stdout().flush().unwrap(); let (mut env, state) = prepare_test(config.clone(), options.clone(), false); - env.runtime().block_on(bench_approvals("approvals_throughput", &mut env, state)) + env.runtime().block_on(bench_approvals(&mut env, state)) }) .collect(); println!("\rDone!{}", " ".repeat(BENCH_COUNT)); diff --git a/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs index 72278b5770b..6083a90e481 100644 --- a/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs +++ b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs @@ -53,11 +53,7 @@ fn main() -> Result<(), String> { polkadot_subsystem_bench::availability::TestDataAvailability::Write, false, ); - env.runtime().block_on(benchmark_availability_write( - "data_availability_write", - &mut env, - &state, - )) + env.runtime().block_on(benchmark_availability_write(&mut env, &state)) }) .collect(); println!("\rDone!{}", " ".repeat(BENCH_COUNT)); diff --git a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs index e5a8f1eb7c9..c734ac99e87 100644 --- a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs +++ b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs @@ -51,11 +51,7 @@ fn main() -> Result<(), String> { std::io::stdout().flush().unwrap(); let (mut env, _cfgs) = prepare_test(&state, TestDataAvailability::Read(options.clone()), false); - env.runtime().block_on(benchmark_availability_read( - "data_availability_read", - &mut env, - &state, - )) + env.runtime().block_on(benchmark_availability_read(&mut env, &state)) }) .collect(); println!("\rDone!{}", " ".repeat(BENCH_COUNT)); diff --git a/polkadot/node/network/statement-distribution/benches/statement-distribution-regression-bench.rs b/polkadot/node/network/statement-distribution/benches/statement-distribution-regression-bench.rs index abcb1e6783f..9cbe385e3f4 100644 --- a/polkadot/node/network/statement-distribution/benches/statement-distribution-regression-bench.rs +++ 
b/polkadot/node/network/statement-distribution/benches/statement-distribution-regression-bench.rs @@ -44,11 +44,7 @@ fn main() -> Result<(), String> { print!("\r[{}{}]", "#".repeat(n), "_".repeat(BENCH_COUNT - n)); std::io::stdout().flush().unwrap(); let (mut env, _cfgs) = prepare_test(&state, false); - env.runtime().block_on(benchmark_statement_distribution( - "statement-distribution", - &mut env, - &state, - )) + env.runtime().block_on(benchmark_statement_distribution(&mut env, &state)) }) .collect(); println!("\rDone!{}", " ".repeat(BENCH_COUNT)); diff --git a/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs b/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs index 1e921500a4d..346a058b979 100644 --- a/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs +++ b/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs @@ -145,11 +145,8 @@ impl BenchCli { availability::TestDataAvailability::Read(opts), true, ); - env.runtime().block_on(availability::benchmark_availability_read( - &benchmark_name, - &mut env, - &state, - )) + env.runtime() + .block_on(availability::benchmark_availability_read(&mut env, &state)) }, TestObjective::DataAvailabilityWrite => { let state = availability::TestState::new(&test_config); @@ -158,32 +155,22 @@ impl BenchCli { availability::TestDataAvailability::Write, true, ); - env.runtime().block_on(availability::benchmark_availability_write( - &benchmark_name, - &mut env, - &state, - )) + env.runtime() + .block_on(availability::benchmark_availability_write(&mut env, &state)) }, TestObjective::ApprovalVoting(ref options) => { let (mut env, state) = approval::prepare_test(test_config.clone(), options.clone(), true); - env.runtime().block_on(approval::bench_approvals( - &benchmark_name, - &mut env, - state, - )) + env.runtime().block_on(approval::bench_approvals(&mut env, state)) }, TestObjective::StatementDistribution => { let state = statement::TestState::new(&test_config); let (mut env, _protocol_config) = statement::prepare_test(&state, true); - env.runtime().block_on(statement::benchmark_statement_distribution( - &benchmark_name, - &mut env, - &state, - )) + env.runtime() + .block_on(statement::benchmark_statement_distribution(&mut env, &state)) }, }; - println!("{}", usage); + println!("\n{}\n{}", benchmark_name.purple(), usage); } if let Some(agent_running) = agent_running { diff --git a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs index 4a479b6af29..2e5831276ad 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs @@ -888,7 +888,6 @@ fn prepare_test_inner( } pub async fn bench_approvals( - benchmark_name: &str, env: &mut TestEnvironment, mut state: ApprovalTestState, ) -> BenchmarkUsage { @@ -900,12 +899,11 @@ pub async fn bench_approvals( env.registry().clone(), ) .await; - bench_approvals_run(benchmark_name, env, state, producer_rx).await + bench_approvals_run(env, state, producer_rx).await } /// Runs the approval benchmark. 
pub async fn bench_approvals_run( - benchmark_name: &str, env: &mut TestEnvironment, state: ApprovalTestState, producer_rx: oneshot::Receiver<()>, @@ -1072,5 +1070,5 @@ pub async fn bench_approvals_run( state.total_unique_messages.load(std::sync::atomic::Ordering::SeqCst) ); - env.collect_resource_usage(benchmark_name, &["approval-distribution", "approval-voting"]) + env.collect_resource_usage(&["approval-distribution", "approval-voting"]) } diff --git a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs index 955a8fbac2e..52944ffb08f 100644 --- a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs @@ -307,7 +307,6 @@ pub fn prepare_test( } pub async fn benchmark_availability_read( - benchmark_name: &str, env: &mut TestEnvironment, state: &TestState, ) -> BenchmarkUsage { @@ -373,11 +372,10 @@ pub async fn benchmark_availability_read( ); env.stop().await; - env.collect_resource_usage(benchmark_name, &["availability-recovery"]) + env.collect_resource_usage(&["availability-recovery"]) } pub async fn benchmark_availability_write( - benchmark_name: &str, env: &mut TestEnvironment, state: &TestState, ) -> BenchmarkUsage { @@ -508,8 +506,9 @@ pub async fn benchmark_availability_write( ); env.stop().await; - env.collect_resource_usage( - benchmark_name, - &["availability-distribution", "bitfield-distribution", "availability-store"], - ) + env.collect_resource_usage(&[ + "availability-distribution", + "bitfield-distribution", + "availability-store", + ]) } diff --git a/polkadot/node/subsystem-bench/src/lib/environment.rs b/polkadot/node/subsystem-bench/src/lib/environment.rs index 42955d03022..a63f90da50b 100644 --- a/polkadot/node/subsystem-bench/src/lib/environment.rs +++ b/polkadot/node/subsystem-bench/src/lib/environment.rs @@ -351,13 +351,8 @@ impl TestEnvironment { } } - pub fn collect_resource_usage( - &self, - benchmark_name: &str, - subsystems_under_test: &[&str], - ) -> BenchmarkUsage { + pub fn collect_resource_usage(&self, subsystems_under_test: &[&str]) -> BenchmarkUsage { BenchmarkUsage { - benchmark_name: benchmark_name.to_string(), network_usage: self.network_usage(), cpu_usage: self.cpu_usage(subsystems_under_test), } diff --git a/polkadot/node/subsystem-bench/src/lib/statement/mod.rs b/polkadot/node/subsystem-bench/src/lib/statement/mod.rs index 508dd9179f7..bd47505f56a 100644 --- a/polkadot/node/subsystem-bench/src/lib/statement/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/statement/mod.rs @@ -224,7 +224,6 @@ pub fn generate_topology(test_authorities: &TestAuthorities) -> SessionGridTopol } pub async fn benchmark_statement_distribution( - benchmark_name: &str, env: &mut TestEnvironment, state: &TestState, ) -> BenchmarkUsage { @@ -446,5 +445,5 @@ pub async fn benchmark_statement_distribution( ); env.stop().await; - env.collect_resource_usage(benchmark_name, &["statement-distribution"]) + env.collect_resource_usage(&["statement-distribution"]) } diff --git a/polkadot/node/subsystem-bench/src/lib/usage.rs b/polkadot/node/subsystem-bench/src/lib/usage.rs index bfaac3265a2..883e9aa7ad0 100644 --- a/polkadot/node/subsystem-bench/src/lib/usage.rs +++ b/polkadot/node/subsystem-bench/src/lib/usage.rs @@ -23,7 +23,6 @@ use std::collections::HashMap; #[derive(Debug, Serialize, Deserialize, Clone)] pub struct BenchmarkUsage { - pub benchmark_name: String, pub network_usage: Vec, pub cpu_usage: Vec, } @@ -32,8 +31,7 @@ impl std::fmt::Display for 
BenchmarkUsage {
 fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
 write!(
 f,
- "\n{}\n\n{}\n{}\n\n{}\n{}\n",
- self.benchmark_name.purple(),
+ "\n{}\n{}\n\n{}\n{}\n",
 format!("{:<32}{:>12}{:>12}", "Network usage, KiB", "total", "per block").blue(),
 self.network_usage
 .iter()
@@ -59,18 +57,17 @@ impl BenchmarkUsage {
 let all_cpu_usage: Vec<&ResourceUsage> = usages.iter().flat_map(|v| &v.cpu_usage).collect();

 Self {
- benchmark_name: usages.first().map(|v| v.benchmark_name.clone()).unwrap_or_default(),
 network_usage: ResourceUsage::average_by_resource_name(&all_network_usages),
 cpu_usage: ResourceUsage::average_by_resource_name(&all_cpu_usage),
 }
 }

 pub fn check_network_usage(&self, checks: &[ResourceUsageCheck]) -> Vec<String> {
- check_usage(&self.benchmark_name, &self.network_usage, checks)
+ check_usage(&self.network_usage, checks)
 }

 pub fn check_cpu_usage(&self, checks: &[ResourceUsageCheck]) -> Vec<String> {
- check_usage(&self.benchmark_name, &self.cpu_usage, checks)
+ check_usage(&self.cpu_usage, checks)
 }

 pub fn cpu_usage_diff(&self, other: &Self, resource_name: &str) -> Option<f64> {
@@ -105,18 +102,8 @@ impl BenchmarkUsage {
 }
 }

-fn check_usage(
- benchmark_name: &str,
- usage: &[ResourceUsage],
- checks: &[ResourceUsageCheck],
-) -> Vec<String> {
- checks
- .iter()
- .filter_map(|check| {
- check_resource_usage(usage, check)
- .map(|message| format!("{}: {}", benchmark_name, message))
- })
- .collect()
+fn check_usage(usage: &[ResourceUsage], checks: &[ResourceUsageCheck]) -> Vec<String> {
+ checks.iter().filter_map(|check| check_resource_usage(usage, check)).collect()
 }

 fn check_resource_usage(
-- GitLab


From 6ed020037f4c2b6a6b542be6e5a15e86b0b7587b Mon Sep 17 00:00:00 2001
From: Oliver Tale-Yazdi
Date: Tue, 28 May 2024 13:23:42 +0200
Subject: [PATCH 077/106] [CI] Deny adding git deps (#4572)

Adds a small CI check to match the existing Git deps against a known-bad
list.

---------

Signed-off-by: Oliver Tale-Yazdi
---
 .github/scripts/deny-git-deps.py | 40 ++++++++++++++++++++++++++++++
 .github/workflows/checks-quick.yml | 4 ++-
 2 files changed, 43 insertions(+), 1 deletion(-)
 create mode 100644 .github/scripts/deny-git-deps.py

diff --git a/.github/scripts/deny-git-deps.py b/.github/scripts/deny-git-deps.py
new file mode 100644
index 00000000000..4b831c9347f
--- /dev/null
+++ b/.github/scripts/deny-git-deps.py
@@ -0,0 +1,40 @@
+"""
+Script to deny Git dependencies in the Cargo workspace. Can be passed one optional argument for the
+root folder. If not provided, it will use the cwd.
+
+## Usage
+ python3 .github/scripts/deny-git-deps.py polkadot-sdk
+"""
+
+import os
+import sys
+
+from cargo_workspace import Workspace, DependencyLocation
+
+KNOWN_BAD_GIT_DEPS = {
+ 'simple-mermaid': ['xcm-docs'],
+ # Fix in
+ 'bandersnatch_vrfs': ['sp-core'],
+}
+
+root = sys.argv[1] if len(sys.argv) > 1 else os.getcwd()
+workspace = Workspace.from_path(root)
+
+def check_dep(dep, used_by):
+ if dep.location != DependencyLocation.GIT:
+ return
+
+ if used_by in KNOWN_BAD_GIT_DEPS.get(dep.name, []):
+ print(f'๐Ÿคจ Ignoring git dependency {dep.name} in {used_by}')
+ else:
+ print(f'๐Ÿšซ Found git dependency {dep.name} in {used_by}')
+ sys.exit(1)
+
+# Check the workspace dependencies that can be inherited:
+for dep in workspace.dependencies:
+ check_dep(dep, "workspace")
+
+# And the dependencies of each crate:
+for crate in workspace.crates:
+ for dep in crate.dependencies:
+ check_dep(dep, crate.name)
diff --git a/.github/workflows/checks-quick.yml b/.github/workflows/checks-quick.yml
index 3888928311a..cd9baf0d1bc 100644
--- a/.github/workflows/checks-quick.yml
+++ b/.github/workflows/checks-quick.yml
@@ -87,13 +87,15 @@ jobs:
 - name: install python deps
 run: |
 sudo apt-get update && sudo apt-get install -y python3-pip python3
- pip3 install toml
+ pip3 install toml "cargo-workspace>=1.2.6"
 - name: check integrity
 run: >
 python3 .github/scripts/check-workspace.py .
 --exclude
 "substrate/frame/contracts/fixtures/build"
 "substrate/frame/contracts/fixtures/contracts/common"
+ - name: deny git deps
+ run: python3 .github/scripts/deny-git-deps.py .
 check-markdown:
 runs-on: ubuntu-latest
 timeout-minutes: 10
-- GitLab


From ea46ad556b31f1bcd3e37f16fd84bf7f2fa92015 Mon Sep 17 00:00:00 2001
From: Evgeny Snitko
Date: Tue, 28 May 2024 18:27:01 +0400
Subject: [PATCH 078/106] Conditional `required` checks (#4544)

Workaround for skipped but `required` github checks. The idea is to
trigger the workflow but filter out unaffected jobs or steps.

See [ci_cd 988](https://github.com/paritytech/ci_cd/issues/988) for
details.

In `.github/workflows/check-changed-files.yml` there is a reusable
workflow that does all the checks and publishes results as outputs.
Example usage:
```
jobs:
  changes:
    permissions:
      pull-requests: read
    uses: ./.github/workflows/check-changed-files.yml
  some-job:
    needs: changes
    if: ${{ needs.changes.outputs.rust }}
    .......
```
---
 .github/workflows/check-changed-files.yml | 57 +++++++++++++++++++++++
 .github/workflows/tests-linux-stable.yml | 16 ++++++-
 .github/workflows/tests.yml | 18 +++++--
 3 files changed, 86 insertions(+), 5 deletions(-)
 create mode 100644 .github/workflows/check-changed-files.yml

diff --git a/.github/workflows/check-changed-files.yml b/.github/workflows/check-changed-files.yml
new file mode 100644
index 00000000000..657c05cd047
--- /dev/null
+++ b/.github/workflows/check-changed-files.yml
@@ -0,0 +1,57 @@
+# Reusable workflow to perform checks and generate conditions for other workflows.
+# Currently it checks if any Rust (build-related) file is changed
+# and if the current (caller) workflow file is changed.
+# Example:
+#
+# jobs:
+# changes:
+# permissions:
+# pull-requests: read
+# uses: ./.github/workflows/check-changed-files.yml
+# some-job:
+# needs: changes
+# if: ${{ needs.changes.outputs.rust }}
+# .......
+ +name: Check changes files + +on: + workflow_call: + # Map the workflow outputs to job outputs + outputs: + rust: + value: ${{ jobs.changes.outputs.rust }} + description: 'true if any of the build-related OR current (caller) workflow files have changed' + current-workflow: + value: ${{ jobs.changes.outputs.current-workflow }} + description: 'true if current (caller) workflow file has changed' + +jobs: + changes: + runs-on: ubuntu-latest + permissions: + pull-requests: read + outputs: + # true if current workflow (caller) file is changed + rust: ${{ steps.filter.outputs.rust == 'true' || steps.filter.outputs.current-workflow == 'true' }} + current-workflow: ${{ steps.filter.outputs.current-workflow }} + steps: + - id: current-file + run: echo "current-workflow-file=$(echo ${{ github.workflow_ref }} | sed -nE "s/.*(\.github\/workflows\/[a-zA-Z0-9_-]*\.y[a]?ml)@refs.*/\1/p")" >> $GITHUB_OUTPUT + - run: echo "${{ steps.current-file.outputs.current-workflow-file }}" + # For pull requests it's not necessary to checkout the code + - id: filter + uses: dorny/paths-filter@v3 + with: + predicate-quantifier: 'every' + # current-workflow - check if the current (caller) workflow file is changed + # rust - check if any Rust (build-related) file is changed + filters: | + current-workflow: + - '${{ steps.current-file.outputs.current-workflow-file }}' + rust: + - '**/*' + - '!.github/**/*' + - '!prdoc/**/*' + - '!docs/**/*' + # \ No newline at end of file diff --git a/.github/workflows/tests-linux-stable.yml b/.github/workflows/tests-linux-stable.yml index 8822ba6d250..5fdfabc437f 100644 --- a/.github/workflows/tests-linux-stable.yml +++ b/.github/workflows/tests-linux-stable.yml @@ -20,10 +20,18 @@ env: FORKLIFT_metrics_pushEndpoint: ${{ secrets.FORKLIFT_metrics_pushEndpoint }} jobs: + + changes: + permissions: + pull-requests: read + uses: ./.github/workflows/check-changed-files.yml + set-image: # GitHub Actions allows using 'env' in a container context. # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322 # This workaround sets the container image for each job using 'set-image' job output. 
+ needs: changes
+ if: ${{ needs.changes.outputs.rust }}
 runs-on: ubuntu-latest
 outputs:
 IMAGE: ${{ steps.set_image.outputs.IMAGE }}
@@ -32,10 +40,12 @@ jobs:
 uses: actions/checkout@v4
 - id: set_image
 run: cat .github/env >> $GITHUB_OUTPUT
+
 test-linux-stable-int:
+ needs: [set-image, changes]
+ if: ${{ needs.changes.outputs.rust }}
 runs-on: arc-runners-polkadot-sdk-beefy
 timeout-minutes: 30
- needs: [set-image]
 container:
 image: ${{ needs.set-image.outputs.IMAGE }}
 env:
@@ -50,11 +60,13 @@ jobs:
 uses: actions/checkout@v4
 - name: script
 run: WASM_BUILD_NO_COLOR=1 time forklift cargo test -p staging-node-cli --release --locked -- --ignored
+
 # https://github.com/paritytech/ci_cd/issues/864
 test-linux-stable-runtime-benchmarks:
+ needs: [set-image, changes]
+ if: ${{ needs.changes.outputs.rust }}
 runs-on: arc-runners-polkadot-sdk-beefy
 timeout-minutes: 30
- needs: [set-image]
 container:
 image: ${{ needs.set-image.outputs.IMAGE }}
 env:
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 76bccba86b2..293acadc4e6 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -19,6 +19,12 @@ env:
 FORKLIFT_metrics_pushEndpoint: ${{ secrets.FORKLIFT_metrics_pushEndpoint }}

 jobs:
+
+ changes:
+ permissions:
+ pull-requests: read
+ uses: ./.github/workflows/check-changed-files.yml
+
 set-image:
 # GitHub Actions allows using 'env' in a container context.
 # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322
@@ -31,10 +37,12 @@ jobs:
 uses: actions/checkout@v4
 - id: set_image
 run: cat .github/env >> $GITHUB_OUTPUT
+
 quick-benchmarks:
+ needs: [set-image, changes]
+ if: ${{ needs.changes.outputs.rust }}
 runs-on: arc-runners-polkadot-sdk-beefy
 timeout-minutes: 30
- needs: [set-image]
 container:
 image: ${{ needs.set-image.outputs.IMAGE }}
 env:
@@ -47,11 +55,13 @@ jobs:
 uses: actions/checkout@v4
 - name: script
 run: time forklift cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks -- benchmark pallet --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 --quiet
+
 # cf https://github.com/paritytech/polkadot-sdk/issues/1652
 test-syscalls:
+ needs: [set-image, changes]
+ if: ${{ needs.changes.outputs.rust }}
 runs-on: arc-runners-polkadot-sdk-beefy
 timeout-minutes: 30
- needs: [set-image]
 container:
 image: ${{ needs.set-image.outputs.IMAGE }}
 continue-on-error: true # this rarely triggers in practice
@@ -71,10 +81,12 @@ jobs:
 # - if [[ "$CI_JOB_STATUS" == "failed" ]]; then
 # printf "The x86_64 syscalls used by the worker binaries have changed. Please review if this is expected and update polkadot/scripts/list-syscalls/*-worker-syscalls as needed.\n";
 # fi
+
 cargo-check-all-benches:
+ needs: [set-image, changes]
+ if: ${{ needs.changes.outputs.rust }}
 runs-on: arc-runners-polkadot-sdk-beefy
 timeout-minutes: 30
- needs: [set-image]
 container:
 image: ${{ needs.set-image.outputs.IMAGE }}
 env:
-- GitLab


From 650b124fd81f4a438c212cb010cc0a730bac5c2d Mon Sep 17 00:00:00 2001
From: Bolaji Ahmad <56865496+bolajahmad@users.noreply.github.com>
Date: Tue, 28 May 2024 15:44:58 +0100
Subject: [PATCH 079/106] Improve On_demand_assigner events (#4339)

title: Improving `on_demand_assigner` emitted events
doc:
 - audience: Runtime User
 description: OnDemandOrderPlaced event that is useful for indexers to save
data related to on demand orders.
Check [discussion
here](https://substrate.stackexchange.com/questions/11366/ondemandassignmentprovider-ondemandorderplaced-event-was-removed/11389#11389).

Closes #4254

crates: [ 'runtime-parachain]

---------

Co-authored-by: Maciej
---
 .../parachains/src/assigner_on_demand/mod.rs | 38 ++++++++++++-------
 prdoc/pr_4339.prdoc | 13 +++++++
 2 files changed, 37 insertions(+), 14 deletions(-)
 create mode 100644 prdoc/pr_4339.prdoc

diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs
index 37788a67ea0..795759b3b39 100644
--- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs
+++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs
@@ -173,7 +173,7 @@ impl QueueStatusType {
 fn consume_index(&mut self, removed_index: QueueIndex) {
 if removed_index != self.smallest_index {
 self.freed_indices.push(removed_index.reverse());
- return
+ return;
 }
 let mut index = self.smallest_index.0.overflowing_add(1).0;
 // Even more to advance?
@@ -368,10 +368,10 @@ pub mod pallet {
 #[pallet::event]
 #[pallet::generate_deposit(pub(super) fn deposit_event)]
 pub enum Event<T: Config> {
- /// An order was placed at some spot price amount.
- OnDemandOrderPlaced { para_id: ParaId, spot_price: BalanceOf<T> },
- /// The value of the spot traffic multiplier changed.
- SpotTrafficSet { traffic: FixedU128 },
+ /// An order was placed at some spot price amount by orderer ordered_by
+ OnDemandOrderPlaced { para_id: ParaId, spot_price: BalanceOf<T>, ordered_by: T::AccountId },
+ /// The value of the spot price has likely changed
+ SpotPriceSet { spot_price: BalanceOf<T> },
 }

 #[pallet::error]
@@ -410,12 +410,11 @@ pub mod pallet {
 ///
 /// Errors:
 /// - `InsufficientBalance`: from the Currency implementation
- /// - `InvalidParaId`
 /// - `QueueFull`
 /// - `SpotPriceHigherThanMaxAmount`
 ///
 /// Events:
- /// - `SpotOrderPlaced`
+ /// - `OnDemandOrderPlaced`
 #[pallet::call_index(0)]
 #[pallet::weight(<T as Config>::WeightInfo::place_order_allow_death(QueueStatus::<T>::get().size()))]
 pub fn place_order_allow_death(
@@ -437,12 +436,11 @@ pub mod pallet {
 ///
 /// Errors:
 /// - `InsufficientBalance`: from the Currency implementation
- /// - `InvalidParaId`
 /// - `QueueFull`
 /// - `SpotPriceHigherThanMaxAmount`
 ///
 /// Events:
- /// - `SpotOrderPlaced`
+ /// - `OnDemandOrderPlaced`
 #[pallet::call_index(1)]
 #[pallet::weight(<T as Config>::WeightInfo::place_order_keep_alive(QueueStatus::<T>::get().size()))]
 pub fn place_order_keep_alive(
@@ -539,12 +537,11 @@ where
 ///
 /// Errors:
 /// - `InsufficientBalance`: from the Currency implementation
- /// - `InvalidParaId`
 /// - `QueueFull`
 /// - `SpotPriceHigherThanMaxAmount`
 ///
 /// Events:
- /// - `SpotOrderPlaced`
+ /// - `OnDemandOrderPlaced`
 fn do_place_order(
 sender: <T as frame_system::Config>::AccountId,
 max_amount: BalanceOf<T>,
@@ -578,6 +575,12 @@ where
 Error::<T>::QueueFull
 );
 Pallet::<T>::add_on_demand_order(queue_status, para_id, QueuePushDirection::Back);
+ Pallet::<T>::deposit_event(Event::<T>::OnDemandOrderPlaced {
+ para_id,
+ spot_price,
+ ordered_by: sender,
+ });
+
 Ok(())
 })
 }
@@ -599,7 +602,14 @@ where
 // Only update storage on change
 if new_traffic != old_traffic {
 queue_status.traffic = new_traffic;
- Pallet::<T>::deposit_event(Event::<T>::SpotTrafficSet { traffic: new_traffic });
+
+ // calculate the new spot price
+ let spot_price: BalanceOf<T> = new_traffic.saturating_mul_int(
+ config.scheduler_params.on_demand_base_fee.saturated_into::<BalanceOf<T>>(),
+ );
+
+ // emit the event for updated new price
+ Pallet::<T>::deposit_event(Event::<T>::SpotPriceSet { spot_price });
 }
 },
 Err(err) => {
@@ -721,7 
+731,7 @@ where "Decreased affinity for a para that has not been served on a core?" ); if affinity != Some(0) { - return + return; } // No affinity more for entries on this core, free any entries: // @@ -754,7 +764,7 @@ where } else { *maybe_affinity = None; } - return Some(new_count) + return Some(new_count); } else { None } diff --git a/prdoc/pr_4339.prdoc b/prdoc/pr_4339.prdoc new file mode 100644 index 00000000000..634ccfa1a33 --- /dev/null +++ b/prdoc/pr_4339.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Improving on_demand_assigner emitted events + +doc: + - audience: Runtime User + description: | + Registering OnDemandOrderPlaced event that is useful for indexers to save data related to on demand orders. Adds SpotPriceSet as a new event to monitor on-demand spot prices. It updates whenever the price changes due to traffic. + +crates: + - name: polkadot-runtime-parachains + bump: minor \ No newline at end of file -- GitLab From ad22fa6e785bccfb5e1ceb113c870e02f132ce46 Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Tue, 28 May 2024 18:53:53 +0300 Subject: [PATCH 080/106] Add metric to measure the time it takes to gather enough assignments (#4587) To understand with high granularity how many assignment tranches are triggered before we concur that we have enough assignments. This metric is important because the triggering of an assignment creates a lot of work in the system for approving the candidate and gossiping the necessary messages. --------- Signed-off-by: Alexandru Gheorghe Co-authored-by: ordian --- .../node/core/approval-voting/src/import.rs | 6 +- polkadot/node/core/approval-voting/src/lib.rs | 170 ++++++++++++- .../node/core/approval-voting/src/tests.rs | 236 +++++++++++++++++- 3 files changed, 406 insertions(+), 6 deletions(-) diff --git a/polkadot/node/core/approval-voting/src/import.rs b/polkadot/node/core/approval-voting/src/import.rs index f4be42a4845..13b0b1bae1b 100644 --- a/polkadot/node/core/approval-voting/src/import.rs +++ b/polkadot/node/core/approval-voting/src/import.rs @@ -607,7 +607,7 @@ pub(crate) mod tests { use super::*; use crate::{ approval_db::common::{load_block_entry, DbBackend}, - RuntimeInfo, RuntimeInfoConfig, + RuntimeInfo, RuntimeInfoConfig, MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS, }; use ::test_helpers::{dummy_candidate_receipt, dummy_hash}; use assert_matches::assert_matches; @@ -622,6 +622,7 @@ pub(crate) mod tests { node_features::FeatureIndex, ExecutorParams, Id as ParaId, IndexedVec, NodeFeatures, SessionInfo, ValidatorId, ValidatorIndex, }; + use schnellru::{ByLength, LruMap}; pub(crate) use sp_consensus_babe::{ digests::{CompatibleDigestItem, PreDigest, SecondaryVRFPreDigest}, AllowedSlots, BabeEpochConfiguration, Epoch as BabeEpoch, @@ -658,6 +659,9 @@ pub(crate) mod tests { clock: Box::new(MockClock::default()), assignment_criteria: Box::new(MockAssignmentCriteria::default()), spans: HashMap::new(), + per_block_assignments_gathering_times: LruMap::new(ByLength::new( + MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS, + )), } } diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index c667aee7361..eece6b15805 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -63,6 +63,12 @@ use sc_keystore::LocalKeystore; use sp_application_crypto::Pair; use 
sp_consensus::SyncOracle;
 use sp_consensus_slots::Slot;
+use std::time::Instant;
+
+// The max number of blocks we keep track of assignments gathering times. Normally,
+// this would never be reached because we prune the data on finalization, but we need
+// to also ensure the data is not growing unnecessarily large.
+const MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS: u32 = 100;

 use futures::{
 channel::oneshot,
@@ -182,6 +188,14 @@ struct MetricsInner {
 time_recover_and_approve: prometheus::Histogram,
 candidate_signatures_requests_total: prometheus::Counter<prometheus::U64>,
 unapproved_candidates_in_unfinalized_chain: prometheus::Gauge<prometheus::U64>,
+ // The time it takes in each stage to gather enough assignments.
+ // We define a `stage` as being the entire process of gathering enough assignments to
+ // be able to approve a candidate:
+ // E.g.:
+ // - Stage 0: We wait for the needed_approvals assignments to be gathered.
+ // - Stage 1: We wait for enough tranches to cover all no-shows in stage 0.
+ // - Stage 2: We wait for enough tranches to cover all no-shows of stage 1.
+ assignments_gathering_time_by_stage: prometheus::HistogramVec,
 }

 /// Approval Voting metrics.
@@ -302,6 +316,20 @@ impl Metrics {
 metrics.unapproved_candidates_in_unfinalized_chain.set(count as u64);
 }
 }
+
+ pub fn observe_assignment_gathering_time(&self, stage: usize, elapsed_as_millis: usize) {
+ if let Some(metrics) = &self.0 {
+ let stage_string = stage.to_string();
+ // We don't want to have too many metrics entries with this label to not put unnecessary
+ // pressure on the metrics infrastructure, so we cap the stage at 10, which is
+ // equivalent to already having a finalization lag of 10 * no_show_slots, so it should
+ // be more than enough.
+ metrics
+ .assignments_gathering_time_by_stage
+ .with_label_values(&[if stage < 10 { stage_string.as_str() } else { "inf" }])
+ .observe(elapsed_as_millis as f64);
+ }
+ }
 }

 impl metrics::Metrics for Metrics {
@@ -431,6 +459,17 @@ impl metrics::Metrics for Metrics {
 )?,
 registry,
 )?,
+ assignments_gathering_time_by_stage: prometheus::register(
+ prometheus::HistogramVec::new(
+ prometheus::HistogramOpts::new(
+ "polkadot_parachain_assignments_gather_time_by_stage_ms",
+ "The time in ms it takes for each stage to gather enough assignments needed for approval",
+ )
+ .buckets(vec![0.0, 250.0, 500.0, 1000.0, 2000.0, 4000.0, 8000.0, 16000.0, 32000.0]),
+ &["stage"],
+ )?,
+ registry,
+ )?,
 };

 Ok(Metrics(Some(metrics)))
@@ -788,6 +827,28 @@ struct State {
 clock: Box<dyn Clock + Send + Sync>,
 assignment_criteria: Box<dyn AssignmentCriteria + Send + Sync>,
 spans: HashMap<Hash, jaeger::PerLeafSpan>,
+ // Per-block, per-candidate records of how long we take until we gather enough
+ // assignments; this is relevant because it gives us a good idea about how many
+ // tranches we trigger and why.
+ per_block_assignments_gathering_times:
+ LruMap<BlockNumber, HashMap<(Hash, CandidateHash), AssignmentGatheringRecord>>,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+struct AssignmentGatheringRecord {
+ // The stage we are in.
+ // Candidate assignment gathering goes in stages: first we wait for needed_approvals (stage 0).
+ // Then, if we have no-shows, we move into stage 1 and wait for enough tranches to cover all
+ // no-shows.
+ stage: usize,
+ // The time we started the stage.
+ stage_start: Option<Instant>,
+}
+
+impl Default for AssignmentGatheringRecord {
+ fn default() -> Self {
+ AssignmentGatheringRecord { stage: 0, stage_start: Some(Instant::now()) }
+ }
 }

 #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)]
@@ -893,6 +954,96 @@ impl State {
 },
 }
 }
+
+ fn mark_begining_of_gathering_assignments(
+ &mut self,
+ block_number: BlockNumber,
+ block_hash: Hash,
+ candidate: CandidateHash,
+ ) {
+ if let Some(record) = self
+ .per_block_assignments_gathering_times
+ .get_or_insert(block_number, HashMap::new)
+ .and_then(|records| Some(records.entry((block_hash, candidate)).or_default()))
+ {
+ if record.stage_start.is_none() {
+ record.stage += 1;
+ gum::debug!(
+ target: LOG_TARGET,
+ stage = ?record.stage,
+ ?block_hash,
+ ?candidate,
+ "Started a new assignment gathering stage",
+ );
+ record.stage_start = Some(Instant::now());
+ }
+ }
+ }
+
+ fn mark_gathered_enough_assignments(
+ &mut self,
+ block_number: BlockNumber,
+ block_hash: Hash,
+ candidate: CandidateHash,
+ ) -> AssignmentGatheringRecord {
+ let record = self
+ .per_block_assignments_gathering_times
+ .get(&block_number)
+ .and_then(|entry| entry.get_mut(&(block_hash, candidate)));
+ let stage = record.as_ref().map(|record| record.stage).unwrap_or_default();
+ AssignmentGatheringRecord {
+ stage,
+ stage_start: record.and_then(|record| record.stage_start.take()),
+ }
+ }
+
+ fn cleanup_assignments_gathering_timestamp(&mut self, remove_lower_than: BlockNumber) {
+ while let Some((block_number, _)) = self.per_block_assignments_gathering_times.peek_oldest()
+ {
+ if *block_number < remove_lower_than {
+ self.per_block_assignments_gathering_times.pop_oldest();
+ } else {
+ break
+ }
+ }
+ }
+
+ fn observe_assignment_gathering_status(
+ &mut self,
+ metrics: &Metrics,
+ required_tranches: &RequiredTranches,
+ block_hash: Hash,
+ block_number: BlockNumber,
+ candidate_hash: CandidateHash,
+ ) {
+ match required_tranches {
+ RequiredTranches::All | RequiredTranches::Pending { .. } => {
+ self.mark_begining_of_gathering_assignments(
+ block_number,
+ block_hash,
+ candidate_hash,
+ );
+ },
+ RequiredTranches::Exact { .. } => {
+ let time_to_gather =
+ self.mark_gathered_enough_assignments(block_number, block_hash, candidate_hash);
+ if let Some(gathering_started) = time_to_gather.stage_start {
+ if gathering_started.elapsed().as_millis() > 6000 {
+ gum::trace!(
+ target: LOG_TARGET,
+ ?block_hash,
+ ?candidate_hash,
+ "Long assignment gathering time",
+ );
+ }
+ metrics.observe_assignment_gathering_time(
+ time_to_gather.stage,
+ gathering_started.elapsed().as_millis() as usize,
+ )
+ }
+ },
+ }
+ }
 }

 #[derive(Debug, Clone)]
@@ -942,6 +1093,9 @@ where
 clock: subsystem.clock,
 assignment_criteria,
 spans: HashMap::new(),
+ per_block_assignments_gathering_times: LruMap::new(ByLength::new(
+ MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS,
+ )),
 };

 // `None` on start-up. Gets initialized/updated on leaf update
@@ -973,7 +1127,7 @@ where
 subsystem.metrics.on_wakeup();
 process_wakeup(
 &mut ctx,
- &state,
+ &mut state,
 &mut overlayed_db,
 &mut session_info_provider,
 woken_block,
@@ -1632,6 +1786,7 @@ async fn handle_from_overseer<Context>(
 // `prune_finalized_wakeups` prunes all finalized block hashes. We prune spans
 // accordingly.
 wakeups.prune_finalized_wakeups(block_number, &mut state.spans);
+ state.cleanup_assignments_gathering_timestamp(block_number);

 //
 // `prune_finalized_wakeups` prunes all finalized block hashes. We prune spans
 // accordingly.
let hash_set =
@@ -2478,7 +2633,7 @@ where

 async fn check_and_import_approval<T, Sender>(
 sender: &mut Sender,
- state: &State,
+ state: &mut State,
 db: &mut OverlayedBackend<'_, impl Backend>,
 session_info_provider: &mut RuntimeInfo,
 metrics: &Metrics,
@@ -2710,7 +2865,7 @@ impl ApprovalStateTransition {
 // as necessary and schedules any further wakeups.
 async fn advance_approval_state<Sender>(
 sender: &mut Sender,
- state: &State,
+ state: &mut State,
 db: &mut OverlayedBackend<'_, impl Backend>,
 session_info_provider: &mut RuntimeInfo,
 metrics: &Metrics,
@@ -2761,6 +2916,13 @@ where
 approval_entry,
 status.required_tranches.clone(),
 );
+ state.observe_assignment_gathering_status(
+ &metrics,
+ &status.required_tranches,
+ block_hash,
+ block_entry.block_number(),
+ candidate_hash,
+ );

 // Check whether this is approved, while allowing a maximum
 // assignment tick of `now - APPROVAL_DELAY` - that is, that
@@ -2941,7 +3103,7 @@ fn should_trigger_assignment(
 #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)]
 async fn process_wakeup<Context>(
 ctx: &mut Context,
- state: &State,
+ state: &mut State,
 db: &mut OverlayedBackend<'_, impl Backend>,
 session_info_provider: &mut RuntimeInfo,
 relay_block: Hash,
diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs
index c3709de59e8..43af8d476a6 100644
--- a/polkadot/node/core/approval-voting/src/tests.rs
+++ b/polkadot/node/core/approval-voting/src/tests.rs
@@ -17,6 +17,10 @@ use self::test_helpers::mock::new_leaf;
 use super::*;
 use crate::backend::V1ReadBackend;
+use overseer::prometheus::{
+ prometheus::{IntCounter, IntCounterVec},
+ Histogram, HistogramOpts, HistogramVec, Opts,
+};
 use polkadot_node_primitives::{
 approval::{
 v1::{
@@ -40,7 +44,7 @@ use polkadot_primitives::{
 ApprovalVote, CandidateCommitments, CandidateEvent, CoreIndex, GroupIndex, Header,
 Id as ParaId, IndexedVec, NodeFeatures, ValidationCode, ValidatorSignature,
 };
-use std::time::Duration;
+use std::{cmp::max, time::Duration};

 use assert_matches::assert_matches;
 use async_trait::async_trait;
@@ -5049,3 +5053,233 @@ fn subsystem_sends_pending_approvals_on_approval_restart() {
 virtual_overseer
 });
}
+
+// Test we correctly update the timer when we mark the beginning of gathering assignments.
+#[test]
+fn test_gathering_assignments_statements() {
+ let mut state = State {
+ keystore: Arc::new(LocalKeystore::in_memory()),
+ slot_duration_millis: 6_000,
+ clock: Box::new(MockClock::default()),
+ assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|_| Ok(0))),
+ spans: HashMap::new(),
+ per_block_assignments_gathering_times: LruMap::new(ByLength::new(
+ MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS,
+ )),
+ };
+
+ for i in 0..200i32 {
+ state.mark_begining_of_gathering_assignments(
+ i as u32,
+ Hash::repeat_byte(i as u8),
+ CandidateHash(Hash::repeat_byte(i as u8)),
+ );
+ assert!(
+ state.per_block_assignments_gathering_times.len() <=
+ MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS as usize
+ );
+
+ assert_eq!(
+ state
+ .per_block_assignments_gathering_times
+ .iter()
+ .map(|(block_number, _)| block_number)
+ .min(),
+ Some(max(0, i - MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS as i32 + 1) as u32).as_ref()
+ )
+ }
+ assert_eq!(
+ state.per_block_assignments_gathering_times.len(),
+ MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS as usize
+ );
+
+ let nothing_changes = state
+ .per_block_assignments_gathering_times
+ .iter()
+ .map(|(block_number, _)| *block_number)
+ .sorted()
+ .collect::<Vec<_>>();
+
+ for i in 150..200i32 {
+ state.mark_begining_of_gathering_assignments(
+ i as u32,
+ Hash::repeat_byte(i as u8),
+ CandidateHash(Hash::repeat_byte(i as u8)),
+ );
+ assert_eq!(
+ nothing_changes,
+ state
+ .per_block_assignments_gathering_times
+ .iter()
+ .map(|(block_number, _)| *block_number)
+ .sorted()
+ .collect::<Vec<_>>()
+ );
+ }
+
+ for i in 110..120 {
+ let block_hash = Hash::repeat_byte(i as u8);
+ let candidate_hash = CandidateHash(Hash::repeat_byte(i as u8));
+
+ state.mark_gathered_enough_assignments(i as u32, block_hash, candidate_hash);
+
+ assert!(state
+ .per_block_assignments_gathering_times
+ .get(&i)
+ .unwrap()
+ .get(&(block_hash, candidate_hash))
+ .unwrap()
+ .stage_start
+ .is_none());
+ state.mark_begining_of_gathering_assignments(i as u32, block_hash, candidate_hash);
+ let record = state
+ .per_block_assignments_gathering_times
+ .get(&i)
+ .unwrap()
+ .get(&(block_hash, candidate_hash))
+ .unwrap();
+
+ assert!(record.stage_start.is_some());
+ assert_eq!(record.stage, 1);
+ }
+
+ state.cleanup_assignments_gathering_timestamp(200);
+ assert_eq!(state.per_block_assignments_gathering_times.len(), 0);
+}
+
+// Test we note the time we took to transition RequiredTranche from Pending to Exact and
+// that we increase the stage when we transition from Exact to Pending.
+#[test]
+fn test_observe_assignment_gathering_status() {
+ let mut state = State {
+ keystore: Arc::new(LocalKeystore::in_memory()),
+ slot_duration_millis: 6_000,
+ clock: Box::new(MockClock::default()),
+ assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|_| Ok(0))),
+ spans: HashMap::new(),
+ per_block_assignments_gathering_times: LruMap::new(ByLength::new(
+ MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS,
+ )),
+ };
+
+ let metrics_inner = MetricsInner {
+ imported_candidates_total: IntCounter::new("dummy", "dummy").unwrap(),
+ assignments_produced: Histogram::with_opts(HistogramOpts::new("dummy", "dummy")).unwrap(),
+ approvals_produced_total: IntCounterVec::new(Opts::new("dummy", "dummy"), &["dummy"])
+ .unwrap(),
+ no_shows_total: IntCounter::new("dummy", "dummy").unwrap(),
+ observed_no_shows: IntCounter::new("dummy", "dummy").unwrap(),
+ approved_by_one_third: IntCounter::new("dummy", "dummy").unwrap(),
+ wakeups_triggered_total: IntCounter::new("dummy", "dummy").unwrap(),
+ coalesced_approvals_buckets: Histogram::with_opts(HistogramOpts::new("dummy", "dummy"))
+ .unwrap(),
+ coalesced_approvals_delay: Histogram::with_opts(HistogramOpts::new("dummy", "dummy"))
+ .unwrap(),
+ candidate_approval_time_ticks: Histogram::with_opts(HistogramOpts::new("dummy", "dummy"))
+ .unwrap(),
+ block_approval_time_ticks: Histogram::with_opts(HistogramOpts::new("dummy", "dummy"))
+ .unwrap(),
+ time_db_transaction: Histogram::with_opts(HistogramOpts::new("dummy", "dummy")).unwrap(),
+ time_recover_and_approve: Histogram::with_opts(HistogramOpts::new("dummy", "dummy"))
+ .unwrap(),
+ candidate_signatures_requests_total: IntCounter::new("dummy", "dummy").unwrap(),
+ unapproved_candidates_in_unfinalized_chain: prometheus::Gauge::<prometheus::U64>::new(
+ "dummy", "dummy",
+ )
+ .unwrap(),
+ assignments_gathering_time_by_stage: HistogramVec::new(
+ HistogramOpts::new("test", "test"),
+ &["stage"],
+ )
+ .unwrap(),
+ };
+
+ let metrics = Metrics(Some(metrics_inner));
+ let block_hash = Hash::repeat_byte(1);
+ let candidate_hash = CandidateHash(Hash::repeat_byte(1));
+ let block_number = 1;
+
+ // Transition from Pending to Exact and check stage 0 time is recorded.
+ state.observe_assignment_gathering_status(
+ &metrics,
+ &RequiredTranches::Pending {
+ considered: 0,
+ next_no_show: None,
+ maximum_broadcast: 0,
+ clock_drift: 0,
+ },
+ block_hash,
+ block_number,
+ candidate_hash,
+ );
+
+ state.observe_assignment_gathering_status(
+ &metrics,
+ &RequiredTranches::Exact {
+ needed: 2,
+ tolerated_missing: 2,
+ next_no_show: None,
+ last_assignment_tick: None,
+ },
+ block_hash,
+ block_number,
+ candidate_hash,
+ );
+
+ let value = metrics
+ .0
+ .as_ref()
+ .unwrap()
+ .assignments_gathering_time_by_stage
+ .get_metric_with_label_values(&["0"])
+ .unwrap();
+
+ assert_eq!(value.get_sample_count(), 1);
+
+ // Transition from Exact to Pending to Exact and check stage 1 time is recorded.
+ state.observe_assignment_gathering_status(
+ &metrics,
+ &RequiredTranches::Pending {
+ considered: 0,
+ next_no_show: None,
+ maximum_broadcast: 0,
+ clock_drift: 0,
+ },
+ block_hash,
+ block_number,
+ candidate_hash,
+ );
+
+ state.observe_assignment_gathering_status(
+ &metrics,
+ &RequiredTranches::Exact {
+ needed: 2,
+ tolerated_missing: 2,
+ next_no_show: None,
+ last_assignment_tick: None,
+ },
+ block_hash,
+ block_number,
+ candidate_hash,
+ );
+
+ let value = metrics
+ .0
+ .as_ref()
+ .unwrap()
+ .assignments_gathering_time_by_stage
+ .get_metric_with_label_values(&["0"])
+ .unwrap();
+
+ assert_eq!(value.get_sample_count(), 1);
+
+ let value = metrics
+ .0
+ .as_ref()
+ .unwrap()
+ .assignments_gathering_time_by_stage
+ .get_metric_with_label_values(&["1"])
+ .unwrap();
+
+ assert_eq!(value.get_sample_count(), 1);
+}
-- GitLab


From 2b1c606a338c80c5220c502c56a4b489f6d51488 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Tue, 28 May 2024 18:08:31 +0200
Subject: [PATCH 081/106] parachain-inherent: Make `para_id` more prominent
 (#4555)

This should make it more obvious that at instantiation of the
`MockValidationDataInherentDataProvider` the `para_id` needs to be
passed.
---
 cumulus/client/parachain-inherent/src/mock.rs | 21 ++++++++++---------
 prdoc/pr_4555.prdoc | 11 ++++++++++
 2 files changed, 22 insertions(+), 10 deletions(-)
 create mode 100644 prdoc/pr_4555.prdoc

diff --git a/cumulus/client/parachain-inherent/src/mock.rs b/cumulus/client/parachain-inherent/src/mock.rs
index 896df7a7242..dfe4a66c3dc 100644
--- a/cumulus/client/parachain-inherent/src/mock.rs
+++ b/cumulus/client/parachain-inherent/src/mock.rs
@@ -46,12 +46,14 @@ pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000;
 /// your parachain's configuration in order to mock the MQC heads properly.
 /// See [`MockXcmConfig`] for more information
 pub struct MockValidationDataInherentDataProvider<R = ()> {
- /// The current block number of the local block chain (the parachain)
+ /// The current block number of the local block chain (the parachain).
 pub current_para_block: u32,
+ /// The parachain ID of the parachain for which the inherent data is created.
+ pub para_id: ParaId,
 /// The current block head data of the local block chain (the parachain).
 pub current_para_block_head: Option<cumulus_primitives_core::relay_chain::HeadData>,
 /// The relay block in which this parachain appeared to start. This will be the relay block
- /// number in para block #P1
+ /// number in para block #P1.
 pub relay_offset: u32,
 /// The number of relay blocks that elapses between each parablock. Probably set this to 1 or 2
 /// to simulate optimistic or realistic relay chain behavior.
@@ -59,19 +61,21 @@ pub struct MockValidationDataInherentDataProvider<R = ()> {
 /// Number of parachain blocks per relay chain epoch
 /// Mock epoch is computed by dividing `current_para_block` by this value.
 pub para_blocks_per_relay_epoch: u32,
- /// Function to mock BABE one epoch ago randomness
+ /// Function to mock BABE one epoch ago randomness.
 pub relay_randomness_config: R,
 /// XCM messages and associated configuration information.
 pub xcm_config: MockXcmConfig,
 /// Inbound downward XCM messages to be injected into the block.
 pub raw_downward_messages: Vec<Vec<u8>>,
- // Inbound Horizontal messages sorted by channel
+ // Inbound Horizontal messages sorted by channel.
 pub raw_horizontal_messages: Vec<(ParaId, Vec<u8>)>,
 // Additional key-value pairs that should be injected.
pub additional_key_values: Option<Vec<(Vec<u8>, Vec<u8>)>>,
 }

+/// Something that can generate randomness.
 pub trait GenerateRandomness<I> {
+ /// Generate the randomness using the given `input`.
 fn generate_randomness(&self, input: I) -> relay_chain::Hash;
 }

@@ -91,8 +95,6 @@ impl GenerateRandomness<u64> for () {
 /// parachain's storage, and the corresponding relay data mocked.
 #[derive(Default)]
 pub struct MockXcmConfig {
- /// The parachain id of the parachain being mocked.
- pub para_id: ParaId,
 /// The starting state of the dmq_mqc_head.
 pub starting_dmq_mqc_head: relay_chain::Hash,
 /// The starting state of each parachain's mqc head
@@ -119,7 +121,6 @@ impl MockXcmConfig {
 pub fn new<B: Block, BE: Backend<B>, C: StorageProvider<B, BE>>(
 client: &C,
 parent_block: B::Hash,
- para_id: ParaId,
 parachain_system_name: ParachainSystemName,
 ) -> Self {
 let starting_dmq_mqc_head = client
@@ -152,7 +153,7 @@ impl MockXcmConfig {
 })
 .unwrap_or_default();

- Self { para_id, starting_dmq_mqc_head, starting_hrmp_mqc_heads }
+ Self { starting_dmq_mqc_head, starting_hrmp_mqc_heads }
 }
 }

@@ -166,7 +167,7 @@ impl<R: Send + Sync + GenerateRandomness<u64>> InherentDataProvider
 ) -> Result<(), sp_inherents::Error> {
 // Use the "sproof" (spoof proof) builder to build valid mock state root and proof.
 let mut sproof_builder =
- RelayStateSproofBuilder { para_id: self.xcm_config.para_id, ..Default::default() };
+ RelayStateSproofBuilder { para_id: self.para_id, ..Default::default() };

 // Calculate the mocked relay block based on the current para block
 let relay_parent_number =
diff --git a/prdoc/pr_4555.prdoc b/prdoc/pr_4555.prdoc
new file mode 100644
index 00000000000..257115d236e
--- /dev/null
+++ b/prdoc/pr_4555.prdoc
@@ -0,0 +1,11 @@
+title: Move `para_id` to `MockValidationDataInherentDataProvider`
+
+doc:
+ - audience: Node Dev
+ description: |
+ This moves the `para_id` from `MockXcmConfig` to `MockValidationDataInherentDataProvider` to make it more prominent. The `para_id` should
+ be set to the parachain id of the parachain that gets mocked to ensure that the relay chain storage proof is set up correctly etc.
+
+crates:
+ - name: cumulus-client-parachain-inherent
+ bump: major
-- GitLab


From d6cf147c1bda601e811bf5813b0d46ca1c8ad9b9 Mon Sep 17 00:00:00 2001
From: Przemek Rzad
Date: Tue, 28 May 2024 19:57:43 +0200
Subject: [PATCH 082/106] Filter workspace dependencies in the templates
 (#4599)

This detaches the templates from monorepo's workspace dependencies.

Currently the templates [re-use the monorepo's
dependencies](https://github.com/paritytech/polkadot-sdk-minimal-template/blob/bd8afe66ec566d61f36b0e3d731145741a9e9e19/Cargo.toml#L45-L58),
most of which are not needed.

The simplest approach is to specify versions directly and not use
workspace dependencies in the templates.

Another approach would be to programmatically filter dependencies that
are actually needed - but not sure if it's worth it, given that it would
complicate the synchronization job.
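That programmatic filtering is what the workflow change below implements:
after appending an empty `[workspace.dependencies]` table, it keeps asking
`cargo tree` which dependencies were "not found in `workspace.dependencies`"
and copies each missing entry over from the monorepo's root manifest. A
condensed sketch of the loop (the full step, including the `sed` invocation
that extracts the crate name from the error message, is in the diff below):

```sh
echo -e "\n[workspace.dependencies]" >> Cargo.toml
# Repeat until `cargo tree` stops reporting missing workspace dependencies.
while cargo tree --depth 1 --prefix none --no-dedupe 2>&1 | grep 'was not found in `workspace.dependencies`'; do
  missing_dep="..."  # extracted from the error message via sed, see the full step below
  toml get ../polkadot-sdk/Cargo.toml 'workspace.dependencies' --output-toml | grep "^${missing_dep} = " >> Cargo.toml
done
```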
cc @kianenigma @gupnik --- .github/workflows/misc-sync-templates.yml | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/.github/workflows/misc-sync-templates.yml b/.github/workflows/misc-sync-templates.yml index b040c2fc89b..d8027014863 100644 --- a/.github/workflows/misc-sync-templates.yml +++ b/.github/workflows/misc-sync-templates.yml @@ -104,8 +104,6 @@ jobs: toml set templates/${{ matrix.template }}/Cargo.toml 'workspace.package.edition' "$(toml get --raw Cargo.toml 'workspace.package.edition')" > Cargo.temp mv Cargo.temp ./templates/${{ matrix.template }}/Cargo.toml - - toml get Cargo.toml 'workspace.dependencies' --output-toml >> ./templates/${{ matrix.template }}/Cargo.toml working-directory: polkadot-sdk - name: Print the result Cargo.tomls for debugging if: runner.debug == '1' @@ -118,6 +116,18 @@ jobs: - name: Copy over the new changes run: | cp -r polkadot-sdk/templates/${{ matrix.template }}/* "${{ env.template-path }}/" + - name: Copy over required workspace dependencies + run: | + echo -e "\n[workspace.dependencies]" >> Cargo.toml + set +e + # If a workspace dependency is required.. + while cargo tree --depth 1 --prefix none --no-dedupe 2>&1 | grep 'was not found in `workspace.dependencies`'; do + # Get its name.. + missing_dep=$(cargo tree --depth 1 --prefix none --no-dedupe 2>&1 | grep 'was not found in `workspace.dependencies`' | sed -E 's/(.*)`dependency.(.*)` was not found in `workspace.dependencies`/\2/') + # And copy the dependency from the monorepo. + toml get ../polkadot-sdk/Cargo.toml 'workspace.dependencies' --output-toml | grep "^${missing_dep} = " >> Cargo.toml + done; + working-directory: "${{ env.template-path }}" # 3. Verify the build. Push the changes or create a PR. -- GitLab From 5f68c93039fce08d7f711025eddc5343b0272111 Mon Sep 17 00:00:00 2001 From: gupnik Date: Wed, 29 May 2024 09:11:47 +0530 Subject: [PATCH 083/106] Moves runtime macro out of experimental flag (#4249) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Step in https://github.com/paritytech/polkadot-sdk/issues/3688 Now that the `runtime` macro (Construct Runtime V2) has been successfully deployed on Westend, this PR moves it out of the experimental feature flag and makes it generally available for runtime devs. --------- Co-authored-by: Bastian Kรถcher Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- prdoc/pr_4249.prdoc | 17 +++++++++++++++++ substrate/frame/support/procedural/src/lib.rs | 1 - .../frame/support/procedural/src/runtime/mod.rs | 2 -- substrate/frame/support/src/lib.rs | 1 - 4 files changed, 17 insertions(+), 4 deletions(-) create mode 100644 prdoc/pr_4249.prdoc diff --git a/prdoc/pr_4249.prdoc b/prdoc/pr_4249.prdoc new file mode 100644 index 00000000000..1a267e26392 --- /dev/null +++ b/prdoc/pr_4249.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Moves runtime macro out of experimental flag + +doc: + - audience: Runtime Dev + description: | + Now that the runtime macro (Construct Runtime V2) has been successfully deployed on Westend, + this PR moves it out of the experimental feature flag and makes it generally available for + runtime devs. 
+ +crates: + - name: frame-support + bump: minor + - name: frame-support-procedural + bump: minor diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 53f01329d18..e812ac071b2 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -1249,7 +1249,6 @@ pub fn import_section(attr: TokenStream, tokens: TokenStream) -> TokenStream { /// /// * The macro generates a type alias for each pallet to their `Pallet`. E.g. `type System = /// frame_system::Pallet` -#[cfg(feature = "experimental")] #[proc_macro_attribute] pub fn runtime(attr: TokenStream, item: TokenStream) -> TokenStream { runtime::runtime(attr, item) diff --git a/substrate/frame/support/procedural/src/runtime/mod.rs b/substrate/frame/support/procedural/src/runtime/mod.rs index aaae579eb08..1d4242cd122 100644 --- a/substrate/frame/support/procedural/src/runtime/mod.rs +++ b/substrate/frame/support/procedural/src/runtime/mod.rs @@ -200,8 +200,6 @@ //! +----------------------+ //! ``` -#![cfg(feature = "experimental")] - pub use parse::Def; use proc_macro::TokenStream; use syn::spanned::Spanned; diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 7eddea1259d..8ae1f56b4d6 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -508,7 +508,6 @@ pub use frame_support_procedural::{ construct_runtime, match_and_insert, transactional, PalletError, RuntimeDebugNoBound, }; -#[cfg(feature = "experimental")] pub use frame_support_procedural::runtime; #[doc(hidden)] -- GitLab From 89604daa0f4244bc83782bd489918cfecb81a7d0 Mon Sep 17 00:00:00 2001 From: Egor_P Date: Wed, 29 May 2024 07:50:04 +0200 Subject: [PATCH 084/106] Add omni bencher & chain-spec-builder bins to release (#4557) Closes: https://github.com/paritytech/polkadot-sdk/issues/4354 This PR adds the steps to build and attach `frame-omni-bencher` and `chain-spec-builder` binaries to the release draft ## TODO - [x] add also chain-spec-builder binary - [ ] ~~check/investigate Kian's comment: `chain spec builder. 
Ideally I want it to match the version of the sp-genesis-builder crate`~~ see [comment](https://github.com/paritytech/polkadot-sdk/pull/4518#issuecomment-2134731355) - [ ] Backport to `polkadot-sdk@1.11` release, so we can use it for next fellows release: https://github.com/polkadot-fellows/runtimes/pull/324 - [ ] Backport to `polkadot-sdk@1.12` release --------- Co-authored-by: Branislav Kontur --- .../release-30_publish_release_draft.yml | 65 +++++++++++++++++-- 1 file changed, 60 insertions(+), 5 deletions(-) diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml index a9e521051d0..f39eb4c1716 100644 --- a/.github/workflows/release-30_publish_release_draft.yml +++ b/.github/workflows/release-30_publish_release_draft.yml @@ -23,13 +23,44 @@ jobs: echo "stable=$RUST_STABLE_VERSION" >> $GITHUB_OUTPUT build-runtimes: - uses: "./.github/workflows/srtool.yml" + uses: "./.github/workflows/release-srtool.yml" with: excluded_runtimes: "substrate-test bp cumulus-test kitchensink minimal-template parachain-template penpal polkadot-test seedling shell frame-try sp solochain-template" + build-binaries: + runs-on: ubuntu-latest + strategy: + matrix: + binary: [ frame-omni-bencher, chain-spec-builder ] + steps: + - name: Checkout sources + uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + + - name: Install protobuf-compiler + run: | + sudo apt update + sudo apt install -y protobuf-compiler + + - name: Build ${{ matrix.binary }} binary + run: | + if [[ ${{ matrix.binary }} =~ chain-spec-builder ]]; then + cargo build --locked --profile=production -p staging-${{ matrix.binary }} --bin ${{ matrix.binary }} + target/production/${{ matrix.binary }} -h + else + cargo build --locked --profile=production -p ${{ matrix.binary }} + target/production/${{ matrix.binary }} --version + fi + + - name: Upload ${{ matrix.binary }} binary + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + with: + name: ${{ matrix.binary }} + path: target/production/${{ matrix.binary }} + + publish-release-draft: runs-on: ubuntu-latest - needs: [get-rust-versions, build-runtimes] + needs: [ get-rust-versions, build-runtimes ] outputs: release_url: ${{ steps.create-release.outputs.html_url }} asset_upload_url: ${{ steps.create-release.outputs.upload_url }} @@ -37,15 +68,15 @@ jobs: - name: Checkout uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + - name: Download artifacts + uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4 + - name: Prepare tooling run: | URL=https://github.com/chevdor/tera-cli/releases/download/v0.2.4/tera-cli_linux_amd64.deb wget $URL -O tera.deb sudo dpkg -i tera.deb - - name: Download artifacts - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4 - - name: Prepare draft id: draft env: @@ -129,6 +160,30 @@ jobs: asset_name: ${{ matrix.chain }}_runtime-v${{ env.SPEC }}.compact.compressed.wasm asset_content_type: application/wasm + publish-binaries: + needs: [ publish-release-draft, build-binaries ] + continue-on-error: true + runs-on: ubuntu-latest + strategy: + matrix: + binary: [frame-omni-bencher, chain-spec-builder] + + steps: + - name: Download artifacts + uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4 + with: + name: ${{ matrix.binary }} + + - name: Upload ${{ matrix.binary }} binary + uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 
#v1.0.2
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ upload_url: ${{ needs.publish-release-draft.outputs.asset_upload_url }}
+ asset_path: ${{ github.workspace}}/${{ matrix.binary }}
+ asset_name: ${{ matrix.binary }}
+ asset_content_type: application/octet-stream
+
 post_to_matrix:
 runs-on: ubuntu-latest
 needs: publish-release-draft

-- 
GitLab

From dfcfa4ab37819fddb4278eaac306adc0f194fd27 Mon Sep 17 00:00:00 2001
From: Kian Paimani <5588131+kianenigma@users.noreply.github.com>
Date: Wed, 29 May 2024 16:34:42 +0800
Subject: [PATCH 085/106] Publish `chain-spec-builder` (#4518)

marking it as releasable, attaching the same version number that is attached to
other binaries such as `polkadot` and `polkadot-parachain`.

I have more thoughts about the version number, though. The chain-spec builder
is mainly a user of the `sp-genesis-builder` api. So the versioning should be
such that it helps users know, given a version of `sp-genesis-builder` in their
runtime, which version of `chain-spec-builder` they should use. With this, we
can possibly alter the version number to always match `sp-genesis-builder`.

Fixes https://github.com/paritytech/polkadot-sdk/issues/4352

- [x] Add to release artifacts ~~similar to
https://github.com/paritytech/polkadot-sdk/pull/4405~~ done here:
https://github.com/paritytech/polkadot-sdk/pull/4557

---------

Co-authored-by: Branislav Kontur
---
 Cargo.lock | 2 +-
 substrate/bin/utils/chain-spec-builder/Cargo.toml | 5 +++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 6240d9db2ea..c971ebcba9c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -20529,7 +20529,7 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"

 [[package]]
 name = "staging-chain-spec-builder"
-version = "3.0.0"
+version = "1.6.0"
 dependencies = [
 "clap 4.5.3",
 "log",

diff --git a/substrate/bin/utils/chain-spec-builder/Cargo.toml b/substrate/bin/utils/chain-spec-builder/Cargo.toml
index 5c8a3ab4e89..cc9aa402fd1 100644
--- a/substrate/bin/utils/chain-spec-builder/Cargo.toml
+++ b/substrate/bin/utils/chain-spec-builder/Cargo.toml
@@ -1,13 +1,14 @@
 [package]
 name = "staging-chain-spec-builder"
-version = "3.0.0"
+version = "1.6.0"
 authors.workspace = true
 edition.workspace = true
 build = "build.rs"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 homepage = "https://substrate.io"
 repository.workspace = true
-publish = false
+publish = true
+description = "Utility for building chain-specification files for Substrate-based runtimes based on `sp-genesis-builder`"

 [lints]
 workspace = true

-- 
GitLab

From aa32faaebf64426becb2feeede347740eb7a3908 Mon Sep 17 00:00:00 2001
From: Joshua Cheong
Date: Wed, 29 May 2024 18:11:16 +0800
Subject: [PATCH 086/106] Update README.md (#4623)

Minor edit to a broken link for Rust Docs on the README.md

Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com>
---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index f15c716a811..77348173252 100644
--- a/README.md
+++ b/README.md
@@ -23,7 +23,7 @@ forks](https://img.shields.io/github/forks/paritytech/polkadot-sdk)

 ## 📚 Documentation

-* [🦀 rust-docs]([paritytech.github.io/](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/index.html))
+* [🦀 rust-docs](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/index.html)
 * [Introduction](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/index.html) to each component of
the Polkadot SDK: Substrate, FRAME, Cumulus, and XCM * Other Resources: -- GitLab From d5053ac4161b6e3f634a3ffb6df07637058e9f55 Mon Sep 17 00:00:00 2001 From: Francisco Aguirre Date: Wed, 29 May 2024 20:57:17 +0100 Subject: [PATCH 087/106] Change `XcmDryRunApi::dry_run_extrinsic` to take a call instead (#4621) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Follow-up to the new `XcmDryRunApi` runtime API introduced in https://github.com/paritytech/polkadot-sdk/pull/3872. Taking an extrinsic means the frontend has to sign first to dry-run and once again to submit. This is bad UX which is solved by taking an `origin` and a `call`. This also has the benefit of being able to dry-run as any account, since it needs no signature. This is a breaking change since I changed `dry_run_extrinsic` to `dry_run_call`, however, this API is still only on testnets. The crates are bumped accordingly. As a part of this PR, I changed the name of the API from `XcmDryRunApi` to just `DryRunApi`, since it can be used for general dry-running :) Step towards https://github.com/paritytech/polkadot-sdk/issues/690. Example of calling the API with PAPI, not the best code, just testing :) ```ts // We just build a call, the arguments make it look very big though. const call = localApi.tx.XcmPallet.transfer_assets({ dest: XcmVersionedLocation.V4({ parents: 0, interior: XcmV4Junctions.X1(XcmV4Junction.Parachain(1000)) }), beneficiary: XcmVersionedLocation.V4({ parents: 0, interior: XcmV4Junctions.X1(XcmV4Junction.AccountId32({ network: undefined, id: Binary.fromBytes(encodeAccount(account.address)) })) }), weight_limit: XcmV3WeightLimit.Unlimited(), assets: XcmVersionedAssets.V4([{ id: { parents: 0, interior: XcmV4Junctions.Here() }, fun: XcmV3MultiassetFungibility.Fungible(1_000_000_000_000n) } ]), fee_asset_item: 0, }); // We call the API passing in a signed origin const result = await localApi.apis.XcmDryRunApi.dry_run_call( WestendRuntimeOriginCaller.system(DispatchRawOrigin.Signed(account.address)), call.decodedCall ); if (result.success && result.value.execution_result.success) { // We find the forwarded XCM we want. The first one going to AssetHub in this case. const xcmsToAssetHub = result.value.forwarded_xcms.find(([location, _]) => ( location.type === "V4" && location.value.parents === 0 && location.value.interior.type === "X1" && location.value.interior.value.type === "Parachain" && location.value.interior.value.value === 1000 ))!; // We can even find the delivery fees for that forwarded XCM. const deliveryFeesQuery = await localApi.apis.XcmPaymentApi.query_delivery_fees(xcmsToAssetHub[0], xcmsToAssetHub[1][0]); if (deliveryFeesQuery.success) { const amount = deliveryFeesQuery.value.type === "V4" && deliveryFeesQuery.value.value[0].fun.type === "Fungible" && deliveryFeesQuery.value.value[0].fun.value.valueOf() || 0n; // We store them in state somewhere. 
setDeliveryFees(formatAmount(BigInt(amount)));
  }
}
```

---------

Co-authored-by: Bastian Köcher
---
 .../src/tests/xcm_fee_estimation.rs | 86 ++--------------
 .../assets/asset-hub-rococo/src/lib.rs | 66 ++-----------
 .../assets/asset-hub-westend/src/lib.rs | 66 ++-----------
 .../runtimes/testing/penpal/src/lib.rs | 27 +++---
 cumulus/xcm/xcm-emulator/src/lib.rs | 3 +
 polkadot/node/service/src/fake_runtime_api.rs | 4 +-
 polkadot/runtime/rococo/src/lib.rs | 60 +-----------
 polkadot/runtime/westend/src/lib.rs | 60 +-----------
 polkadot/xcm/pallet-xcm/src/lib.rs | 97 +++++++++++++++++--
 .../src/dry_run.rs | 19 ++--
 .../tests/fee_estimation.rs | 86 ++++++----------
 .../xcm-fee-payment-runtime-api/tests/mock.rs | 57 +++--------
 prdoc/pr_4621.prdoc | 43 ++++++++
 13 files changed, 237 insertions(+), 437 deletions(-)
 create mode 100644 prdoc/pr_4621.prdoc

diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs
index 3e311ef9565..dc89ef1f7a4 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs
@@ -17,16 +17,15 @@

 use crate::imports::*;

-use sp_keyring::AccountKeyring::Alice;
-use sp_runtime::{generic, MultiSignature};
+use frame_system::RawOrigin;
 use xcm_fee_payment_runtime_api::{
- dry_run::runtime_decl_for_xcm_dry_run_api::XcmDryRunApiV1,
+ dry_run::runtime_decl_for_dry_run_api::DryRunApiV1,
 fees::runtime_decl_for_xcm_payment_api::XcmPaymentApiV1,
 };

 /// We are able to dry-run and estimate the fees for a teleport between relay and system para.
 /// Scenario: Alice on Westend relay chain wants to teleport WND to Asset Hub.
-/// We want to know the fees using the `XcmDryRunApi` and `XcmPaymentApi`.
+/// We want to know the fees using the `DryRunApi` and `XcmPaymentApi`.
 #[test]
 fn teleport_relay_system_para_works() {
 let destination: Location = Parachain(1000).into(); // Asset Hub.
@@ -42,6 +41,7 @@ fn teleport_relay_system_para_works() {
 ::new_ext().execute_with(|| {
 type Runtime = ::Runtime;
 type RuntimeCall = ::RuntimeCall;
+ type OriginCaller = ::OriginCaller;

 let call = RuntimeCall::XcmPallet(pallet_xcm::Call::transfer_assets {
 dest: Box::new(VersionedLocation::V4(destination.clone())),
@@ -50,9 +50,8 @@ fn teleport_relay_system_para_works() {
 fee_asset_item: 0,
 weight_limit: Unlimited,
 });
- let sender = Alice; // Is the same as `WestendSender`.
- let extrinsic = construct_extrinsic_westend(sender, call);
- let result = Runtime::dry_run_extrinsic(extrinsic).unwrap();
+ let origin = OriginCaller::system(RawOrigin::Signed(WestendSender::get()));
+ let result = Runtime::dry_run_call(origin, call).unwrap();
 assert_eq!(result.forwarded_xcms.len(), 1);
 let (destination_to_query, messages_to_query) = &result.forwarded_xcms[0];
 assert_eq!(messages_to_query.len(), 1);
@@ -105,7 +104,7 @@ fn teleport_relay_system_para_works() {

 /// We are able to dry-run and estimate the fees for a multi-hop XCM journey.
 /// Scenario: Alice on PenpalA has some WND and wants to send them to PenpalB.
-/// We want to know the fees using the `XcmDryRunApi` and `XcmPaymentApi`.
+/// We want to know the fees using the `DryRunApi` and `XcmPaymentApi`.
#[test] fn multi_hop_works() { let destination = PenpalA::sibling_location_of(PenpalB::para_id()); @@ -142,6 +141,7 @@ fn multi_hop_works() { ::execute_with(|| { type Runtime = ::Runtime; type RuntimeCall = ::RuntimeCall; + type OriginCaller = ::OriginCaller; let call = RuntimeCall::PolkadotXcm(pallet_xcm::Call::transfer_assets { dest: Box::new(VersionedLocation::V4(destination.clone())), @@ -150,9 +150,8 @@ fn multi_hop_works() { fee_asset_item: 0, weight_limit: Unlimited, }); - let sender = Alice; // Same as `PenpalASender`. - let extrinsic = construct_extrinsic_penpal(sender, call); - let result = Runtime::dry_run_extrinsic(extrinsic).unwrap(); + let origin = OriginCaller::system(RawOrigin::Signed(PenpalASender::get())); + let result = Runtime::dry_run_call(origin, call).unwrap(); assert_eq!(result.forwarded_xcms.len(), 1); let (destination_to_query, messages_to_query) = &result.forwarded_xcms[0]; assert_eq!(messages_to_query.len(), 1); @@ -304,68 +303,3 @@ fn transfer_assets_para_to_para(test: ParaToParaThroughRelayTest) -> DispatchRes test.args.weight_limit, ) } - -// Constructs the SignedExtra component of an extrinsic for the Westend runtime. -fn construct_extrinsic_westend( - sender: sp_keyring::AccountKeyring, - call: westend_runtime::RuntimeCall, -) -> westend_runtime::UncheckedExtrinsic { - type Runtime = ::Runtime; - let account_id = ::AccountId::from(sender.public()); - let tip = 0; - let extra: westend_runtime::SignedExtra = ( - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckMortality::::from(sp_runtime::generic::Era::immortal()), - frame_system::CheckNonce::::from( - frame_system::Pallet::::account(&account_id).nonce, - ), - frame_system::CheckWeight::::new(), - pallet_transaction_payment::ChargeTransactionPayment::::from(tip), - frame_metadata_hash_extension::CheckMetadataHash::::new(false), - ); - let raw_payload = westend_runtime::SignedPayload::new(call, extra).unwrap(); - let signature = raw_payload.using_encoded(|payload| sender.sign(payload)); - let (call, extra, _) = raw_payload.deconstruct(); - westend_runtime::UncheckedExtrinsic::new_signed( - call, - account_id.into(), - MultiSignature::Sr25519(signature), - extra, - ) -} - -// Constructs the SignedExtra component of an extrinsic for the Westend runtime. 
-fn construct_extrinsic_penpal( - sender: sp_keyring::AccountKeyring, - call: penpal_runtime::RuntimeCall, -) -> penpal_runtime::UncheckedExtrinsic { - type Runtime = ::Runtime; - let account_id = ::AccountId::from(sender.public()); - let tip = 0; - let extra: penpal_runtime::SignedExtra = ( - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(generic::Era::immortal()), - frame_system::CheckNonce::::from( - frame_system::Pallet::::account(&account_id).nonce, - ), - frame_system::CheckWeight::::new(), - pallet_asset_tx_payment::ChargeAssetTxPayment::::from(tip, None), - ); - type SignedPayload = - generic::SignedPayload; - let raw_payload = SignedPayload::new(call, extra).unwrap(); - let signature = raw_payload.using_encoded(|payload| sender.sign(payload)); - let (call, extra, _) = raw_payload.deconstruct(); - penpal_runtime::UncheckedExtrinsic::new_signed( - call, - account_id.into(), - MultiSignature::Sr25519(signature), - extra, - ) -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 4705d12e60c..e3a106c6ab9 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -101,7 +101,7 @@ use xcm::{ IntoVersion, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, }; use xcm_fee_payment_runtime_api::{ - dry_run::{Error as XcmDryRunApiError, ExtrinsicDryRunEffects, XcmDryRunEffects}, + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -1332,67 +1332,13 @@ impl_runtime_apis! 
{ } } - impl xcm_fee_payment_runtime_api::dry_run::XcmDryRunApi for Runtime { - fn dry_run_extrinsic(extrinsic: ::Extrinsic) -> Result, XcmDryRunApiError> { - use xcm_builder::InspectMessageQueues; - use xcm_executor::RecordXcm; - use xcm::prelude::*; - - pallet_xcm::Pallet::::set_record_xcm(true); - let result = Executive::apply_extrinsic(extrinsic).map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_extrinsic", - "Applying extrinsic failed with error {:?}", - error, - ); - XcmDryRunApiError::InvalidExtrinsic - })?; - let local_xcm = pallet_xcm::Pallet::::recorded_xcm(); - let forwarded_xcms = xcm_config::XcmRouter::get_messages(); - let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); - Ok(ExtrinsicDryRunEffects { - local_xcm: local_xcm.map(VersionedXcm::<()>::from), - forwarded_xcms, - emitted_events: events, - execution_result: result, - }) + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_call::(origin, call) } - fn dry_run_xcm(origin_location: VersionedLocation, program: VersionedXcm) -> Result, XcmDryRunApiError> { - use xcm_builder::InspectMessageQueues; - use xcm::prelude::*; - - let origin_location: Location = origin_location.try_into().map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_xcm", - "Location version conversion failed with error: {:?}", - error, - ); - XcmDryRunApiError::VersionedConversionFailed - })?; - let program: Xcm = program.try_into().map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_xcm", - "Xcm version conversion failed with error {:?}", - error, - ); - XcmDryRunApiError::VersionedConversionFailed - })?; - let mut hash = program.using_encoded(sp_core::hashing::blake2_256); - let result = xcm_executor::XcmExecutor::::prepare_and_execute( - origin_location, - program, - &mut hash, - Weight::MAX, // Max limit available for execution. - Weight::zero(), - ); - let forwarded_xcms = xcm_config::XcmRouter::get_messages(); - let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); - Ok(XcmDryRunEffects { - forwarded_xcms, - emitted_events: events, - execution_result: result, - }) + fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_xcm::(origin_location, xcm) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index a82094d6f8a..ececae3ef0a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -100,7 +100,7 @@ use xcm::latest::prelude::{ }; use xcm_fee_payment_runtime_api::{ - dry_run::{Error as XcmDryRunApiError, ExtrinsicDryRunEffects, XcmDryRunEffects}, + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -1368,67 +1368,13 @@ impl_runtime_apis! 
{ } } - impl xcm_fee_payment_runtime_api::dry_run::XcmDryRunApi for Runtime { - fn dry_run_extrinsic(extrinsic: ::Extrinsic) -> Result, XcmDryRunApiError> { - use xcm_builder::InspectMessageQueues; - use xcm_executor::RecordXcm; - use xcm::prelude::*; - - pallet_xcm::Pallet::::set_record_xcm(true); - let result = Executive::apply_extrinsic(extrinsic).map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_extrinsic", - "Applying extrinsic failed with error {:?}", - error, - ); - XcmDryRunApiError::InvalidExtrinsic - })?; - let local_xcm = pallet_xcm::Pallet::::recorded_xcm(); - let forwarded_xcms = xcm_config::XcmRouter::get_messages(); - let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); - Ok(ExtrinsicDryRunEffects { - local_xcm: local_xcm.map(VersionedXcm::<()>::from), - forwarded_xcms, - emitted_events: events, - execution_result: result, - }) + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_call::(origin, call) } - fn dry_run_xcm(origin_location: VersionedLocation, program: VersionedXcm) -> Result, XcmDryRunApiError> { - use xcm_builder::InspectMessageQueues; - use xcm::prelude::*; - - let origin_location: Location = origin_location.try_into().map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_xcm", - "Location version conversion failed with error: {:?}", - error, - ); - XcmDryRunApiError::VersionedConversionFailed - })?; - let program: Xcm = program.try_into().map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_xcm", - "Xcm version conversion failed with error {:?}", - error, - ); - XcmDryRunApiError::VersionedConversionFailed - })?; - let mut hash = program.using_encoded(sp_core::hashing::blake2_256); - let result = xcm_executor::XcmExecutor::::prepare_and_execute( - origin_location, - program, - &mut hash, - Weight::MAX, // Max limit available for execution. 
- Weight::zero(), - ); - let forwarded_xcms = xcm_config::XcmRouter::get_messages(); - let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); - Ok(XcmDryRunEffects { - forwarded_xcms, - emitted_events: events, - execution_result: result, - }) + fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_xcm::(origin_location, xcm) } } diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 8afe56cddef..7e4a013117b 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -64,7 +64,7 @@ pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, + traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, Dispatchable}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, }; @@ -86,7 +86,7 @@ use xcm::{ IntoVersion, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, }; use xcm_fee_payment_runtime_api::{ - dry_run::{Error as XcmDryRunApiError, ExtrinsicDryRunEffects, XcmDryRunEffects}, + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -886,25 +886,19 @@ impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::XcmDryRunApi for Runtime { - fn dry_run_extrinsic(extrinsic: ::Extrinsic) -> Result, XcmDryRunApiError> { + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { use xcm_builder::InspectMessageQueues; use xcm_executor::RecordXcm; use xcm::prelude::*; - pallet_xcm::Pallet::::set_record_xcm(true); - let result = Executive::apply_extrinsic(extrinsic).map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_extrinsic", - "Applying extrinsic failed with error {:?}", - error, - ); - XcmDryRunApiError::InvalidExtrinsic - })?; + frame_system::Pallet::::reset_events(); // To make sure we only record events from current call. + let result = call.dispatch(origin.into()); + pallet_xcm::Pallet::::set_record_xcm(false); let local_xcm = pallet_xcm::Pallet::::recorded_xcm(); let forwarded_xcms = xcm_config::XcmRouter::get_messages(); let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); - Ok(ExtrinsicDryRunEffects { + Ok(CallDryRunEffects { local_xcm: local_xcm.map(VersionedXcm::<()>::from), forwarded_xcms, emitted_events: events, @@ -918,7 +912,7 @@ impl_runtime_apis! { let origin_location: Location = origin_location.try_into().map_err(|error| { log::error!( - target: "xcm::XcmDryRunApi::dry_run_xcm", + target: "xcm::DryRunApi::dry_run_xcm", "Location version conversion failed with error: {:?}", error, ); @@ -926,13 +920,14 @@ impl_runtime_apis! { })?; let program: Xcm = program.try_into().map_err(|error| { log::error!( - target: "xcm::XcmDryRunApi::dry_run_xcm", + target: "xcm::DryRunApi::dry_run_xcm", "Xcm version conversion failed with error {:?}", error, ); XcmDryRunApiError::VersionedConversionFailed })?; let mut hash = program.using_encoded(sp_core::hashing::blake2_256); + frame_system::Pallet::::reset_events(); // To make sure we only record events from current call. 
let result = xcm_executor::XcmExecutor::::prepare_and_execute( origin_location, program, diff --git a/cumulus/xcm/xcm-emulator/src/lib.rs b/cumulus/xcm/xcm-emulator/src/lib.rs index a50f33951d0..1a3f3930cb3 100644 --- a/cumulus/xcm/xcm-emulator/src/lib.rs +++ b/cumulus/xcm/xcm-emulator/src/lib.rs @@ -215,6 +215,7 @@ pub trait Chain: TestExt { type RuntimeOrigin; type RuntimeEvent; type System; + type OriginCaller; fn account_id_of(seed: &str) -> AccountId { helpers::get_account_id_from_seed::(seed) @@ -366,6 +367,7 @@ macro_rules! decl_test_relay_chains { type RuntimeOrigin = $runtime::RuntimeOrigin; type RuntimeEvent = $runtime::RuntimeEvent; type System = $crate::SystemPallet::; + type OriginCaller = $runtime::OriginCaller; fn account_data_of(account: $crate::AccountIdOf) -> $crate::AccountData<$crate::Balance> { ::ext_wrapper(|| $crate::SystemPallet::::account(account).data.into()) @@ -600,6 +602,7 @@ macro_rules! decl_test_parachains { type RuntimeOrigin = $runtime::RuntimeOrigin; type RuntimeEvent = $runtime::RuntimeEvent; type System = $crate::SystemPallet::; + type OriginCaller = $runtime::OriginCaller; type Network = N; fn account_data_of(account: $crate::AccountIdOf) -> $crate::AccountData<$crate::Balance> { diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index 03c4836020d..34abc76813f 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ b/polkadot/node/service/src/fake_runtime_api.rs @@ -416,8 +416,8 @@ sp_api::impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::XcmDryRunApi for Runtime { - fn dry_run_extrinsic(_: ::Extrinsic) -> Result, xcm_fee_payment_runtime_api::dry_run::Error> { + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + fn dry_run_call(_: (), _: ()) -> Result, xcm_fee_payment_runtime_api::dry_run::Error> { unimplemented!() } diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index f0cc7e046f2..c2614f7e96e 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -135,7 +135,7 @@ use governance::{ TreasurySpender, }; use xcm_fee_payment_runtime_api::{ - dry_run::{Error as XcmDryRunApiError, ExtrinsicDryRunEffects, XcmDryRunEffects}, + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -1809,63 +1809,13 @@ sp_api::impl_runtime_apis! 
{ } } - impl xcm_fee_payment_runtime_api::dry_run::XcmDryRunApi for Runtime { - fn dry_run_extrinsic(extrinsic: ::Extrinsic) -> Result, XcmDryRunApiError> { - use xcm_builder::InspectMessageQueues; - use xcm_executor::RecordXcm; - pallet_xcm::Pallet::::set_record_xcm(true); - let result = Executive::apply_extrinsic(extrinsic).map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_extrinsic", - "Applying extrinsic failed with error {:?}", - error, - ); - XcmDryRunApiError::InvalidExtrinsic - })?; - let local_xcm = pallet_xcm::Pallet::::recorded_xcm(); - let forwarded_xcms = xcm_config::XcmRouter::get_messages(); - let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); - Ok(ExtrinsicDryRunEffects { - local_xcm: local_xcm.map(VersionedXcm::<()>::from), - forwarded_xcms, - emitted_events: events, - execution_result: result, - }) + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { + XcmPallet::dry_run_call::(origin, call) } fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, XcmDryRunApiError> { - use xcm_builder::InspectMessageQueues; - let origin_location: Location = origin_location.try_into().map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_xcm", - "Location version conversion failed with error: {:?}", - error, - ); - XcmDryRunApiError::VersionedConversionFailed - })?; - let xcm: Xcm = xcm.try_into().map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_xcm", - "Xcm version conversion failed with error {:?}", - error, - ); - XcmDryRunApiError::VersionedConversionFailed - })?; - let mut hash = xcm.using_encoded(sp_io::hashing::blake2_256); - let result = xcm_executor::XcmExecutor::::prepare_and_execute( - origin_location, - xcm, - &mut hash, - Weight::MAX, // Max limit available for execution. - Weight::zero(), - ); - let forwarded_xcms = xcm_config::XcmRouter::get_messages(); - let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); - Ok(XcmDryRunEffects { - forwarded_xcms, - emitted_events: events, - execution_result: result, - }) + XcmPallet::dry_run_xcm::(origin_location, xcm) } } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 4bf132d82c9..e6790329959 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -109,7 +109,7 @@ use xcm::{ use xcm_builder::PayOverXcm; use xcm_fee_payment_runtime_api::{ - dry_run::{Error as XcmDryRunApiError, ExtrinsicDryRunEffects, XcmDryRunEffects}, + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -2271,63 +2271,13 @@ sp_api::impl_runtime_apis! 
{ } } - impl xcm_fee_payment_runtime_api::dry_run::XcmDryRunApi for Runtime { - fn dry_run_extrinsic(extrinsic: ::Extrinsic) -> Result, XcmDryRunApiError> { - use xcm_builder::InspectMessageQueues; - use xcm_executor::RecordXcm; - pallet_xcm::Pallet::::set_record_xcm(true); - let result = Executive::apply_extrinsic(extrinsic).map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_extrinsic", - "Applying extrinsic failed with error {:?}", - error, - ); - XcmDryRunApiError::InvalidExtrinsic - })?; - let local_xcm = pallet_xcm::Pallet::::recorded_xcm(); - let forwarded_xcms = xcm_config::XcmRouter::get_messages(); - let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); - Ok(ExtrinsicDryRunEffects { - local_xcm: local_xcm.map(VersionedXcm::<()>::from), - forwarded_xcms, - emitted_events: events, - execution_result: result, - }) + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { + XcmPallet::dry_run_call::(origin, call) } fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, XcmDryRunApiError> { - use xcm_builder::InspectMessageQueues; - let origin_location: Location = origin_location.try_into().map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_xcm", - "Location version conversion failed with error: {:?}", - error, - ); - XcmDryRunApiError::VersionedConversionFailed - })?; - let xcm: Xcm = xcm.try_into().map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_xcm", - "Xcm version conversion failed with error {:?}", - error, - ); - XcmDryRunApiError::VersionedConversionFailed - })?; - let mut hash = xcm.using_encoded(sp_io::hashing::blake2_256); - let result = xcm_executor::XcmExecutor::::prepare_and_execute( - origin_location, - xcm, - &mut hash, - Weight::MAX, // Max limit available for execution. 
- Weight::zero(), - ); - let forwarded_xcms = xcm_config::XcmRouter::get_messages(); - let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); - Ok(XcmDryRunEffects { - forwarded_xcms, - emitted_events: events, - execution_result: result, - }) + XcmPallet::dry_run_xcm::(origin_location, xcm) } } diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 37fc121ba21..160d5273968 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -29,7 +29,9 @@ pub mod migration; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; use frame_support::{ - dispatch::{DispatchErrorWithPostInfo, GetDispatchInfo, WithPostDispatchInfo}, + dispatch::{ + DispatchErrorWithPostInfo, GetDispatchInfo, PostDispatchInfo, WithPostDispatchInfo, + }, pallet_prelude::*, traits::{ Contains, ContainsPair, Currency, Defensive, EnsureOrigin, Get, LockableCurrency, @@ -50,18 +52,22 @@ use sp_runtime::{ use sp_std::{boxed::Box, marker::PhantomData, prelude::*, result::Result, vec}; use xcm::{latest::QueryResponseInfo, prelude::*}; use xcm_builder::{ - ExecuteController, ExecuteControllerWeightInfo, QueryController, QueryControllerWeightInfo, - SendController, SendControllerWeightInfo, + ExecuteController, ExecuteControllerWeightInfo, InspectMessageQueues, QueryController, + QueryControllerWeightInfo, SendController, SendControllerWeightInfo, }; use xcm_executor::{ traits::{ AssetTransferError, CheckSuspension, ClaimAssets, ConvertLocation, ConvertOrigin, DropAssets, MatchesFungible, OnResponse, Properties, QueryHandler, QueryResponseStatus, - TransactAsset, TransferType, VersionChangeNotifier, WeightBounds, XcmAssetTransfers, + RecordXcm, TransactAsset, TransferType, VersionChangeNotifier, WeightBounds, + XcmAssetTransfers, }, AssetsInHolding, }; -use xcm_fee_payment_runtime_api::fees::Error as XcmPaymentApiError; +use xcm_fee_payment_runtime_api::{ + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, + fees::Error as XcmPaymentApiError, +}; #[cfg(any(feature = "try-runtime", test))] use sp_runtime::TryRuntimeError; @@ -2432,6 +2438,85 @@ impl Pallet { AccountIdConversion::::into_account_truncating(&ID) } + /// Dry-runs `call` with the given `origin`. + /// + /// Returns not only the call result and events, but also the local XCM, if any, + /// and any XCMs forwarded to other locations. + /// Meant to be used in the `xcm_fee_payment_runtime_api::dry_run::DryRunApi` runtime API. + pub fn dry_run_call( + origin: OriginCaller, + call: RuntimeCall, + ) -> Result::RuntimeEvent>, XcmDryRunApiError> + where + Runtime: crate::Config, + Router: InspectMessageQueues, + RuntimeCall: Dispatchable, + ::RuntimeOrigin: From, + { + crate::Pallet::::set_record_xcm(true); + frame_system::Pallet::::reset_events(); // To make sure we only record events from current call. + let result = call.dispatch(origin.into()); + crate::Pallet::::set_record_xcm(false); + let local_xcm = crate::Pallet::::recorded_xcm(); + let forwarded_xcms = Router::get_messages(); + let events: Vec<::RuntimeEvent> = + frame_system::Pallet::::read_events_no_consensus() + .map(|record| record.event.clone()) + .collect(); + Ok(CallDryRunEffects { + local_xcm: local_xcm.map(VersionedXcm::<()>::from), + forwarded_xcms, + emitted_events: events, + execution_result: result, + }) + } + + /// Dry-runs `xcm` with the given `origin_location`. + /// + /// Returns execution result, events, and any forwarded XCMs to other locations. 
+ /// Meant to be used in the `xcm_fee_payment_runtime_api::dry_run::DryRunApi` runtime API. + pub fn dry_run_xcm( + origin_location: VersionedLocation, + xcm: VersionedXcm, + ) -> Result::RuntimeEvent>, XcmDryRunApiError> + where + Runtime: frame_system::Config, + Router: InspectMessageQueues, + XcmConfig: xcm_executor::Config, + { + let origin_location: Location = origin_location.try_into().map_err(|error| { + log::error!( + target: "xcm::DryRunApi::dry_run_xcm", + "Location version conversion failed with error: {:?}", + error, + ); + XcmDryRunApiError::VersionedConversionFailed + })?; + let xcm: Xcm = xcm.try_into().map_err(|error| { + log::error!( + target: "xcm::DryRunApi::dry_run_xcm", + "Xcm version conversion failed with error {:?}", + error, + ); + XcmDryRunApiError::VersionedConversionFailed + })?; + let mut hash = xcm.using_encoded(sp_io::hashing::blake2_256); + frame_system::Pallet::::reset_events(); // To make sure we only record events from current call. + let result = xcm_executor::XcmExecutor::::prepare_and_execute( + origin_location, + xcm, + &mut hash, + Weight::MAX, // Max limit available for execution. + Weight::zero(), + ); + let forwarded_xcms = Router::get_messages(); + let events: Vec<::RuntimeEvent> = + frame_system::Pallet::::read_events_no_consensus() + .map(|record| record.event.clone()) + .collect(); + Ok(XcmDryRunEffects { forwarded_xcms, emitted_events: events, execution_result: result }) + } + pub fn query_xcm_weight(message: VersionedXcm<()>) -> Result { let message = Xcm::<()>::try_from(message) .map_err(|_| XcmPaymentApiError::VersionedConversionFailed)?; @@ -3126,7 +3211,7 @@ impl CheckSuspension for Pallet { } } -impl xcm_executor::traits::RecordXcm for Pallet { +impl RecordXcm for Pallet { fn should_record() -> bool { ShouldRecordXcm::::get() } diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/src/dry_run.rs b/polkadot/xcm/xcm-fee-payment-runtime-api/src/dry_run.rs index 62a422d6efe..9828acab402 100644 --- a/polkadot/xcm/xcm-fee-payment-runtime-api/src/dry_run.rs +++ b/polkadot/xcm/xcm-fee-payment-runtime-api/src/dry_run.rs @@ -19,16 +19,15 @@ //! that need to be paid. use codec::{Decode, Encode}; -use frame_support::pallet_prelude::{DispatchResult, TypeInfo}; -use sp_runtime::traits::Block as BlockT; +use frame_support::pallet_prelude::{DispatchResultWithPostInfo, TypeInfo}; use sp_std::vec::Vec; use xcm::prelude::*; /// Effects of dry-running an extrinsic. #[derive(Encode, Decode, Debug, TypeInfo)] -pub struct ExtrinsicDryRunEffects { +pub struct CallDryRunEffects { /// The result of executing the extrinsic. - pub execution_result: DispatchResult, + pub execution_result: DispatchResultWithPostInfo, /// The list of events fired by the extrinsic. pub emitted_events: Vec, /// The local XCM that was attempted to be executed, if any. @@ -55,12 +54,12 @@ sp_api::decl_runtime_apis! { /// If there's local execution, the location will be "Here". /// This vector can be used to calculate both execution and delivery fees. /// - /// Extrinsics or XCMs might fail when executed, this doesn't mean the result of these calls will be an `Err`. + /// Calls or XCMs might fail when executed, this doesn't mean the result of these calls will be an `Err`. /// In those cases, there might still be a valid result, with the execution error inside it. /// The only reasons why these calls might return an error are listed in the [`Error`] enum. - pub trait XcmDryRunApi { - /// Dry run extrinsic. 
- fn dry_run_extrinsic(extrinsic: ::Extrinsic) -> Result, Error>; + pub trait DryRunApi { + /// Dry run call. + fn dry_run_call(origin: OriginCaller, call: Call) -> Result, Error>; /// Dry run XCM program fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, Error>; @@ -76,8 +75,4 @@ pub enum Error { /// Converting a versioned data structure from one version to another failed. #[codec(index = 1)] VersionedConversionFailed, - - /// Extrinsic was invalid. - #[codec(index = 2)] - InvalidExtrinsic, } diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/tests/fee_estimation.rs b/polkadot/xcm/xcm-fee-payment-runtime-api/tests/fee_estimation.rs index 25a68090c22..33611c8a471 100644 --- a/polkadot/xcm/xcm-fee-payment-runtime-api/tests/fee_estimation.rs +++ b/polkadot/xcm/xcm-fee-payment-runtime-api/tests/fee_estimation.rs @@ -16,19 +16,17 @@ //! Tests for using both the XCM fee payment API and the dry-run API. -use frame_support::{ - dispatch::DispatchInfo, - pallet_prelude::{DispatchClass, Pays}, -}; +use frame_system::RawOrigin; use sp_api::ProvideRuntimeApi; use sp_runtime::testing::H256; use xcm::prelude::*; -use xcm_fee_payment_runtime_api::{dry_run::XcmDryRunApi, fees::XcmPaymentApi}; +use xcm_fee_payment_runtime_api::{dry_run::DryRunApi, fees::XcmPaymentApi}; mod mock; use mock::{ - extra, fake_message_hash, new_test_ext_with_balances, new_test_ext_with_balances_and_assets, - DeliveryFees, ExistentialDeposit, HereLocation, RuntimeCall, RuntimeEvent, TestClient, TestXt, + fake_message_hash, new_test_ext_with_balances, new_test_ext_with_balances_and_assets, + DeliveryFees, ExistentialDeposit, HereLocation, OriginCaller, RuntimeCall, RuntimeEvent, + TestClient, }; // Scenario: User `1` in the local chain (id 2000) wants to transfer assets to account `[0u8; 32]` @@ -50,24 +48,22 @@ fn fee_estimation_for_teleport() { new_test_ext_with_balances_and_assets(balances, assets).execute_with(|| { let client = TestClient; let runtime_api = client.runtime_api(); - let extrinsic = TestXt::new( - RuntimeCall::XcmPallet(pallet_xcm::Call::transfer_assets { - dest: Box::new(VersionedLocation::from((Parent, Parachain(1000)))), - beneficiary: Box::new(VersionedLocation::from(AccountId32 { - id: [0u8; 32], - network: None, - })), - assets: Box::new(VersionedAssets::from(vec![ - (Here, 100u128).into(), - (Parent, 20u128).into(), - ])), - fee_asset_item: 1, // Fees are paid with the RelayToken - weight_limit: Unlimited, - }), - Some((who, extra())), - ); + let call = RuntimeCall::XcmPallet(pallet_xcm::Call::transfer_assets { + dest: Box::new(VersionedLocation::from((Parent, Parachain(1000)))), + beneficiary: Box::new(VersionedLocation::from(AccountId32 { + id: [0u8; 32], + network: None, + })), + assets: Box::new(VersionedAssets::from(vec![ + (Here, 100u128).into(), + (Parent, 20u128).into(), + ])), + fee_asset_item: 1, // Fees are paid with the RelayToken + weight_limit: Unlimited, + }); + let origin = OriginCaller::system(RawOrigin::Signed(who)); let dry_run_effects = - runtime_api.dry_run_extrinsic(H256::zero(), extrinsic).unwrap().unwrap(); + runtime_api.dry_run_call(H256::zero(), origin, call).unwrap().unwrap(); assert_eq!( dry_run_effects.local_xcm, @@ -130,14 +126,6 @@ fn fee_estimation_for_teleport() { message: send_message.clone(), message_id: fake_message_hash(&send_message), }), - RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_parts(107074070, 0), /* Will break if weights get - * updated. 
*/ - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - } - }), ] ); @@ -216,21 +204,19 @@ fn dry_run_reserve_asset_transfer() { new_test_ext_with_balances_and_assets(balances, assets).execute_with(|| { let client = TestClient; let runtime_api = client.runtime_api(); - let extrinsic = TestXt::new( - RuntimeCall::XcmPallet(pallet_xcm::Call::transfer_assets { - dest: Box::new(VersionedLocation::from((Parent, Parachain(1000)))), - beneficiary: Box::new(VersionedLocation::from(AccountId32 { - id: [0u8; 32], - network: None, - })), - assets: Box::new(VersionedAssets::from((Parent, 100u128))), - fee_asset_item: 0, - weight_limit: Unlimited, - }), - Some((who, extra())), - ); + let call = RuntimeCall::XcmPallet(pallet_xcm::Call::transfer_assets { + dest: Box::new(VersionedLocation::from((Parent, Parachain(1000)))), + beneficiary: Box::new(VersionedLocation::from(AccountId32 { + id: [0u8; 32], + network: None, + })), + assets: Box::new(VersionedAssets::from((Parent, 100u128))), + fee_asset_item: 0, + weight_limit: Unlimited, + }); + let origin = OriginCaller::system(RawOrigin::Signed(who)); let dry_run_effects = - runtime_api.dry_run_extrinsic(H256::zero(), extrinsic).unwrap().unwrap(); + runtime_api.dry_run_call(H256::zero(), origin, call).unwrap().unwrap(); assert_eq!( dry_run_effects.local_xcm, @@ -281,14 +267,6 @@ fn dry_run_reserve_asset_transfer() { message: send_message.clone(), message_id: fake_message_hash(&send_message), }), - RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_parts(107074066, 0), /* Will break if weights get - * updated. */ - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - } - }), ] ); }); diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/tests/mock.rs b/polkadot/xcm/xcm-fee-payment-runtime-api/tests/mock.rs index a1794ab99de..aa6c1422b60 100644 --- a/polkadot/xcm/xcm-fee-payment-runtime-api/tests/mock.rs +++ b/polkadot/xcm/xcm-fee-payment-runtime-api/tests/mock.rs @@ -29,7 +29,7 @@ use frame_support::{ use frame_system::{EnsureRoot, RawOrigin as SystemRawOrigin}; use pallet_xcm::TestWeightInfo; use sp_runtime::{ - traits::{Block as BlockT, Get, IdentityLookup, MaybeEquivalence, TryConvert}, + traits::{Dispatchable, Get, IdentityLookup, MaybeEquivalence, TryConvert}, BuildStorage, SaturatedConversion, }; use sp_std::{cell::RefCell, marker::PhantomData}; @@ -45,7 +45,7 @@ use xcm_executor::{ }; use xcm_fee_payment_runtime_api::{ - dry_run::{Error as XcmDryRunApiError, ExtrinsicDryRunEffects, XcmDryRunApi, XcmDryRunEffects}, + dry_run::{CallDryRunEffects, DryRunApi, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::{Error as XcmPaymentApiError, XcmPaymentApi}, }; @@ -58,30 +58,13 @@ construct_runtime! { } } -pub type SignedExtra = ( - // frame_system::CheckEra, - // frame_system::CheckNonce, - frame_system::CheckWeight, -); +pub type SignedExtra = (frame_system::CheckWeight,); pub type TestXt = sp_runtime::testing::TestXt; type Block = sp_runtime::testing::Block; type Balance = u128; type AssetIdForAssetsPallet = u32; type AccountId = u64; -pub fn extra() -> SignedExtra { - (frame_system::CheckWeight::new(),) -} - -type Executive = frame_executive::Executive< - TestRuntime, - Block, - frame_system::ChainContext, - TestRuntime, - AllPalletsWithSystem, - (), ->; - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for TestRuntime { type Block = Block; @@ -467,29 +450,21 @@ sp_api::mock_impl_runtime_apis! 
{ } }

- impl XcmDryRunApi for RuntimeApi {
- fn dry_run_extrinsic(extrinsic: ::Extrinsic) -> Result, XcmDryRunApiError> {
+ impl DryRunApi for RuntimeApi {
+ fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> {
 use xcm_executor::RecordXcm;
- // We want to record the XCM that's executed, so we can return it.
 pallet_xcm::Pallet::::set_record_xcm(true);
- let result = Executive::apply_extrinsic(extrinsic).map_err(|error| {
- log::error!(
- target: "xcm::XcmDryRunApi::dry_run_extrinsic",
- "Applying extrinsic failed with error {:?}",
- error,
- );
- XcmDryRunApiError::InvalidExtrinsic
- })?;
- // Nothing gets committed to storage in runtime APIs, so there's no harm in leaving the flag as true.
+ let result = call.dispatch(origin.into());
+ pallet_xcm::Pallet::::set_record_xcm(false);
 let local_xcm = pallet_xcm::Pallet::::recorded_xcm();
 let forwarded_xcms = sent_xcm()
- .into_iter()
- .map(|(location, message)| (
- VersionedLocation::from(location),
- vec![VersionedXcm::from(message)],
- )).collect();
- let events: Vec = System::events().iter().map(|record| record.event.clone()).collect();
- Ok(ExtrinsicDryRunEffects {
+ .into_iter()
+ .map(|(location, message)| (
+ VersionedLocation::from(location),
+ vec![VersionedXcm::from(message)],
+ )).collect();
+ let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect();
+ Ok(CallDryRunEffects {
 local_xcm: local_xcm.map(VersionedXcm::<()>::from),
 forwarded_xcms,
 emitted_events: events,
@@ -500,7 +475,7 @@ sp_api::mock_impl_runtime_apis! {
 fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, XcmDryRunApiError> {
 let origin_location: Location = origin_location.try_into().map_err(|error| {
 log::error!(
- target: "xcm::XcmDryRunApi::dry_run_xcm",
+ target: "xcm::DryRunApi::dry_run_xcm",
 "Location version conversion failed with error: {:?}",
 error,
 );
@@ -508,7 +483,7 @@ sp_api::mock_impl_runtime_apis! {
 })?;
 let xcm: Xcm = xcm.try_into().map_err(|error| {
 log::error!(
- target: "xcm::XcmDryRunApi::dry_run_xcm",
+ target: "xcm::DryRunApi::dry_run_xcm",
 "Xcm version conversion failed with error {:?}",
 error,
 );

diff --git a/prdoc/pr_4621.prdoc b/prdoc/pr_4621.prdoc
new file mode 100644
index 00000000000..ebc06b92b39
--- /dev/null
+++ b/prdoc/pr_4621.prdoc
@@ -0,0 +1,43 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Change XcmDryRunApi::dry_run_extrinsic to take a call instead
+
+doc:
+  - audience: Runtime User
+    description: |
+      The XcmDryRunApi now dry-runs calls instead of extrinsics.
+      This means it's possible to dry-run an extrinsic before signing it,
+      allowing for seamless dry-running in dapps.
+      Additionally, calls can now be dry-run for different accounts.
+  - audience: Runtime Dev
+    description: |
+      The XcmDryRunApi::dry_run_extrinsic function was replaced by
+      XcmDryRunApi::dry_run_call.
+      This new function takes an origin (OriginCaller, the encodable inner variant)
+      and a call instead of an extrinsic.
+      This was needed so the user does not have to sign twice: once for the dry-run
+      and a second time to actually submit the extrinsic.
+      Additionally, calls can now be dry-run for different accounts.
+      The implementation for this runtime API is now simpler, being `call.dispatch(origin.into())`
+      instead of using the `Executive`.
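For runtimes adopting the new API, the wiring reduces to a thin delegation to the pallet-xcm helpers shown earlier in this patch. The sketch below writes out the generic parameters, which the diffs above elide; the exact generic argument order on the trait and the helper turbofishes are assumptions inferred from the helper signatures, not confirmed by this patch:

```rust
// Editor's sketch: delegating DryRunApi inside a runtime's `impl_runtime_apis!`
// block. Generic argument order is an assumption based on the helper bounds.
impl xcm_fee_payment_runtime_api::dry_run::DryRunApi<Block, RuntimeCall, RuntimeEvent, OriginCaller> for Runtime {
	fn dry_run_call(
		origin: OriginCaller,
		call: RuntimeCall,
	) -> Result<CallDryRunEffects<RuntimeEvent>, XcmDryRunApiError> {
		// Dispatches the call with XCM recording enabled and collects events
		// plus any forwarded XCMs from the router.
		PolkadotXcm::dry_run_call::<Runtime, xcm_config::XcmRouter, OriginCaller, RuntimeCall>(
			origin, call,
		)
	}

	fn dry_run_xcm(
		origin_location: VersionedLocation,
		xcm: VersionedXcm<RuntimeCall>,
	) -> Result<XcmDryRunEffects<RuntimeEvent>, XcmDryRunApiError> {
		// Executes the program with the executor and reports its effects.
		PolkadotXcm::dry_run_xcm::<Runtime, xcm_config::XcmRouter, RuntimeCall, xcm_config::XcmConfig>(
			origin_location, xcm,
		)
	}
}
```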
+
+crates:
+  - name: xcm-fee-payment-runtime-api
+    bump: major
+  - name: penpal-runtime
+    bump: major
+  - name: xcm-emulator
+    bump: minor
+  - name: polkadot-service
+    bump: major
+  - name: rococo-runtime
+    bump: major
+  - name: westend-runtime
+    bump: major
+  - name: asset-hub-rococo-runtime
+    bump: major
+  - name: asset-hub-westend-runtime
+    bump: major
+  - name: pallet-xcm
+    bump: minor

-- 
GitLab

From f4dc8d22b49866a10fb720acee6a2c0e3249e22b Mon Sep 17 00:00:00 2001
From: eskimor
Date: Wed, 29 May 2024 23:21:52 +0200
Subject: [PATCH 088/106] Broker new price adapter (#4521)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fixes #4360

Also rename: AllowedRenewals -> PotentialRenewals to avoid confusing future
readers. (An entry in `AllowedRenewals` is not enough to allow a renewal; the
assignment also has to be complete, which is only checked afterwards.)

- [x] Does not work with renewals as is - fix.
- [x] More tests
- [x] PR docs

Edit 1: (Relevant blog post:
https://grillapp.net/12935/agile-coretime-pricing-explained-166522?ref=29715)

---------

Co-authored-by: eskimor
Co-authored-by: Dónal Murray
Co-authored-by: command-bot <>
---
 Cargo.lock | 1 +
 .../coretime/coretime-rococo/src/coretime.rs | 2 +-
 .../coretime/coretime-rococo/src/lib.rs | 1 +
 .../src/weights/pallet_broker.rs | 8 +-
 .../coretime/coretime-westend/src/coretime.rs | 2 +-
 .../coretime/coretime-westend/src/lib.rs | 1 +
 .../src/weights/pallet_broker.rs | 8 +-
 prdoc/pr_4521.prdoc | 28 ++
 substrate/bin/node/runtime/src/lib.rs | 2 +-
 substrate/frame/broker/Cargo.toml | 1 +
 substrate/frame/broker/src/adapt_price.rs | 249 ++++++++---
 substrate/frame/broker/src/benchmarking.rs | 26 +-
 .../frame/broker/src/dispatchable_impls.rs | 63 +--
 substrate/frame/broker/src/lib.rs | 25 +-
 substrate/frame/broker/src/migration.rs | 59 +++
 substrate/frame/broker/src/mock.rs | 6 +-
 substrate/frame/broker/src/tests.rs | 118 ++++--
 substrate/frame/broker/src/tick_impls.rs | 62 ++-
 substrate/frame/broker/src/types.rs | 27 +-
 substrate/frame/broker/src/utility_impls.rs | 21 +-
 substrate/frame/broker/src/weights.rs | 388 +++++++++---------
 21 files changed, 703 insertions(+), 395 deletions(-)
 create mode 100644 prdoc/pr_4521.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index c971ebcba9c..e8732f64efa 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -9827,6 +9827,7 @@ dependencies = [
 "sp-io",
 "sp-runtime",
 "sp-std 14.0.0",
+ "sp-tracing 16.0.0",
 ]

 [[package]]

diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs
index 742dd50f6fa..ec3a4f31202 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs
@@ -232,5 +232,5 @@ impl pallet_broker::Config for Runtime {
 type WeightInfo = weights::pallet_broker::WeightInfo;
 type PalletId = BrokerPalletId;
 type AdminOrigin = EnsureRoot;
- type PriceAdapter = pallet_broker::Linear;
+ type PriceAdapter = pallet_broker::CenterTargetPrice;
 }

diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
index f43bb1c1e41..b7880279048 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
@@ -112,6 +112,7 @@ pub type Migrations = (
 cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4,
cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5, pallet_broker::migration::MigrateV0ToV1, + pallet_broker::migration::MigrateV1ToV2, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs index 89b1c4c8663..5c9175a18d9 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs @@ -154,8 +154,8 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Broker::AllowedRenewals` (r:1 w:2) - /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `Broker::PotentialRenewals` (r:1 w:2) + /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:0 w:1) @@ -337,8 +337,8 @@ impl pallet_broker::WeightInfo for WeightInfo { } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) - /// Storage: `Broker::AllowedRenewals` (r:1 w:1) - /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `Broker::PotentialRenewals` (r:1 w:1) + /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) fn drop_renewal() -> Weight { // Proof Size summary in bytes: // Measured: `957` diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs index 41cbc62fa21..a5e219b9897 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs @@ -245,5 +245,5 @@ impl pallet_broker::Config for Runtime { type WeightInfo = weights::pallet_broker::WeightInfo; type PalletId = BrokerPalletId; type AdminOrigin = EnsureRoot; - type PriceAdapter = pallet_broker::Linear; + type PriceAdapter = pallet_broker::CenterTargetPrice; } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index ff2456dc177..78b963e3b40 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -111,6 +111,7 @@ pub type Migrations = ( pallet_collator_selection::migration::v2::MigrationToV2, cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, pallet_broker::migration::MigrateV0ToV1, + pallet_broker::migration::MigrateV1ToV2, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs 
index 13d5fcf3898..7e1c832a909 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs @@ -152,8 +152,8 @@ impl pallet_broker::WeightInfo for WeightInfo { /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Broker::AllowedRenewals` (r:1 w:2) - /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `Broker::PotentialRenewals` (r:1 w:2) + /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:0 w:1) @@ -335,8 +335,8 @@ impl pallet_broker::WeightInfo for WeightInfo { } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) - /// Storage: `Broker::AllowedRenewals` (r:1 w:1) - /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `Broker::PotentialRenewals` (r:1 w:1) + /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) fn drop_renewal() -> Weight { // Proof Size summary in bytes: // Measured: `556` diff --git a/prdoc/pr_4521.prdoc b/prdoc/pr_4521.prdoc new file mode 100644 index 00000000000..a8b42a2c7ee --- /dev/null +++ b/prdoc/pr_4521.prdoc @@ -0,0 +1,28 @@ +title: AdaptPrice trait is now price controlled + +doc: + - audience: Runtime Dev + description: | + The broker pallet price adaptation interface is changed to be less opinionated and more + information is made available to the `AdaptPrice` trait. A new example impl is included which + adapts the price based not on the number of cores sold, but rather on the price that was + achieved during the sale to mitigate a potential price manipulation vector. More information + here: + + https://github.com/paritytech/polkadot-sdk/issues/4360 + + - audience: Runtime User + description: | + The price controller of the Rococo and Westend Coretime chain will be + adjusted with this release. This will very likely be used in the + fellowship production runtime to have a much larger leadin. This fixes a + price manipulation issue we discovered with the Kusama launch. + +crates: + - name: pallet-broker + bump: minor + - name: coretime-rococo-runtime + bump: minor + - name: coretime-westend-runtime + bump: minor + diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 7d9128bb940..801abc28d3d 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -2145,7 +2145,7 @@ impl pallet_broker::Config for Runtime { type WeightInfo = (); type PalletId = BrokerPalletId; type AdminOrigin = EnsureRoot; - type PriceAdapter = pallet_broker::Linear; + type PriceAdapter = pallet_broker::CenterTargetPrice; } parameter_types! 
{ diff --git a/substrate/frame/broker/Cargo.toml b/substrate/frame/broker/Cargo.toml
index 8f3f30ec58e..8a84fbfdfb7 100644
--- a/substrate/frame/broker/Cargo.toml
+++ b/substrate/frame/broker/Cargo.toml
@@ -30,6 +30,7 @@ frame-system = { path = "../system", default-features = false }
 [dev-dependencies]
 sp-io = { path = "../../primitives/io" }
+sp-tracing = { path = "../../primitives/tracing" }
 pretty_assertions = "1.3.0"
 [features]
diff --git a/substrate/frame/broker/src/adapt_price.rs b/substrate/frame/broker/src/adapt_price.rs
index fbcd7afdf0d..9b2e1dd8997 100644
--- a/substrate/frame/broker/src/adapt_price.rs
+++ b/substrate/frame/broker/src/adapt_price.rs
@@ -17,59 +17,122 @@
 #![deny(missing_docs)]
-use crate::CoreIndex;
+use crate::{CoreIndex, SaleInfoRecord};
 use sp_arithmetic::{traits::One, FixedU64};
-use sp_runtime::Saturating;
+use sp_runtime::{FixedPointNumber, FixedPointOperand, Saturating};
+
+/// Performance of a past sale.
+#[derive(Copy, Clone)]
+pub struct SalePerformance<Balance> {
+    /// The price at which the last core was sold.
+    ///
+    /// Will be `None` if no cores have been offered.
+    pub sellout_price: Option<Balance>,
+
+    /// The minimum price that was achieved in this sale.
+    pub end_price: Balance,
+
+    /// The number of cores we want to sell, ideally.
+    pub ideal_cores_sold: CoreIndex,
+
+    /// Number of cores which are/have been offered for sale.
+    pub cores_offered: CoreIndex,
+
+    /// Number of cores which have been sold; never more than `cores_offered`.
+    pub cores_sold: CoreIndex,
+}
+
+/// Result of `AdaptPrice::adapt_price`.
+#[derive(Copy, Clone)]
+pub struct AdaptedPrices<Balance> {
+    /// New minimum price to use.
+    pub end_price: Balance,
+
+    /// Price the controller is optimizing for.
+    ///
+    /// This is the price "expected" by the controller based on the previous sale. We assume that
+    /// sales in this period will be around this price, assuming stable market conditions.
+    ///
+    /// Think of it as the expected market price. This can be used for determining what to charge
+    /// for renewals that don't yet have any price information, e.g. for expired legacy leases.
+    pub target_price: Balance,
+}
+
+impl<Balance: Copy> SalePerformance<Balance> {
+    /// Construct performance from the data of a `SaleInfoRecord`.
+    pub fn from_sale<BlockNumber>(record: &SaleInfoRecord<Balance, BlockNumber>) -> Self {
+        Self {
+            sellout_price: record.sellout_price,
+            end_price: record.end_price,
+            ideal_cores_sold: record.ideal_cores_sold,
+            cores_offered: record.cores_offered,
+            cores_sold: record.cores_sold,
+        }
+    }
+
+    #[cfg(test)]
+    fn new(sellout_price: Option<Balance>, end_price: Balance) -> Self {
+        Self { sellout_price, end_price, ideal_cores_sold: 0, cores_offered: 0, cores_sold: 0 }
+    }
+}
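To see how these two structs travel through the reworked trait, here is a minimal, self-contained sketch. It uses plain `u128` balances in place of the pallet's generic `Balance` and mirrors the semantics of the `()` implementation below; it is illustrative only, not the pallet's actual generic code.

```rust
/// Stand-ins for the pallet's generic types, with `u128` balances for illustration only.
#[derive(Copy, Clone, Debug)]
struct SalePerformance {
    sellout_price: Option<u128>,
    end_price: u128,
}

#[derive(Copy, Clone, Debug)]
struct AdaptedPrices {
    end_price: u128,
    target_price: u128,
}

/// Mirrors the `()` implementation: with no real controller, both the new floor
/// and the expected market price fall back to the achieved sellout price, or to
/// the old floor if nothing sold out.
fn adapt_price_noop(performance: SalePerformance) -> AdaptedPrices {
    let price = performance.sellout_price.unwrap_or(performance.end_price);
    AdaptedPrices { end_price: price, target_price: price }
}

fn main() {
    let performance = SalePerformance { sellout_price: Some(500), end_price: 100 };
    // Prints: AdaptedPrices { end_price: 500, target_price: 500 }
    println!("{:?}", adapt_price_noop(performance));
}
```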
 /// Type for determining how to set price.
-pub trait AdaptPrice {
+pub trait AdaptPrice<Balance: FixedPointOperand> {
     /// Return the factor by which the regular price must be multiplied during the leadin period.
     ///
     /// - `when`: The amount through the leadin period; between zero and one.
     fn leadin_factor_at(when: FixedU64) -> FixedU64;
-    /// Return the correction factor by which the regular price must be multiplied based on market
-    /// performance.
+
+    /// Return adapted prices for the next sale.
     ///
-    /// - `sold`: The number of cores sold.
-    /// - `target`: The target number of cores to be sold (must be larger than zero).
-    /// - `limit`: The maximum number of cores to be sold.
-    fn adapt_price(sold: CoreIndex, target: CoreIndex, limit: CoreIndex) -> FixedU64;
+    /// Based on the previous sale's performance.
+    fn adapt_price(performance: SalePerformance<Balance>) -> AdaptedPrices<Balance>;
 }

-impl AdaptPrice for () {
+impl<Balance: FixedPointOperand> AdaptPrice<Balance> for () {
     fn leadin_factor_at(_: FixedU64) -> FixedU64 {
         FixedU64::one()
     }
-    fn adapt_price(_: CoreIndex, _: CoreIndex, _: CoreIndex) -> FixedU64 {
-        FixedU64::one()
+    fn adapt_price(performance: SalePerformance<Balance>) -> AdaptedPrices<Balance> {
+        let price = performance.sellout_price.unwrap_or(performance.end_price);
+        AdaptedPrices { end_price: price, target_price: price }
     }
 }

-/// Simple implementation of `AdaptPrice` giving a monotonic leadin and a linear price change based
-/// on cores sold.
-pub struct Linear;
-impl AdaptPrice for Linear {
+/// Simple implementation of `AdaptPrice` with two linear phases.
+///
+/// A steep one downwards to the target price, which is 1/10 of the maximum price, and a flatter
+/// one down to the minimum price, which is 1/100 of the maximum price.
+pub struct CenterTargetPrice<Balance>(core::marker::PhantomData<Balance>);
+
+impl<Balance: FixedPointOperand> AdaptPrice<Balance> for CenterTargetPrice<Balance> {
     fn leadin_factor_at(when: FixedU64) -> FixedU64 {
-        FixedU64::from(2).saturating_sub(when)
-    }
-    fn adapt_price(sold: CoreIndex, target: CoreIndex, limit: CoreIndex) -> FixedU64 {
-        if sold <= target {
-            // Range of [0.5, 1.0].
-            FixedU64::from_rational(1, 2).saturating_add(FixedU64::from_rational(
-                sold.into(),
-                target.saturating_mul(2).into(),
-            ))
+        if when <= FixedU64::from_rational(1, 2) {
+            FixedU64::from(100).saturating_sub(when.saturating_mul(180.into()))
         } else {
-            // Range of (1.0, 2].
-
-            // Unchecked math: In this branch we know that sold > target. The limit must be >= sold
-            // by construction, and thus target must be < limit.
-            FixedU64::one().saturating_add(FixedU64::from_rational(
-                (sold - target).into(),
-                (limit - target).into(),
-            ))
+            FixedU64::from(19).saturating_sub(when.saturating_mul(18.into()))
         }
     }
+
+    fn adapt_price(performance: SalePerformance<Balance>) -> AdaptedPrices<Balance> {
+        let Some(sellout_price) = performance.sellout_price else {
+            return AdaptedPrices {
+                end_price: performance.end_price,
+                target_price: FixedU64::from(10).saturating_mul_int(performance.end_price),
+            }
+        };
+
+        let price = FixedU64::from_rational(1, 10).saturating_mul_int(sellout_price);
+        let price = if price == Balance::zero() {
+            // We could never recover from a price equal to 0.
+            sellout_price
+        } else {
+            price
+        };
+
+        AdaptedPrices { end_price: price, target_price: sellout_price }
+    }
 }

 #[cfg(test)]
@@ -78,37 +141,103 @@ mod tests {
     #[test]
     fn linear_no_panic() {
-        for limit in 0..10 {
-            for target in 1..10 {
-                for sold in 0..=limit {
-                    let price = Linear::adapt_price(sold, target, limit);
-
-                    if sold > target {
-                        assert!(price > FixedU64::one());
-                    } else {
-                        assert!(price <= FixedU64::one());
-                    }
-                }
+        for sellout in 0..=11 {
+            for price in 0..10 {
+                let sellout_price = if sellout == 11 { None } else { Some(sellout) };
+                CenterTargetPrice::adapt_price(SalePerformance::new(sellout_price, price));
             }
         }
     }
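The two phases are easier to see with concrete numbers. The following standalone sketch approximates `CenterTargetPrice` with `f64` and integer arithmetic, so it is illustrative rather than bit-exact `FixedU64` math; the values match the assertions in the tests below.

```rust
// f64 approximation of CenterTargetPrice::leadin_factor_at; illustrative only.
fn leadin_factor_at(when: f64) -> f64 {
    if when <= 0.5 {
        // Steep phase: factor falls from 100 at the start to 10 at the halfway point.
        100.0 - when * 180.0
    } else {
        // Flat phase: factor falls from 10 at the halfway point to 1 at the end.
        19.0 - when * 18.0
    }
}

// Integer approximation of CenterTargetPrice::adapt_price.
// Returns (new end price, new target price).
fn adapt_price(sellout_price: Option<u128>, end_price: u128) -> (u128, u128) {
    let Some(sellout_price) = sellout_price else {
        // Nothing sold out: keep the floor, target ten times the floor.
        return (end_price, end_price * 10);
    };
    let tenth = sellout_price / 10;
    // Guard against a floor of zero, which the market could never recover from.
    let end = if tenth == 0 { sellout_price } else { tenth };
    (end, sellout_price)
}

fn main() {
    for when in [0.0, 0.25, 0.5, 0.75, 1.0] {
        println!("factor at {when}: {}", leadin_factor_at(when)); // 100, 55, 10, 5.5, 1
    }
    assert_eq!(adapt_price(Some(10_000), 100), (1_000, 10_000)); // adjusts upwards
    assert_eq!(adapt_price(Some(100), 100), (10, 100)); // adjusts downwards
    assert_eq!(adapt_price(None, 100), (100, 1_000)); // no sellout: floor kept
}
```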
     #[test]
-    fn linear_bound_check() {
-        // Using constraints from pallet implementation i.e. `limit >= sold`.
-        // Check extremes
-        let limit = 10;
-        let target = 5;
-
-        // Maximally sold: `sold == limit`
-        assert_eq!(Linear::adapt_price(limit, target, limit), FixedU64::from_float(2.0));
-        // Ideally sold: `sold == target`
-        assert_eq!(Linear::adapt_price(target, target, limit), FixedU64::one());
-        // Minimally sold: `sold == 0`
-        assert_eq!(Linear::adapt_price(0, target, limit), FixedU64::from_float(0.5));
-        // Optimistic target: `target == limit`
-        assert_eq!(Linear::adapt_price(limit, limit, limit), FixedU64::one());
-        // Pessimistic target: `target == 0`
-        assert_eq!(Linear::adapt_price(limit, 0, limit), FixedU64::from_float(2.0));
+    fn leadin_price_bound_check() {
+        assert_eq!(
+            CenterTargetPrice::<u64>::leadin_factor_at(FixedU64::from(0)),
+            FixedU64::from(100)
+        );
+        assert_eq!(
+            CenterTargetPrice::<u64>::leadin_factor_at(FixedU64::from_rational(1, 4)),
+            FixedU64::from(55)
+        );
+
+        assert_eq!(
+            CenterTargetPrice::<u64>::leadin_factor_at(FixedU64::from_float(0.5)),
+            FixedU64::from(10)
+        );
+
+        assert_eq!(
+            CenterTargetPrice::<u64>::leadin_factor_at(FixedU64::from_rational(3, 4)),
+            FixedU64::from_float(5.5)
+        );
+        assert_eq!(CenterTargetPrice::<u64>::leadin_factor_at(FixedU64::one()), FixedU64::one());
+    }
+
+    #[test]
+    fn no_op_sale_is_good() {
+        let prices = CenterTargetPrice::adapt_price(SalePerformance::new(None, 1));
+        assert_eq!(prices.target_price, 10);
+        assert_eq!(prices.end_price, 1);
+    }
+
+    #[test]
+    fn price_stays_stable_on_optimal_sale() {
+        // Check price stays stable if sold at the optimal price:
+        let mut performance = SalePerformance::new(Some(1000), 100);
+        for _ in 0..10 {
+            let prices = CenterTargetPrice::adapt_price(performance);
+            performance.sellout_price = Some(1000);
+            performance.end_price = prices.end_price;
+
+            assert!(prices.end_price <= 101);
+            assert!(prices.end_price >= 99);
+            assert!(prices.target_price <= 1001);
+            assert!(prices.target_price >= 999);
+        }
+    }
+
+    #[test]
+    fn price_adjusts_correctly_upwards() {
+        let performance = SalePerformance::new(Some(10_000), 100);
+        let prices = CenterTargetPrice::adapt_price(performance);
+        assert_eq!(prices.target_price, 10_000);
+        assert_eq!(prices.end_price, 1000);
+    }
+
+    #[test]
+    fn price_adjusts_correctly_downwards() {
+        let performance = SalePerformance::new(Some(100), 100);
+        let prices = CenterTargetPrice::adapt_price(performance);
+        assert_eq!(prices.target_price, 100);
+        assert_eq!(prices.end_price, 10);
+    }
+
+    #[test]
+    fn price_never_goes_to_zero_and_recovers() {
+        // Check that the price never drops to zero and can recover from a minimal sellout price:
+        let sellout_price = 1;
+        let mut performance = SalePerformance::new(Some(sellout_price), 1);
+        for _ in 0..11 {
+            let prices = CenterTargetPrice::adapt_price(performance);
+            performance.sellout_price = Some(sellout_price);
+            performance.end_price = prices.end_price;
+
+            assert!(prices.end_price <= sellout_price);
+            assert!(prices.end_price > 0);
+        }
+    }
+
+    #[test]
+    fn renewal_price_is_correct_on_no_sale() {
+        let performance = SalePerformance::new(None, 100);
+        let prices = CenterTargetPrice::adapt_price(performance);
+        assert_eq!(prices.target_price, 1000);
+        assert_eq!(prices.end_price, 100);
+    }
+
+    #[test]
+    fn renewal_price_is_sell_out() {
+        let performance = SalePerformance::new(Some(1000), 100);
+        let prices = CenterTargetPrice::adapt_price(performance);
+        assert_eq!(prices.target_price, 1000);
+    }
 }
diff --git a/substrate/frame/broker/src/benchmarking.rs b/substrate/frame/broker/src/benchmarking.rs
index 7533e3dc68c..9cb5ad096c8 100644
--- a/substrate/frame/broker/src/benchmarking.rs
+++
b/substrate/frame/broker/src/benchmarking.rs @@ -214,8 +214,8 @@ mod benches { Event::SaleInitialized { sale_start: 2u32.into(), leadin_length: 1u32.into(), - start_price: 20u32.into(), - regular_price: 10u32.into(), + start_price: 1000u32.into(), + end_price: 10u32.into(), region_begin: latest_region_begin + config.region_length, region_end: latest_region_begin + config.region_length * 2, ideal_cores_sold: 0, @@ -288,8 +288,8 @@ mod benches { #[extrinsic_call] _(RawOrigin::Signed(caller), region.core); - let id = AllowedRenewalId { core: region.core, when: region.begin + region_len * 2 }; - assert!(AllowedRenewals::::get(id).is_some()); + let id = PotentialRenewalId { core: region.core, when: region.begin + region_len * 2 }; + assert!(PotentialRenewals::::get(id).is_some()); Ok(()) } @@ -670,20 +670,20 @@ mod benches { (T::TimeslicePeriod::get() * (region_len * 3).into()).try_into().ok().unwrap(), ); - let id = AllowedRenewalId { core, when }; - let record = AllowedRenewalRecord { + let id = PotentialRenewalId { core, when }; + let record = PotentialRenewalRecord { price: 1u32.into(), completion: CompletionStatus::Complete(new_schedule()), }; - AllowedRenewals::::insert(id, record); + PotentialRenewals::::insert(id, record); let caller: T::AccountId = whitelisted_caller(); #[extrinsic_call] _(RawOrigin::Signed(caller), core, when); - assert!(AllowedRenewals::::get(id).is_none()); - assert_last_event::(Event::AllowedRenewalDropped { core, when }.into()); + assert!(PotentialRenewals::::get(id).is_none()); + assert_last_event::(Event::PotentialRenewalDropped { core, when }.into()); Ok(()) } @@ -776,12 +776,12 @@ mod benches { let config = new_config_record::(); let now = frame_system::Pallet::::block_number(); - let price = 10u32.into(); + let end_price = 10u32.into(); let commit_timeslice = Broker::::latest_timeslice_ready_to_commit(&config); let sale = SaleInfoRecordOf:: { sale_start: now, leadin_length: Zero::zero(), - price, + end_price, sellout_price: None, region_begin: commit_timeslice, region_end: commit_timeslice.saturating_add(config.region_length), @@ -815,8 +815,8 @@ mod benches { Event::SaleInitialized { sale_start: 2u32.into(), leadin_length: 1u32.into(), - start_price: 20u32.into(), - regular_price: 10u32.into(), + start_price: 1000u32.into(), + end_price: 10u32.into(), region_begin: sale.region_begin + config.region_length, region_end: sale.region_end + config.region_length, ideal_cores_sold: 0, diff --git a/substrate/frame/broker/src/dispatchable_impls.rs b/substrate/frame/broker/src/dispatchable_impls.rs index 45a0a514c30..79c1a1f7979 100644 --- a/substrate/frame/broker/src/dispatchable_impls.rs +++ b/substrate/frame/broker/src/dispatchable_impls.rs @@ -70,7 +70,10 @@ impl Pallet { Ok(()) } - pub(crate) fn do_start_sales(price: BalanceOf, extra_cores: CoreIndex) -> DispatchResult { + pub(crate) fn do_start_sales( + end_price: BalanceOf, + extra_cores: CoreIndex, + ) -> DispatchResult { let config = Configuration::::get().ok_or(Error::::Uninitialized)?; // Determine the core count @@ -93,7 +96,7 @@ impl Pallet { let old_sale = SaleInfoRecord { sale_start: now, leadin_length: Zero::zero(), - price, + end_price, sellout_price: None, region_begin: commit_timeslice, region_end: commit_timeslice.saturating_add(config.region_length), @@ -102,7 +105,7 @@ impl Pallet { cores_offered: 0, cores_sold: 0, }; - Self::deposit_event(Event::::SalesStarted { price, core_count }); + Self::deposit_event(Event::::SalesStarted { price: end_price, core_count }); Self::rotate_sale(old_sale, 
&config, &status);
         Status::<T>::put(&status);
         Ok(())
@@ -121,12 +124,8 @@ impl<T: Config> Pallet<T> {
         let price = Self::sale_price(&sale, now);
         ensure!(price_limit >= price, Error::<T>::Overpriced);

-        Self::charge(&who, price)?;
-        let core = sale.first_core.saturating_add(sale.cores_sold);
-        sale.cores_sold.saturating_inc();
-        if sale.cores_sold <= sale.ideal_cores_sold || sale.sellout_price.is_none() {
-            sale.sellout_price = Some(price);
-        }
+        let core = Self::purchase_core(&who, price, &mut sale)?;
+
         SaleInfo::<T>::put(&sale);
         let id = Self::issue(core, sale.region_begin, sale.region_end, Some(who.clone()), Some(price));
@@ -135,7 +134,7 @@ impl<T: Config> Pallet<T> {
         Ok(id)
     }

-    /// Must be called on a core in `AllowedRenewals` whose value is a timeslice equal to the
+    /// Must be called on a core in `PotentialRenewals` whose value is a timeslice equal to the
     /// current sale status's `region_end`.
     pub(crate) fn do_renew(who: T::AccountId, core: CoreIndex) -> Result<CoreIndex, DispatchError> {
         let config = Configuration::<T>::get().ok_or(Error::<T>::Uninitialized)?;
@@ -143,14 +142,15 @@ impl<T: Config> Pallet<T> {
         let mut sale = SaleInfo::<T>::get().ok_or(Error::<T>::NoSales)?;
         Self::ensure_cores_for_sale(&status, &sale)?;

-        let renewal_id = AllowedRenewalId { core, when: sale.region_begin };
-        let record = AllowedRenewals::<T>::get(renewal_id).ok_or(Error::<T>::NotAllowed)?;
+        let renewal_id = PotentialRenewalId { core, when: sale.region_begin };
+        let record = PotentialRenewals::<T>::get(renewal_id).ok_or(Error::<T>::NotAllowed)?;
         let workload = record.completion.drain_complete().ok_or(Error::<T>::IncompleteAssignment)?;

         let old_core = core;
-        let core = sale.first_core.saturating_add(sale.cores_sold);
-        Self::charge(&who, record.price)?;
+
+        let core = Self::purchase_core(&who, record.price, &mut sale)?;
+
         Self::deposit_event(Event::Renewed {
             who,
             old_core,
@@ -161,19 +161,24 @@
             workload: workload.clone(),
         });

-        sale.cores_sold.saturating_inc();
-
         Workplan::<T>::insert((sale.region_begin, core), &workload);

         let begin = sale.region_end;
         let price_cap = record.price + config.renewal_bump * record.price;
         let now = frame_system::Pallet::<T>::block_number();
         let price = Self::sale_price(&sale, now).min(price_cap);
-        let new_record = AllowedRenewalRecord { price, completion: Complete(workload) };
-        AllowedRenewals::<T>::remove(renewal_id);
-        AllowedRenewals::<T>::insert(AllowedRenewalId { core, when: begin }, &new_record);
+        log::debug!(
+            "Renew with: sale price: {:?}, price cap: {:?}, old price: {:?}",
+            price,
+            price_cap,
+            record.price
+        );
+        let new_record = PotentialRenewalRecord { price, completion: Complete(workload) };
+        PotentialRenewals::<T>::remove(renewal_id);
+        PotentialRenewals::<T>::insert(PotentialRenewalId { core, when: begin }, &new_record);
         SaleInfo::<T>::put(&sale);
         if let Some(workload) = new_record.completion.drain_complete() {
+            log::debug!("Recording renewable price for next run: {:?}", price);
             Self::deposit_event(Event::Renewable { core, price, begin, workload });
         }
         Ok(core)
@@ -281,17 +286,19 @@
         let duration = region.end.saturating_sub(region_id.begin);
         if duration == config.region_length && finality == Finality::Final {
             if let Some(price) = region.paid {
-                let renewal_id = AllowedRenewalId { core: region_id.core, when: region.end };
-                let assigned = match AllowedRenewals::<T>::get(renewal_id) {
-                    Some(AllowedRenewalRecord { completion: Partial(w), price: p })
+                let renewal_id = PotentialRenewalId { core: region_id.core, when: region.end };
+                let assigned = match PotentialRenewals::<T>::get(renewal_id) {
+                    Some(PotentialRenewalRecord { completion: Partial(w),
price: p })
                        if price == p => w,
                    _ => CoreMask::void(),
                } | region_id.mask;
                let workload =
                    if assigned.is_complete() { Complete(workplan) } else { Partial(assigned) };
-                let record = AllowedRenewalRecord { price, completion: workload };
-                AllowedRenewals::<T>::insert(&renewal_id, &record);
+                let record = PotentialRenewalRecord { price, completion: workload };
+                // Note: This entry alone does not yet actually allow renewals (the completion
+                // status has to be complete for `do_renew` to accept it).
+                PotentialRenewals::<T>::insert(&renewal_id, &record);
                if let Some(workload) = record.completion.drain_complete() {
                    Self::deposit_event(Event::Renewable {
                        core: region_id.core,
@@ -444,10 +451,10 @@ impl<T: Config> Pallet<T> {
    pub(crate) fn do_drop_renewal(core: CoreIndex, when: Timeslice) -> DispatchResult {
        let status = Status::<T>::get().ok_or(Error::<T>::Uninitialized)?;
        ensure!(status.last_committed_timeslice >= when, Error::<T>::StillValid);
-        let id = AllowedRenewalId { core, when };
-        ensure!(AllowedRenewals::<T>::contains_key(id), Error::<T>::UnknownRenewal);
-        AllowedRenewals::<T>::remove(id);
-        Self::deposit_event(Event::AllowedRenewalDropped { core, when });
+        let id = PotentialRenewalId { core, when };
+        ensure!(PotentialRenewals::<T>::contains_key(id), Error::<T>::UnknownRenewal);
+        PotentialRenewals::<T>::remove(id);
+        Self::deposit_event(Event::PotentialRenewalDropped { core, when });
        Ok(())
    }
diff --git a/substrate/frame/broker/src/lib.rs b/substrate/frame/broker/src/lib.rs
index d59c4c9c6b2..0774c02e1cf 100644
--- a/substrate/frame/broker/src/lib.rs
+++ b/substrate/frame/broker/src/lib.rs
@@ -65,7 +65,7 @@ pub mod pallet {
    use sp_runtime::traits::{Convert, ConvertBack};
    use sp_std::vec::Vec;

-    const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
+    const STORAGE_VERSION: StorageVersion = StorageVersion::new(2);

    #[pallet::pallet]
    #[pallet::storage_version(STORAGE_VERSION)]
@@ -92,7 +92,7 @@ pub mod pallet {
        type Coretime: CoretimeInterface;

        /// The algorithm to determine the next price on the basis of market performance.
-        type PriceAdapter: AdaptPrice;
+        type PriceAdapter: AdaptPrice<BalanceOf<Self>>;

        /// Reversible conversion from local balance to Relay-chain balance. This will typically be
        /// the `Identity`, but provided just in case the chains use different representations.
@@ -136,10 +136,12 @@ pub mod pallet {
    #[pallet::storage]
    pub type SaleInfo<T> = StorageValue<_, SaleInfoRecordOf<T>, OptionQuery>;

-    /// Records of allowed renewals.
+    /// Records of potential renewals.
+    ///
+    /// Renewals will only actually be allowed if the `CompletionStatus` is `Complete`.
    #[pallet::storage]
-    pub type AllowedRenewals<T> =
-        StorageMap<_, Twox64Concat, AllowedRenewalId, AllowedRenewalRecordOf<T>, OptionQuery>;
+    pub type PotentialRenewals<T> =
+        StorageMap<_, Twox64Concat, PotentialRenewalId, PotentialRenewalRecordOf<T>, OptionQuery>;

    /// The current (unassigned or provisionally assigned) Regions.
    #[pallet::storage]
@@ -290,14 +292,13 @@ pub mod pallet {
            /// The price of Bulk Coretime at the beginning of the Leadin Period.
            start_price: BalanceOf<T>,
            /// The price of Bulk Coretime after the Leadin Period.
-            regular_price: BalanceOf<T>,
+            end_price: BalanceOf<T>,
            /// The first timeslice of the Regions which are being sold in this sale.
            region_begin: Timeslice,
            /// The timeslice on which the Regions which are being sold in the sale terminate.
            /// (i.e. One after the last timeslice which the Regions control.)
            region_end: Timeslice,
-            /// The number of cores we want to sell, ideally.
Selling this amount would result in
-            /// no change to the price for the next sale.
+            /// The number of cores we want to sell, ideally.
            ideal_cores_sold: CoreIndex,
            /// Number of cores which are/have been offered for sale.
            cores_offered: CoreIndex,
@@ -413,7 +414,7 @@ pub mod pallet {
            assignment: Vec<(CoreAssignment, PartsOf57600)>,
        },
        /// A record of a potential renewal has been dropped.
-        AllowedRenewalDropped {
+        PotentialRenewalDropped {
            /// The timeslice whose renewal is no longer available.
            when: Timeslice,
            /// The core whose workload is no longer available to be renewed for `when`.
@@ -558,7 +559,7 @@ pub mod pallet {
        /// Begin the Bulk Coretime sales rotation.
        ///
        /// - `origin`: Must be Root or pass `AdminOrigin`.
-        /// - `initial_price`: The price of Bulk Coretime in the first sale.
+        /// - `end_price`: The price of Bulk Coretime after the leadin period in the first sale.
        /// - `extra_cores`: Number of extra cores that should be requested on top of the cores
        ///   required for `Reservations` and `Leases`.
        ///
@@ -570,11 +571,11 @@ pub mod pallet {
        ))]
        pub fn start_sales(
            origin: OriginFor<T>,
-            initial_price: BalanceOf<T>,
+            end_price: BalanceOf<T>,
            extra_cores: CoreIndex,
        ) -> DispatchResultWithPostInfo {
            T::AdminOrigin::ensure_origin_or_root(origin)?;
-            Self::do_start_sales(initial_price, extra_cores)?;
+            Self::do_start_sales(end_price, extra_cores)?;
            Ok(Pays::No.into())
        }
diff --git a/substrate/frame/broker/src/migration.rs b/substrate/frame/broker/src/migration.rs
index 95aa28250a6..f354e447fe8 100644
--- a/substrate/frame/broker/src/migration.rs
+++ b/substrate/frame/broker/src/migration.rs
@@ -77,6 +77,57 @@ mod v1 {
    }
}

+mod v2 {
+    use super::*;
+    use frame_support::{
+        pallet_prelude::{OptionQuery, Twox64Concat},
+        storage_alias,
+    };
+
+    #[storage_alias]
+    pub type AllowedRenewals<T: Config> = StorageMap<
+        Pallet<T>,
+        Twox64Concat,
+        PotentialRenewalId,
+        PotentialRenewalRecordOf<T>,
+        OptionQuery,
+    >;
+
+    pub struct MigrateToV2Impl<T>(PhantomData<T>);
+
+    impl<T: Config> UncheckedOnRuntimeUpgrade for MigrateToV2Impl<T> {
+        fn on_runtime_upgrade() -> frame_support::weights::Weight {
+            let mut count = 0;
+            for (renewal_id, renewal) in AllowedRenewals::<T>::drain() {
+                PotentialRenewals::<T>::insert(renewal_id, renewal);
+                count += 1;
+            }
+
+            log::info!(
+                target: LOG_TARGET,
+                "Storage migration v2 for pallet-broker finished.",
+            );
+
+            // Calculate and return migration weights.
+            T::DbWeight::get().reads_writes(count as u64 + 1, count as u64 + 1)
+        }
+
+        #[cfg(feature = "try-runtime")]
+        fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::TryRuntimeError> {
+            Ok((AllowedRenewals::<T>::iter_keys().count() as u32).encode())
+        }
+
+        #[cfg(feature = "try-runtime")]
+        fn post_upgrade(state: Vec<u8>) -> Result<(), sp_runtime::TryRuntimeError> {
+            let old_count = u32::decode(&mut &state[..]).expect("Known good");
+            let new_count = PotentialRenewals::<T>::iter_values().count() as u32;
+
+            ensure!(old_count == new_count, "Renewal count should not change");
+            Ok(())
+        }
+    }
+}
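The drain-and-reinsert pattern of `MigrateToV2Impl` is easy to reproduce in isolation. Below is a standalone sketch with `BTreeMap` standing in for the two storage maps; the tuple keys and integer records are assumptions for illustration, not the pallet's storage API.

```rust
use std::collections::BTreeMap;

// (core, when) stands in for PotentialRenewalId; u128 for the renewal record.
type RenewalId = (u16, u32);

/// Move every entry from the old map into the new one, returning the item
/// count that feeds the read/write weight accounting.
fn migrate_v1_to_v2(
    allowed_renewals: &mut BTreeMap<RenewalId, u128>,
    potential_renewals: &mut BTreeMap<RenewalId, u128>,
) -> u64 {
    let mut count = 0u64;
    while let Some((id, record)) = allowed_renewals.pop_first() {
        potential_renewals.insert(id, record);
        count += 1;
    }
    count
}

fn main() {
    let mut old = BTreeMap::from([((0, 10), 100u128), ((1, 10), 200)]);
    let mut new = BTreeMap::new();
    let count = migrate_v1_to_v2(&mut old, &mut new);
    // As in the try-runtime hooks: the entry count must be unchanged.
    assert_eq!(count, 2);
    assert!(old.is_empty());
    assert_eq!(new.len(), 2);
}
```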
/// Migrate the pallet storage from `0` to `1`.
pub type MigrateV0ToV1<T> = frame_support::migrations::VersionedMigration<
    0,
@@ -85,3 +136,11 @@ pub type MigrateV0ToV1<T> = frame_support::migrations::VersionedMigration<
    Pallet<T>,
    <T as frame_system::Config>::DbWeight,
>;
+
+/// Migrate the pallet storage from `1` to `2`.
+pub type MigrateV1ToV2<T> = frame_support::migrations::VersionedMigration<
+    1,
+    2,
+    v2::MigrateToV2Impl<T>,
+    Pallet<T>,
+    <T as frame_system::Config>::DbWeight,
+>;
diff --git a/substrate/frame/broker/src/mock.rs b/substrate/frame/broker/src/mock.rs
index 6219b4eff1b..6fff6aa1008 100644
--- a/substrate/frame/broker/src/mock.rs
+++ b/substrate/frame/broker/src/mock.rs
@@ -199,7 +199,7 @@ impl crate::Config for Test {
    type WeightInfo = ();
    type PalletId = TestBrokerId;
    type AdminOrigin = EnsureOneOrRoot;
-    type PriceAdapter = Linear;
+    type PriceAdapter = CenterTargetPrice<BalanceOf<Self>>;
}

pub fn advance_to(b: u64) {
@@ -255,6 +255,10 @@ impl TestExt {
        Self(new_config())
    }

+    pub fn new_with_config(config: ConfigRecordOf<Test>) -> Self {
+        Self(config)
+    }
+
    pub fn advance_notice(mut self, advance_notice: Timeslice) -> Self {
        self.0.advance_notice = advance_notice as u64;
        self
diff --git a/substrate/frame/broker/src/tests.rs b/substrate/frame/broker/src/tests.rs
index f929f0d50dc..e953afd6dc3 100644
--- a/substrate/frame/broker/src/tests.rs
+++ b/substrate/frame/broker/src/tests.rs
@@ -25,7 +25,7 @@ use frame_support::{
};
use frame_system::RawOrigin::Root;
use pretty_assertions::assert_eq;
-use sp_runtime::{traits::Get, TokenError};
+use sp_runtime::{traits::Get, Perbill, TokenError};
use CoreAssignment::*;
use CoretimeTraceItem::*;
use Finality::*;
@@ -78,9 +78,9 @@ fn drop_renewal_works() {
        let e = Error::<Test>::StillValid;
        assert_noop!(Broker::do_drop_renewal(region.core, region.begin + 3), e);
        advance_to(12);
-        assert_eq!(AllowedRenewals::<Test>::iter().count(), 1);
+        assert_eq!(PotentialRenewals::<Test>::iter().count(), 1);
        assert_ok!(Broker::do_drop_renewal(region.core, region.begin + 3));
-        assert_eq!(AllowedRenewals::<Test>::iter().count(), 0);
+        assert_eq!(PotentialRenewals::<Test>::iter().count(), 0);
        let e = Error::<Test>::UnknownRenewal;
        assert_noop!(Broker::do_drop_renewal(region.core, region.begin + 3), e);
    });
@@ -361,22 +361,91 @@ fn migration_works() {
#[test]
fn renewal_works() {
-    TestExt::new().endow(1, 1000).execute_with(|| {
+    let b = 100_000;
+    TestExt::new().endow(1, b).execute_with(move || {
        assert_ok!(Broker::do_start_sales(100, 1));
        advance_to(2);
        let region = Broker::do_purchase(1, u64::max_value()).unwrap();
-        assert_eq!(balance(1), 900);
+        assert_eq!(balance(1), 99_900);
        assert_ok!(Broker::do_assign(region, None, 1001, Final));
        // Should now be renewable.
        advance_to(6);
        assert_noop!(Broker::do_purchase(1, u64::max_value()), Error::<Test>::TooEarly);
        let core = Broker::do_renew(1, region.core).unwrap();
-        assert_eq!(balance(1), 800);
+        assert_eq!(balance(1), 99_800);
        advance_to(8);
        assert_noop!(Broker::do_purchase(1, u64::max_value()), Error::<Test>::SoldOut);
        advance_to(12);
        assert_ok!(Broker::do_renew(1, core));
-        assert_eq!(balance(1), 690);
+        assert_eq!(balance(1), 99_690);
+    });
}
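The arithmetic exercised by the next test is small: a renewal pays the current open-market sale price, capped at the previously paid price plus `renewal_bump`. A sketch of just that calculation, with the 10% bump from the test config hard-coded (illustrative only, not the pallet's `Perbill` code):

```rust
/// Renewal price: the open-market sale price, capped at old price + 10% bump.
fn renewal_price(old_price: u128, market_price: u128) -> u128 {
    let cap = old_price + old_price / 10; // renewal_bump = 10%
    market_price.min(cap)
}

fn main() {
    // Market price above the cap: the bump limits the increase (910 -> 1001).
    assert_eq!(renewal_price(910, 5_000), 1_001);
    // Market price below the cap: the renewal gets the cheaper market price.
    assert_eq!(renewal_price(910, 100), 100);
}
```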
+
+#[test]
+/// Renewals have to affect price as well. Otherwise a market where everything is a renewal would
+/// not work. Renewals happening in the leadin or after are effectively competing with the open
+/// market and it makes sense to adjust the price to what was paid here. Assuming all renewals were
+/// done in the interlude and only normal sales happen in the leadin, renewals will have no effect
+/// on price. If there are no cores left for sale on the open market, renewals will affect price
+/// even in the interlude, making sure renewal prices stay in the range of the open market.
+fn renewals_affect_price() {
+    sp_tracing::try_init_simple();
+    let b = 100_000;
+    let config = ConfigRecord {
+        advance_notice: 2,
+        interlude_length: 10,
+        leadin_length: 20,
+        ideal_bulk_proportion: Perbill::from_percent(100),
+        limit_cores_offered: None,
+        // Region length is in time slices (2 blocks):
+        region_length: 20,
+        renewal_bump: Perbill::from_percent(10),
+        contribution_timeout: 5,
+    };
+    TestExt::new_with_config(config).endow(1, b).execute_with(|| {
+        let price = 910;
+        assert_ok!(Broker::do_start_sales(10, 1));
+        advance_to(11);
+        let region = Broker::do_purchase(1, u64::max_value()).unwrap();
+        // Price is lower because we are already one block into the leadin:
+        let b = b - price;
+        assert_eq!(balance(1), b);
+        assert_ok!(Broker::do_assign(region, None, 1001, Final));
+        advance_to(40);
+        assert_noop!(Broker::do_purchase(1, u64::max_value()), Error::<Test>::TooEarly);
+        let core = Broker::do_renew(1, region.core).unwrap();
+        // First renewal has same price as initial purchase.
+        let b = b - price;
+        assert_eq!(balance(1), b);
+        advance_to(51);
+        assert_noop!(Broker::do_purchase(1, u64::max_value()), Error::<Test>::SoldOut);
+        advance_to(81);
+        assert_ok!(Broker::do_renew(1, core));
+        // Renewal bump in effect.
+        let price = price + Perbill::from_percent(10) * price;
+        let b = b - price;
+        assert_eq!(balance(1), b);
+
+        // Move after interlude and leadin - should reduce price.
+        advance_to(159);
+        Broker::do_renew(1, region.core).unwrap();
+        let price = price + Perbill::from_percent(10) * price;
+        let b = b - price;
+        assert_eq!(balance(1), b);
+
+        advance_to(161);
+        // Should have the reduced price now:
+        Broker::do_renew(1, region.core).unwrap();
+        let price = 100;
+        let b = b - price;
+        assert_eq!(balance(1), b);
+
+        // Price should be bumped normally again:
+        advance_to(201);
+        Broker::do_renew(1, region.core).unwrap();
+        let price = 110;
+        let b = b - price;
+        assert_eq!(balance(1), b);
+    });
}
@@ -916,7 +985,8 @@ fn short_leases_are_cleaned() {
#[test]
fn leases_can_be_renewed() {
-    TestExt::new().endow(1, 1000).execute_with(|| {
+    let initial_balance = 100_000;
+    TestExt::new().endow(1, initial_balance).execute_with(|| {
        // Timeslice period is 2.
        //
        // Sale 1 starts at block 7, Sale 2 starts at 13.
@@ -927,13 +997,13 @@
        // Start the sales with only one core for this lease.
        assert_ok!(Broker::do_start_sales(100, 0));
-        // Advance to sale period 1, we should get an AllowedRenewal for task 2001 for the next
+        // Advance to sale period 1, we should get a PotentialRenewal for task 2001 for the next
        // sale.
        advance_sale_period();
        assert_eq!(
-            AllowedRenewals::<Test>::get(AllowedRenewalId { core: 0, when: 10 }),
-            Some(AllowedRenewalRecord {
-                price: 100,
+            PotentialRenewals::<Test>::get(PotentialRenewalId { core: 0, when: 10 }),
+            Some(PotentialRenewalRecord {
+                price: 1000,
                completion: CompletionStatus::Complete(
                    vec![ScheduleItem { mask: CoreMask::complete(), assignment: Task(2001) }]
                        .try_into()
@@ -947,8 +1017,8 @@
        // Advance to sale period 2, where we can renew.
        advance_sale_period();
        assert_ok!(Broker::do_renew(1, 0));
-        // We renew for the base price of the previous sale period.
-        assert_eq!(balance(1), 900);
+        // We renew for the price of the previous sale period.
+        assert_eq!(balance(1), initial_balance - 1000);
        // We just renewed for this period.
        advance_sale_period();
@@ -1023,14 +1093,14 @@ fn short_leases_cannot_be_renewed() {
        // The lease is removed.
        assert_eq!(Leases::<Test>::get().len(), 0);

-        // We should have got an entry in AllowedRenewals, but we don't because rotate_sale
+        // We should have got an entry in PotentialRenewals, but we don't because rotate_sale
        // schedules leases a period in advance. This renewal should be in the period after next
        // because while bootstrapping our way into the sale periods, we give everything a lease for
        // period 1, so they can renew for period 2. So we have a core until the end of period 1,
        // but we are not marked as able to renew because we expired before sale period 1 starts.
        //
        // This should be fixed.
-        assert_eq!(AllowedRenewals::<Test>::get(AllowedRenewalId { core: 0, when: 10 }), None);
+        assert_eq!(PotentialRenewals::<Test>::get(PotentialRenewalId { core: 0, when: 10 }), None);
        // And the lease has been removed from storage.
        assert_eq!(Leases::<Test>::get().len(), 0);
@@ -1102,7 +1172,7 @@ fn purchase_requires_valid_status_and_sale_info() {
        let mut dummy_sale = SaleInfoRecord {
            sale_start: 0,
            leadin_length: 0,
-            price: 200,
+            end_price: 200,
            sellout_price: None,
            region_begin: 0,
            region_end: 3,
@@ -1144,7 +1214,7 @@ fn renewal_requires_valid_status_and_sale_info() {
        let mut dummy_sale = SaleInfoRecord {
            sale_start: 0,
            leadin_length: 0,
-            price: 200,
+            end_price: 200,
            sellout_price: None,
            region_begin: 0,
            region_end: 3,
@@ -1163,11 +1233,11 @@
        assert_ok!(Broker::do_start_sales(200, 1));
        assert_noop!(Broker::do_renew(1, 1), Error::<Test>::NotAllowed);
-        let record = AllowedRenewalRecord {
+        let record = PotentialRenewalRecord {
            price: 100,
            completion: CompletionStatus::Partial(CoreMask::from_chunk(0, 20)),
        };
-        AllowedRenewals::<Test>::insert(AllowedRenewalId { core: 1, when: 4 }, &record);
+        PotentialRenewals::<Test>::insert(PotentialRenewalId { core: 1, when: 4 }, &record);
        assert_noop!(Broker::do_renew(1, 1), Error::<Test>::IncompleteAssignment);
    });
}
@@ -1274,7 +1344,7 @@ fn config_works() {
/// Ensure that a lease that ended before `start_sales` was called can be renewed.
#[test]
fn renewal_works_leases_ended_before_start_sales() {
-    TestExt::new().endow(1, 1000).execute_with(|| {
+    TestExt::new().endow(1, 100_000).execute_with(|| {
        let config = Configuration::<Test>::get().unwrap();

        // This lease ended before `start_sales` was called.
@@ -1304,7 +1374,7 @@
        let new_core = Broker::do_renew(1, 0).unwrap();
        // Renewing the active lease doesn't work.
        assert_noop!(Broker::do_renew(1, 1), Error::<Test>::SoldOut);
-        assert_eq!(balance(1), 900);
+        assert_eq!(balance(1), 99000);

        // This initializes the third sale and the period 2.
        advance_sale_period();

        // Renewing the active lease doesn't work.
        assert_noop!(Broker::do_renew(1, 0), Error::<Test>::SoldOut);
-        assert_eq!(balance(1), 800);
+        assert_eq!(balance(1), 98900);

        // All leases should have ended
        assert!(Leases::<Test>::get().is_empty());
@@ -1324,7 +1394,7 @@
        assert_eq!(0, Broker::do_renew(1, new_core).unwrap());
        // Renew the task 2.
        assert_eq!(1, Broker::do_renew(1, 0).unwrap());
-        assert_eq!(balance(1), 600);
+        assert_eq!(balance(1), 98790);

        // This initializes the fifth sale and the period 4.
        advance_sale_period();
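Before the `tick_impls.rs` hunks below, it helps to see the `sellout_price` lifecycle in miniature: `purchase_core` records the last price paid while at or below the ideal core count, and the sale rotation pre-seeds the next sale with the new end price. A compact sketch with plain structs and `u128` balances (illustrative only, not the pallet code):

```rust
#[derive(Debug)]
struct Sale {
    first_core: u16,
    cores_sold: u16,
    ideal_cores_sold: u16,
    sellout_price: Option<u128>,
}

/// Mirrors `purchase_core`: assign the next core and record the sellout price
/// while at or below the ideal number of cores (or if none was recorded yet).
fn purchase_core(price: u128, sale: &mut Sale) -> u16 {
    let core = sale.first_core + sale.cores_sold;
    sale.cores_sold += 1;
    if sale.cores_sold <= sale.ideal_cores_sold || sale.sellout_price.is_none() {
        sale.sellout_price = Some(price);
    }
    core
}

/// Mirrors the rotation: seed the next sale's sellout price with the new end
/// price, so an unsold sale reads as "price was too high" and adjusts down.
fn seed_sellout(cores_offered: u16, new_end_price: u128) -> Option<u128> {
    if cores_offered > 0 { Some(new_end_price) } else { None }
}

fn main() {
    let mut sale = Sale {
        first_core: 0,
        cores_sold: 0,
        ideal_cores_sold: 1,
        sellout_price: seed_sellout(2, 100),
    };
    let core = purchase_core(250, &mut sale);
    println!("sold core {core}, sellout price now {:?}", sale.sellout_price);
}
```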
diff --git a/substrate/frame/broker/src/tick_impls.rs b/substrate/frame/broker/src/tick_impls.rs
index 04e9a65bf8f..20637cf7b90 100644
--- a/substrate/frame/broker/src/tick_impls.rs
+++ b/substrate/frame/broker/src/tick_impls.rs
@@ -17,10 +17,7 @@
use super::*;
use frame_support::{pallet_prelude::*, weights::WeightMeter};
-use sp_arithmetic::{
-    traits::{One, SaturatedConversion, Saturating, Zero},
-    FixedPointNumber,
-};
+use sp_arithmetic::traits::{One, SaturatedConversion, Saturating, Zero};
use sp_runtime::traits::ConvertBack;
use sp_std::{vec, vec::Vec};
use CompletionStatus::Complete;
@@ -163,31 +160,13 @@ impl<T: Config> Pallet<T> {
        InstaPoolIo::<T>::mutate(old_sale.region_end, |r| r.system.saturating_reduce(old_pooled));

        // Calculate the start price for the upcoming sale.
-        let price = {
-            let offered = old_sale.cores_offered;
-            let ideal = old_sale.ideal_cores_sold;
-            let sold = old_sale.cores_sold;
-
-            let maybe_purchase_price = if offered == 0 {
-                // No cores offered for sale - no purchase price.
-                None
-            } else if sold >= ideal {
-                // Sold more than the ideal amount. We should look for the last purchase price
-                // before the sell-out. If there was no purchase at all, then we avoid having a
-                // price here so that we make no alterations to it (since otherwise we would
-                // increase it).
-                old_sale.sellout_price
-            } else {
-                // Sold less than the ideal - we fall back to the regular price.
-                Some(old_sale.price)
-            };
-            if let Some(purchase_price) = maybe_purchase_price {
-                T::PriceAdapter::adapt_price(sold.min(offered), ideal, offered)
-                    .saturating_mul_int(purchase_price)
-            } else {
-                old_sale.price
-            }
-        };
+        let new_prices = T::PriceAdapter::adapt_price(SalePerformance::from_sale(&old_sale));
+
+        log::debug!(
+            "Rotated sale, new prices: {:?}, {:?}",
+            new_prices.end_price,
+            new_prices.target_price
+        );

        // Set workload for the reserved (system, probably) workloads.
        let region_begin = old_sale.region_end;
@@ -220,12 +199,15 @@ impl<T: Config> Pallet<T> {
            let expire = until < region_end;
            if expire {
                // last time for this one - make it renewable in the next sale.
-                let renewal_id = AllowedRenewalId { core: first_core, when: region_end };
-                let record = AllowedRenewalRecord { price, completion: Complete(schedule) };
-                AllowedRenewals::<T>::insert(renewal_id, &record);
+                let renewal_id = PotentialRenewalId { core: first_core, when: region_end };
+                let record = PotentialRenewalRecord {
+                    price: new_prices.target_price,
+                    completion: Complete(schedule),
+                };
+                PotentialRenewals::<T>::insert(renewal_id, &record);
                Self::deposit_event(Event::Renewable {
                    core: first_core,
-                    price,
+                    price: new_prices.target_price,
                    begin: region_end,
                    workload: record.completion.drain_complete().unwrap_or_default(),
                });
@@ -244,12 +226,19 @@ impl<T: Config> Pallet<T> {
        let sale_start = now.saturating_add(config.interlude_length);
        let leadin_length = config.leadin_length;
        let ideal_cores_sold = (config.ideal_bulk_proportion * cores_offered as u32) as u16;
+        let sellout_price = if cores_offered > 0 {
+            // Pre-seed with the new end price: if no core gets sold, the price
+            // was too high and the next adaptation has to adjust downwards.
+ Some(new_prices.end_price) + } else { + None + }; + // Update SaleInfo let new_sale = SaleInfoRecord { sale_start, leadin_length, - price, - sellout_price: None, + end_price: new_prices.end_price, + sellout_price, region_begin, region_end, first_core, @@ -257,12 +246,13 @@ impl Pallet { cores_offered, cores_sold: 0, }; + SaleInfo::::put(&new_sale); Self::deposit_event(Event::SaleInitialized { sale_start, leadin_length, start_price: Self::sale_price(&new_sale, now), - regular_price: price, + end_price: new_prices.end_price, region_begin, region_end, ideal_cores_sold, diff --git a/substrate/frame/broker/src/types.rs b/substrate/frame/broker/src/types.rs index f2cae9a41ad..885cac9a5c2 100644 --- a/substrate/frame/broker/src/types.rs +++ b/substrate/frame/broker/src/types.rs @@ -152,25 +152,28 @@ impl CompletionStatus { } } -/// The identity of a possible Core workload renewal. +/// The identity of a possibly renewable Core workload. #[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] -pub struct AllowedRenewalId { +pub struct PotentialRenewalId { /// The core whose workload at the sale ending with `when` may be renewed to begin at `when`. pub core: CoreIndex, /// The point in time that the renewable workload on `core` ends and a fresh renewal may begin. pub when: Timeslice, } -/// A record of an allowed renewal. +/// A record of a potential renewal. +/// +/// The renewal will only actually be allowed if `CompletionStatus` is `Complete` at the time of +/// renewal. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] -pub struct AllowedRenewalRecord { +pub struct PotentialRenewalRecord { /// The price for which the next renewal can be made. pub price: Balance, /// The workload which will be scheduled on the Core in the case a renewal is made, or if /// incomplete, then the parts of the core which have been scheduled. pub completion: CompletionStatus, } -pub type AllowedRenewalRecordOf = AllowedRenewalRecord>; +pub type PotentialRenewalRecordOf = PotentialRenewalRecord>; /// General status of the system. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] @@ -211,7 +214,7 @@ pub struct SaleInfoRecord { /// The length in blocks of the Leadin Period (where the price is decreasing). pub leadin_length: BlockNumber, /// The price of Bulk Coretime after the Leadin Period. - pub price: Balance, + pub end_price: Balance, /// The first timeslice of the Regions which are being sold in this sale. pub region_begin: Timeslice, /// The timeslice on which the Regions which are being sold in the sale terminate. (i.e. One @@ -225,8 +228,9 @@ pub struct SaleInfoRecord { /// The index of the first core which is for sale. Core of Regions which are sold have /// incrementing indices from this. pub first_core: CoreIndex, - /// The latest price at which Bulk Coretime was purchased until surpassing the ideal number of - /// cores were sold. + /// The price at which cores have been sold out. + /// + /// Will only be `None` if no core was offered for sale. pub sellout_price: Option, /// Number of cores which have been sold; never more than cores_offered. pub cores_sold: CoreIndex, @@ -263,8 +267,11 @@ pub struct ConfigRecord { pub leadin_length: BlockNumber, /// The length in timeslices of Regions which are up for sale in forthcoming sales. pub region_length: Timeslice, - /// The proportion of cores available for sale which should be sold in order for the price - /// to remain the same in the next sale. 
+ /// The proportion of cores available for sale which should be sold. + /// + /// If more cores are sold than this, then further sales will no longer be considered in + /// determining the sellout price. In other words the sellout price will be the last price + /// paid, without going over this limit. pub ideal_bulk_proportion: Perbill, /// An artificial limit to the number of cores which are allowed to be sold. If `Some` then /// no more cores will be sold than this. diff --git a/substrate/frame/broker/src/utility_impls.rs b/substrate/frame/broker/src/utility_impls.rs index 4163817a8b5..9cceb7f970a 100644 --- a/substrate/frame/broker/src/utility_impls.rs +++ b/substrate/frame/broker/src/utility_impls.rs @@ -63,7 +63,7 @@ impl Pallet { pub fn sale_price(sale: &SaleInfoRecordOf, now: BlockNumberFor) -> BalanceOf { let num = now.saturating_sub(sale.sale_start).min(sale.leadin_length).saturated_into(); let through = FixedU64::from_rational(num, sale.leadin_length.saturated_into()); - T::PriceAdapter::leadin_factor_at(through).saturating_mul_int(sale.price) + T::PriceAdapter::leadin_factor_at(through).saturating_mul_int(sale.end_price) } pub(crate) fn charge(who: &T::AccountId, amount: BalanceOf) -> DispatchResult { @@ -72,6 +72,25 @@ impl Pallet { Ok(()) } + /// Buy a core at the specified price (price is to be determined by the caller). + /// + /// Note: It is the responsibility of the caller to write back the changed `SaleInfoRecordOf` to + /// storage. + pub(crate) fn purchase_core( + who: &T::AccountId, + price: BalanceOf, + sale: &mut SaleInfoRecordOf, + ) -> Result { + Self::charge(who, price)?; + log::debug!("Purchased core at: {:?}", price); + let core = sale.first_core.saturating_add(sale.cores_sold); + sale.cores_sold.saturating_inc(); + if sale.cores_sold <= sale.ideal_cores_sold || sale.sellout_price.is_none() { + sale.sellout_price = Some(price); + } + Ok(core) + } + pub fn issue( core: CoreIndex, begin: Timeslice, diff --git a/substrate/frame/broker/src/weights.rs b/substrate/frame/broker/src/weights.rs index 2aa1c282a41..d9d9d348e47 100644 --- a/substrate/frame/broker/src/weights.rs +++ b/substrate/frame/broker/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_broker` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-05-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-vicqj8em-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_broker -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/broker/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_broker +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/broker/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -90,8 +88,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_624_000 picoseconds. - Weight::from_parts(2_804_000, 0) + // Minimum execution time: 1_945_000 picoseconds. + Weight::from_parts(2_142_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -100,8 +98,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 18_451_000 picoseconds. - Weight::from_parts(18_853_000, 7496) + // Minimum execution time: 16_274_000 picoseconds. + Weight::from_parts(16_828_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -111,8 +109,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 16_899_000 picoseconds. - Weight::from_parts(17_645_000, 7496) + // Minimum execution time: 15_080_000 picoseconds. + Weight::from_parts(15_874_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -122,19 +120,19 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 10_239_000 picoseconds. - Weight::from_parts(10_754_000, 1526) + // Minimum execution time: 8_761_000 picoseconds. 
+ Weight::from_parts(9_203_000, 1526) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) - /// Storage: `Broker::InstaPoolIo` (r:3 w:3) - /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) - /// Storage: `Broker::Reservations` (r:1 w:0) - /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) + /// Storage: `Broker::Reservations` (r:1 w:0) + /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) + /// Storage: `Broker::InstaPoolIo` (r:3 w:3) + /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:0 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::Status` (r:0 w:1) @@ -146,12 +144,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 51_250_000 picoseconds. - Weight::from_parts(54_643_012, 8499) - // Standard Error: 147 - .saturating_add(Weight::from_parts(18, 0).saturating_mul(n.into())) + // Minimum execution time: 26_057_000 picoseconds. + Weight::from_parts(46_673_357, 8499) + // Standard Error: 456 + .saturating_add(Weight::from_parts(2_677, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().writes(16_u64)) + .saturating_add(T::DbWeight::get().writes(15_u64)) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) @@ -162,13 +160,13 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::Digest` (r:1 w:0) /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Regions` (r:0 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `635` - // Estimated: `2120` - // Minimum execution time: 43_660_000 picoseconds. - Weight::from_parts(45_543_000, 2120) + // Measured: `651` + // Estimated: `2136` + // Minimum execution time: 40_907_000 picoseconds. 
+ Weight::from_parts(42_566_000, 2136) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -178,8 +176,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Broker::AllowedRenewals` (r:1 w:2) - /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `Broker::PotentialRenewals` (r:1 w:2) + /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) /// Storage: `Authorship::Author` (r:1 w:0) /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `System::Digest` (r:1 w:0) @@ -188,43 +186,43 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `753` + // Measured: `769` // Estimated: `4698` - // Minimum execution time: 63_122_000 picoseconds. - Weight::from_parts(64_366_000, 4698) + // Minimum execution time: 65_209_000 picoseconds. + Weight::from_parts(68_604_000, 4698) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `495` - // Estimated: `3550` - // Minimum execution time: 17_552_000 picoseconds. - Weight::from_parts(18_251_000, 3550) + // Measured: `496` + // Estimated: `3551` + // Minimum execution time: 15_860_000 picoseconds. + Weight::from_parts(16_393_000, 3551) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Regions` (r:1 w:2) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn partition() -> Weight { // Proof Size summary in bytes: - // Measured: `495` - // Estimated: `3550` - // Minimum execution time: 18_551_000 picoseconds. - Weight::from_parts(19_727_000, 3550) + // Measured: `496` + // Estimated: `3551` + // Minimum execution time: 17_651_000 picoseconds. + Weight::from_parts(18_088_000, 3551) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Broker::Regions` (r:1 w:3) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn interlace() -> Weight { // Proof Size summary in bytes: - // Measured: `495` - // Estimated: `3550` - // Minimum execution time: 20_636_000 picoseconds. - Weight::from_parts(21_060_000, 3550) + // Measured: `496` + // Estimated: `3551` + // Minimum execution time: 18_576_000 picoseconds. 
+ Weight::from_parts(19_810_000, 3551) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -233,22 +231,22 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:1 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn assign() -> Weight { // Proof Size summary in bytes: - // Measured: `740` + // Measured: `741` // Estimated: `4681` - // Minimum execution time: 32_394_000 picoseconds. - Weight::from_parts(33_324_000, 4681) + // Minimum execution time: 31_015_000 picoseconds. + Weight::from_parts(31_932_000, 4681) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:1 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolIo` (r:2 w:2) @@ -257,10 +255,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::InstaPoolContribution` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn pool() -> Weight { // Proof Size summary in bytes: - // Measured: `775` + // Measured: `776` // Estimated: `5996` - // Minimum execution time: 38_128_000 picoseconds. - Weight::from_parts(39_274_000, 5996) + // Minimum execution time: 36_473_000 picoseconds. + Weight::from_parts(37_382_000, 5996) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -275,10 +273,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `859` // Estimated: `6196 + m * (2520 ยฑ0)` - // Minimum execution time: 70_453_000 picoseconds. - Weight::from_parts(70_652_822, 6196) - // Standard Error: 75_524 - .saturating_add(Weight::from_parts(2_335_289, 0).saturating_mul(m.into())) + // Minimum execution time: 64_957_000 picoseconds. + Weight::from_parts(66_024_232, 6196) + // Standard Error: 50_170 + .saturating_add(Weight::from_parts(1_290_632, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) @@ -290,21 +288,21 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 43_945_000 picoseconds. - Weight::from_parts(45_249_000, 3593) + // Minimum execution time: 39_939_000 picoseconds. 
+ Weight::from_parts(40_788_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn drop_region() -> Weight { // Proof Size summary in bytes: - // Measured: `603` - // Estimated: `3550` - // Minimum execution time: 30_680_000 picoseconds. - Weight::from_parts(32_995_000, 3550) + // Measured: `604` + // Estimated: `3551` + // Minimum execution time: 31_709_000 picoseconds. + Weight::from_parts(37_559_000, 3551) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -318,8 +316,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 48_053_000 picoseconds. - Weight::from_parts(51_364_000, 3533) + // Minimum execution time: 42_895_000 picoseconds. + Weight::from_parts(53_945_000, 3533) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -335,21 +333,21 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `995` // Estimated: `3593` - // Minimum execution time: 57_372_000 picoseconds. - Weight::from_parts(59_466_000, 3593) + // Minimum execution time: 50_770_000 picoseconds. + Weight::from_parts(63_117_000, 3593) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) - /// Storage: `Broker::AllowedRenewals` (r:1 w:1) - /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `Broker::PotentialRenewals` (r:1 w:1) + /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) fn drop_renewal() -> Weight { // Proof Size summary in bytes: // Measured: `661` // Estimated: `4698` - // Minimum execution time: 27_768_000 picoseconds. - Weight::from_parts(29_000_000, 4698) + // Minimum execution time: 33_396_000 picoseconds. + Weight::from_parts(36_247_000, 4698) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -358,20 +356,18 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_588_000 picoseconds. - Weight::from_parts(5_201_705, 0) + // Minimum execution time: 3_625_000 picoseconds. + Weight::from_parts(4_011_396, 0) } /// Storage: `Broker::CoreCountInbox` (r:1 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn process_core_count(n: u32, ) -> Weight { + fn process_core_count(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `404` // Estimated: `1487` - // Minimum execution time: 6_889_000 picoseconds. 
- Weight::from_parts(7_380_363, 1487) - // Standard Error: 21 - .saturating_add(Weight::from_parts(63, 0).saturating_mul(n.into())) + // Minimum execution time: 6_217_000 picoseconds. + Weight::from_parts(6_608_394, 1487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -389,8 +385,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `972` // Estimated: `4437` - // Minimum execution time: 50_156_000 picoseconds. - Weight::from_parts(51_610_000, 4437) + // Minimum execution time: 46_853_000 picoseconds. + Weight::from_parts(47_740_000, 4437) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -405,14 +401,12 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Broker::Workplan` (r:0 w:10) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn rotate_sale(n: u32, ) -> Weight { + fn rotate_sale(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6281` // Estimated: `8499` - // Minimum execution time: 38_246_000 picoseconds. - Weight::from_parts(40_008_850, 8499) - // Standard Error: 94 - .saturating_add(Weight::from_parts(964, 0).saturating_mul(n.into())) + // Minimum execution time: 34_240_000 picoseconds. + Weight::from_parts(35_910_175, 8499) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(15_u64)) } @@ -424,8 +418,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 7_962_000 picoseconds. - Weight::from_parts(8_313_000, 3493) + // Minimum execution time: 7_083_000 picoseconds. + Weight::from_parts(7_336_000, 3493) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -437,8 +431,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 17_457_000 picoseconds. - Weight::from_parts(18_387_000, 4681) + // Minimum execution time: 15_029_000 picoseconds. + Weight::from_parts(15_567_000, 4681) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -446,8 +440,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 133_000 picoseconds. - Weight::from_parts(149_000, 0) + // Minimum execution time: 123_000 picoseconds. + Weight::from_parts(136_000, 0) } /// Storage: `Broker::CoreCountInbox` (r:0 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -455,8 +449,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_407_000 picoseconds. - Weight::from_parts(2_634_000, 0) + // Minimum execution time: 1_775_000 picoseconds. + Weight::from_parts(1_911_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) @@ -471,8 +465,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `603` // Estimated: `4068` - // Minimum execution time: 13_043_000 picoseconds. - Weight::from_parts(13_541_000, 4068) + // Minimum execution time: 11_859_000 picoseconds. 
+ Weight::from_parts(12_214_000, 4068) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -482,8 +476,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 6_606_000 picoseconds. - Weight::from_parts(6_964_000, 1526) + // Minimum execution time: 5_864_000 picoseconds. + Weight::from_parts(6_231_000, 1526) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -497,8 +491,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_624_000 picoseconds. - Weight::from_parts(2_804_000, 0) + // Minimum execution time: 1_945_000 picoseconds. + Weight::from_parts(2_142_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -507,8 +501,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 18_451_000 picoseconds. - Weight::from_parts(18_853_000, 7496) + // Minimum execution time: 16_274_000 picoseconds. + Weight::from_parts(16_828_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -518,8 +512,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 16_899_000 picoseconds. - Weight::from_parts(17_645_000, 7496) + // Minimum execution time: 15_080_000 picoseconds. + Weight::from_parts(15_874_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -529,19 +523,19 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 10_239_000 picoseconds. - Weight::from_parts(10_754_000, 1526) + // Minimum execution time: 8_761_000 picoseconds. + Weight::from_parts(9_203_000, 1526) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) - /// Storage: `Broker::InstaPoolIo` (r:3 w:3) - /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) - /// Storage: `Broker::Reservations` (r:1 w:0) - /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) + /// Storage: `Broker::Reservations` (r:1 w:0) + /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) + /// Storage: `Broker::InstaPoolIo` (r:3 w:3) + /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:0 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::Status` (r:0 w:1) @@ -553,12 +547,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 51_250_000 picoseconds. 
- Weight::from_parts(54_643_012, 8499) - // Standard Error: 147 - .saturating_add(Weight::from_parts(18, 0).saturating_mul(n.into())) + // Minimum execution time: 26_057_000 picoseconds. + Weight::from_parts(46_673_357, 8499) + // Standard Error: 456 + .saturating_add(Weight::from_parts(2_677, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().writes(16_u64)) + .saturating_add(RocksDbWeight::get().writes(15_u64)) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) @@ -569,13 +563,13 @@ impl WeightInfo for () { /// Storage: `System::Digest` (r:1 w:0) /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Regions` (r:0 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `635` - // Estimated: `2120` - // Minimum execution time: 43_660_000 picoseconds. - Weight::from_parts(45_543_000, 2120) + // Measured: `651` + // Estimated: `2136` + // Minimum execution time: 40_907_000 picoseconds. + Weight::from_parts(42_566_000, 2136) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -585,8 +579,8 @@ impl WeightInfo for () { /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Broker::AllowedRenewals` (r:1 w:2) - /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `Broker::PotentialRenewals` (r:1 w:2) + /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) /// Storage: `Authorship::Author` (r:1 w:0) /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `System::Digest` (r:1 w:0) @@ -595,43 +589,43 @@ impl WeightInfo for () { /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `753` + // Measured: `769` // Estimated: `4698` - // Minimum execution time: 63_122_000 picoseconds. - Weight::from_parts(64_366_000, 4698) + // Minimum execution time: 65_209_000 picoseconds. + Weight::from_parts(68_604_000, 4698) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `495` - // Estimated: `3550` - // Minimum execution time: 17_552_000 picoseconds. - Weight::from_parts(18_251_000, 3550) + // Measured: `496` + // Estimated: `3551` + // Minimum execution time: 15_860_000 picoseconds. 
+ Weight::from_parts(16_393_000, 3551) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Regions` (r:1 w:2) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn partition() -> Weight { // Proof Size summary in bytes: - // Measured: `495` - // Estimated: `3550` - // Minimum execution time: 18_551_000 picoseconds. - Weight::from_parts(19_727_000, 3550) + // Measured: `496` + // Estimated: `3551` + // Minimum execution time: 17_651_000 picoseconds. + Weight::from_parts(18_088_000, 3551) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Broker::Regions` (r:1 w:3) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn interlace() -> Weight { // Proof Size summary in bytes: - // Measured: `495` - // Estimated: `3550` - // Minimum execution time: 20_636_000 picoseconds. - Weight::from_parts(21_060_000, 3550) + // Measured: `496` + // Estimated: `3551` + // Minimum execution time: 18_576_000 picoseconds. + Weight::from_parts(19_810_000, 3551) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -640,22 +634,22 @@ impl WeightInfo for () { /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:1 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn assign() -> Weight { // Proof Size summary in bytes: - // Measured: `740` + // Measured: `741` // Estimated: `4681` - // Minimum execution time: 32_394_000 picoseconds. - Weight::from_parts(33_324_000, 4681) + // Minimum execution time: 31_015_000 picoseconds. + Weight::from_parts(31_932_000, 4681) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:1 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolIo` (r:2 w:2) @@ -664,10 +658,10 @@ impl WeightInfo for () { /// Proof: `Broker::InstaPoolContribution` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn pool() -> Weight { // Proof Size summary in bytes: - // Measured: `775` + // Measured: `776` // Estimated: `5996` - // Minimum execution time: 38_128_000 picoseconds. 
- Weight::from_parts(39_274_000, 5996)
+ // Minimum execution time: 36_473_000 picoseconds.
+ Weight::from_parts(37_382_000, 5996)
 .saturating_add(RocksDbWeight::get().reads(5_u64))
 .saturating_add(RocksDbWeight::get().writes(5_u64))
 }
@@ -682,10 +676,10 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `859`
 // Estimated: `6196 + m * (2520 ±0)`
- // Minimum execution time: 70_453_000 picoseconds.
- Weight::from_parts(70_652_822, 6196)
- // Standard Error: 75_524
- .saturating_add(Weight::from_parts(2_335_289, 0).saturating_mul(m.into()))
+ // Minimum execution time: 64_957_000 picoseconds.
+ Weight::from_parts(66_024_232, 6196)
+ // Standard Error: 50_170
+ .saturating_add(Weight::from_parts(1_290_632, 0).saturating_mul(m.into()))
 .saturating_add(RocksDbWeight::get().reads(3_u64))
 .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into())))
 .saturating_add(RocksDbWeight::get().writes(5_u64))
@@ -697,21 +691,21 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `103`
 // Estimated: `3593`
- // Minimum execution time: 43_945_000 picoseconds.
- Weight::from_parts(45_249_000, 3593)
+ // Minimum execution time: 39_939_000 picoseconds.
+ Weight::from_parts(40_788_000, 3593)
 .saturating_add(RocksDbWeight::get().reads(1_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
 /// Storage: `Broker::Status` (r:1 w:0)
 /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`)
 /// Storage: `Broker::Regions` (r:1 w:1)
- /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`)
+ /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`)
 fn drop_region() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `603`
- // Estimated: `3550`
- // Minimum execution time: 30_680_000 picoseconds.
- Weight::from_parts(32_995_000, 3550)
+ // Measured: `604`
+ // Estimated: `3551`
+ // Minimum execution time: 31_709_000 picoseconds.
+ Weight::from_parts(37_559_000, 3551)
 .saturating_add(RocksDbWeight::get().reads(2_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
@@ -725,8 +719,8 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `601`
 // Estimated: `3533`
- // Minimum execution time: 48_053_000 picoseconds.
- Weight::from_parts(51_364_000, 3533)
+ // Minimum execution time: 42_895_000 picoseconds.
+ Weight::from_parts(53_945_000, 3533)
 .saturating_add(RocksDbWeight::get().reads(3_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
@@ -742,21 +736,21 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `995`
 // Estimated: `3593`
- // Minimum execution time: 57_372_000 picoseconds.
+ Weight::from_parts(63_117_000, 3593) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) - /// Storage: `Broker::AllowedRenewals` (r:1 w:1) - /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `Broker::PotentialRenewals` (r:1 w:1) + /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) fn drop_renewal() -> Weight { // Proof Size summary in bytes: // Measured: `661` // Estimated: `4698` - // Minimum execution time: 27_768_000 picoseconds. - Weight::from_parts(29_000_000, 4698) + // Minimum execution time: 33_396_000 picoseconds. + Weight::from_parts(36_247_000, 4698) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -765,20 +759,18 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_588_000 picoseconds. - Weight::from_parts(5_201_705, 0) + // Minimum execution time: 3_625_000 picoseconds. + Weight::from_parts(4_011_396, 0) } /// Storage: `Broker::CoreCountInbox` (r:1 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn process_core_count(n: u32, ) -> Weight { + fn process_core_count(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `404` // Estimated: `1487` - // Minimum execution time: 6_889_000 picoseconds. - Weight::from_parts(7_380_363, 1487) - // Standard Error: 21 - .saturating_add(Weight::from_parts(63, 0).saturating_mul(n.into())) + // Minimum execution time: 6_217_000 picoseconds. + Weight::from_parts(6_608_394, 1487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -796,8 +788,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `972` // Estimated: `4437` - // Minimum execution time: 50_156_000 picoseconds. - Weight::from_parts(51_610_000, 4437) + // Minimum execution time: 46_853_000 picoseconds. + Weight::from_parts(47_740_000, 4437) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -812,14 +804,12 @@ impl WeightInfo for () { /// Storage: `Broker::Workplan` (r:0 w:10) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn rotate_sale(n: u32, ) -> Weight { + fn rotate_sale(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6281` // Estimated: `8499` - // Minimum execution time: 38_246_000 picoseconds. - Weight::from_parts(40_008_850, 8499) - // Standard Error: 94 - .saturating_add(Weight::from_parts(964, 0).saturating_mul(n.into())) + // Minimum execution time: 34_240_000 picoseconds. + Weight::from_parts(35_910_175, 8499) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(15_u64)) } @@ -831,8 +821,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 7_962_000 picoseconds. - Weight::from_parts(8_313_000, 3493) + // Minimum execution time: 7_083_000 picoseconds. 
+ Weight::from_parts(7_336_000, 3493)
 .saturating_add(RocksDbWeight::get().reads(1_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
@@ -844,8 +834,8 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `1423`
 // Estimated: `4681`
- // Minimum execution time: 17_457_000 picoseconds.
- Weight::from_parts(18_387_000, 4681)
+ // Minimum execution time: 15_029_000 picoseconds.
+ Weight::from_parts(15_567_000, 4681)
 .saturating_add(RocksDbWeight::get().reads(2_u64))
 .saturating_add(RocksDbWeight::get().writes(2_u64))
 }
@@ -853,8 +843,8 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 133_000 picoseconds.
- Weight::from_parts(149_000, 0)
+ // Minimum execution time: 123_000 picoseconds.
+ Weight::from_parts(136_000, 0)
 }
 /// Storage: `Broker::CoreCountInbox` (r:0 w:1)
 /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`)
@@ -862,8 +852,8 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 2_407_000 picoseconds.
- Weight::from_parts(2_634_000, 0)
+ // Minimum execution time: 1_775_000 picoseconds.
+ Weight::from_parts(1_911_000, 0)
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
 /// Storage: `Broker::Status` (r:1 w:1)
@@ -878,8 +868,8 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `603`
 // Estimated: `4068`
- // Minimum execution time: 13_043_000 picoseconds.
- Weight::from_parts(13_541_000, 4068)
+ // Minimum execution time: 11_859_000 picoseconds.
+ Weight::from_parts(12_214_000, 4068)
 .saturating_add(RocksDbWeight::get().reads(4_u64))
 .saturating_add(RocksDbWeight::get().writes(2_u64))
 }
@@ -889,8 +879,8 @@ impl WeightInfo for () {
 // Proof Size summary in bytes:
 // Measured: `239`
 // Estimated: `1526`
- // Minimum execution time: 6_606_000 picoseconds.
- Weight::from_parts(6_964_000, 1526)
+ // Minimum execution time: 5_864_000 picoseconds.
+ Weight::from_parts(6_231_000, 1526)
 .saturating_add(RocksDbWeight::get().reads(1_u64))
 .saturating_add(RocksDbWeight::get().writes(1_u64))
 }
--
GitLab

From 4ab078d6754147ce731523292dd1882f8a7b5775 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Wed, 29 May 2024 23:23:27 +0200
Subject: [PATCH 089/106] pallet-staking: Put tests behind `cfg(debug_assertions)`
 (#4620)

Otherwise these tests fail if you don't run with `debug_assertions`
enabled, which happens if you run tests locally in release mode.

---
 substrate/frame/staking/src/tests.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs
index 76afa3333cb..2229eb28329 100644
--- a/substrate/frame/staking/src/tests.rs
+++ b/substrate/frame/staking/src/tests.rs
@@ -5251,6 +5251,7 @@ mod election_data_provider {
 // maybe_max_len`.
#[test]
 #[should_panic]
+ #[cfg(debug_assertions)]
 fn only_iterates_max_2_times_max_allowed_len() {
 ExtBuilder::default()
 .nominate(false)
@@ -5939,6 +5940,7 @@ fn min_commission_works() {

 #[test]
 #[should_panic]
+#[cfg(debug_assertions)]
 fn change_of_absolute_max_nominations() {
 use frame_election_provider_support::ElectionDataProvider;
 ExtBuilder::default()
--
GitLab

From bcab07a8c63687a148f19883688c50a9fa603091 Mon Sep 17 00:00:00 2001
From: drskalman <35698397+drskalman@users.noreply.github.com>
Date: Thu, 30 May 2024 05:31:39 -0400
Subject: [PATCH 090/106] Beefy client generic on authority Id (#1816)

Revived version of https://github.com/paritytech/substrate/pull/13311,
except that Signature is not generic and is dictated by AuthorityId.

---------

Co-authored-by: Davide Galassi
Co-authored-by: Robert Hambrock
Co-authored-by: Adrian Catangiu
---
 Cargo.lock | 5 +
 polkadot/node/service/src/lib.rs | 18 +-
 polkadot/rpc/Cargo.toml | 2 +
 polkadot/rpc/src/lib.rs | 18 +-
 substrate/bin/node/cli/src/service.rs | 20 +-
 substrate/bin/node/rpc/Cargo.toml | 2 +
 substrate/bin/node/rpc/src/lib.rs | 18 +-
 substrate/client/consensus/beefy/Cargo.toml | 1 -
 .../client/consensus/beefy/rpc/Cargo.toml | 1 +
 .../client/consensus/beefy/rpc/src/lib.rs | 39 ++--
 .../consensus/beefy/rpc/src/notification.rs | 9 +-
 .../client/consensus/beefy/src/aux_schema.rs | 37 +--
 .../beefy/src/communication/gossip.rs | 212 ++++++++++--------
 .../beefy/src/communication/notification.rs | 10 +-
 .../incoming_requests_handler.rs | 4 +-
 .../outgoing_requests_engine.rs | 34 +--
 substrate/client/consensus/beefy/src/error.rs | 7 +
 .../client/consensus/beefy/src/fisherman.rs | 28 ++-
 .../client/consensus/beefy/src/import.rs | 33 +--
 .../consensus/beefy/src/justification.rs | 91 +++++---
 .../client/consensus/beefy/src/keystore.rs | 17 +-
 substrate/client/consensus/beefy/src/lib.rs | 101 +++++----
 substrate/client/consensus/beefy/src/round.rs | 144 +++++++-----
 substrate/client/consensus/beefy/src/tests.rs | 42 ++--
 .../client/consensus/beefy/src/worker.rs | 183 ++++++++-------
 .../primitives/consensus/beefy/src/lib.rs | 26 ++-
 substrate/primitives/keystore/src/testing.rs | 2 +-
 27 files changed, 660 insertions(+), 444 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index e8732f64efa..781dba880cb 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8912,10 +8912,12 @@ dependencies = [
 "sc-sync-state-rpc",
 "sc-transaction-pool-api",
 "sp-api",
 "sp-application-crypto",
 "sp-block-builder",
 "sp-blockchain",
 "sp-consensus",
 "sp-consensus-babe",
 "sp-consensus-beefy",
 "sp-keystore",
 "sp-runtime",
 "sp-statement-store",
@@ -13704,10 +13706,12 @@ dependencies = [
 "sc-sync-state-rpc",
 "sc-transaction-pool-api",
 "sp-api",
 "sp-application-crypto",
 "sp-block-builder",
 "sp-blockchain",
 "sp-consensus",
 "sp-consensus-babe",
 "sp-consensus-beefy",
 "sp-keystore",
 "sp-runtime",
 "substrate-frame-rpc-system",
@@ -17036,6 +17040,7 @@ dependencies = [
 "sc-rpc",
 "serde",
 "serde_json",
frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE; @@ -394,8 +395,8 @@ type FullSelectChain = relay_chain_selection::SelectRelayChain; type FullGrandpaBlockImport = grandpa::GrandpaBlockImport; #[cfg(feature = "full-node")] -type FullBeefyBlockImport = - beefy::import::BeefyBlockImport; +type FullBeefyBlockImport = + beefy::import::BeefyBlockImport; #[cfg(feature = "full-node")] struct Basics { @@ -486,11 +487,14 @@ fn new_partial( babe::BabeBlockImport< Block, FullClient, - FullBeefyBlockImport>, + FullBeefyBlockImport< + FullGrandpaBlockImport, + ecdsa_crypto::AuthorityId, + >, >, grandpa::LinkHalf, babe::BabeLink, - beefy::BeefyVoterLinks, + beefy::BeefyVoterLinks, ), grandpa::SharedVoterState, sp_consensus_babe::SlotDuration, @@ -601,7 +605,7 @@ where subscription_executor: subscription_executor.clone(), finality_provider: finality_proof_provider.clone(), }, - beefy: polkadot_rpc::BeefyDeps { + beefy: polkadot_rpc::BeefyDeps:: { beefy_finality_proof_stream: beefy_rpc_links.from_voter_justif_stream.clone(), beefy_best_block_stream: beefy_rpc_links.from_voter_best_beefy_stream.clone(), subscription_executor, @@ -1293,7 +1297,9 @@ pub fn new_full< is_authority: role.is_authority(), }; - let gadget = beefy::start_beefy_gadget::<_, _, _, _, _, _, _>(beefy_params); + let gadget = beefy::start_beefy_gadget::<_, _, _, _, _, _, _, ecdsa_crypto::AuthorityId>( + beefy_params, + ); // BEEFY is part of consensus, if it fails we'll bring the node down with it to make sure it // is noticed. diff --git a/polkadot/rpc/Cargo.toml b/polkadot/rpc/Cargo.toml index 5af5e63b175..1900b595d67 100644 --- a/polkadot/rpc/Cargo.toml +++ b/polkadot/rpc/Cargo.toml @@ -17,8 +17,10 @@ sp-blockchain = { path = "../../substrate/primitives/blockchain" } sp-keystore = { path = "../../substrate/primitives/keystore" } sp-runtime = { path = "../../substrate/primitives/runtime" } sp-api = { path = "../../substrate/primitives/api" } +sp-application-crypto = { path = "../../substrate/primitives/application-crypto" } sp-consensus = { path = "../../substrate/primitives/consensus/common" } sp-consensus-babe = { path = "../../substrate/primitives/consensus/babe" } +sp-consensus-beefy = { path = "../../substrate/primitives/consensus/beefy" } sc-chain-spec = { path = "../../substrate/client/chain-spec" } sc-rpc = { path = "../../substrate/client/rpc" } sc-rpc-spec-v2 = { path = "../../substrate/client/rpc-spec-v2" } diff --git a/polkadot/rpc/src/lib.rs b/polkadot/rpc/src/lib.rs index 4455efd3b53..2daa246102f 100644 --- a/polkadot/rpc/src/lib.rs +++ b/polkadot/rpc/src/lib.rs @@ -29,10 +29,12 @@ use sc_consensus_beefy::communication::notification::{ use sc_consensus_grandpa::FinalityProofProvider; pub use sc_rpc::{DenyUnsafe, SubscriptionTaskExecutor}; use sp_api::ProvideRuntimeApi; +use sp_application_crypto::RuntimeAppPublic; use sp_block_builder::BlockBuilder; use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; use sp_consensus::SelectChain; use sp_consensus_babe::BabeApi; +use sp_consensus_beefy::AuthorityIdBound; use sp_keystore::KeystorePtr; use txpool_api::TransactionPool; @@ -62,9 +64,9 @@ pub struct GrandpaDeps { } /// Dependencies for BEEFY -pub struct BeefyDeps { +pub struct BeefyDeps { /// Receives notifications about finality proof events from BEEFY. - pub beefy_finality_proof_stream: BeefyVersionedFinalityProofStream, + pub beefy_finality_proof_stream: BeefyVersionedFinalityProofStream, /// Receives notifications about best block events from BEEFY. 
pub beefy_best_block_stream: BeefyBestBlockStream, /// Executor to drive the subscription manager in the BEEFY RPC handler. @@ -72,7 +74,7 @@ pub struct BeefyDeps { } /// Full client dependencies -pub struct FullDeps { +pub struct FullDeps { /// The client instance to use. pub client: Arc, /// Transaction pool instance. @@ -88,14 +90,14 @@ pub struct FullDeps { /// GRANDPA specific dependencies. pub grandpa: GrandpaDeps, /// BEEFY specific dependencies. - pub beefy: BeefyDeps, + pub beefy: BeefyDeps, /// Backend used by the node. pub backend: Arc, } /// Instantiate all RPC extensions. -pub fn create_full( - FullDeps { client, pool, select_chain, chain_spec, deny_unsafe, babe, grandpa, beefy, backend } : FullDeps, +pub fn create_full( + FullDeps { client, pool, select_chain, chain_spec, deny_unsafe, babe, grandpa, beefy, backend } : FullDeps, ) -> Result> where C: ProvideRuntimeApi @@ -114,6 +116,8 @@ where SC: SelectChain + 'static, B: sc_client_api::Backend + Send + Sync + 'static, B::State: sc_client_api::StateBackend>, + AuthorityId: AuthorityIdBound, + ::Signature: Send + Sync, { use frame_rpc_system::{System, SystemApiServer}; use mmr_rpc::{Mmr, MmrApiServer}; @@ -171,7 +175,7 @@ where )?; io.merge( - Beefy::::new( + Beefy::::new( beefy.beefy_finality_proof_stream, beefy.beefy_best_block_stream, beefy.subscription_executor, diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 84903bd9b87..e57ca04f3b7 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -20,7 +20,10 @@ //! Service implementation. Specialized wrapper over substrate service. -use polkadot_sdk::{sc_consensus_beefy as beefy, sc_consensus_grandpa as grandpa, *}; +use polkadot_sdk::{ + sc_consensus_beefy as beefy, sc_consensus_grandpa as grandpa, + sp_consensus_beefy as beefy_primitives, *, +}; use crate::Cli; use codec::Encode; @@ -67,8 +70,13 @@ type FullBackend = sc_service::TFullBackend; type FullSelectChain = sc_consensus::LongestChain; type FullGrandpaBlockImport = grandpa::GrandpaBlockImport; -type FullBeefyBlockImport = - beefy::import::BeefyBlockImport; +type FullBeefyBlockImport = beefy::import::BeefyBlockImport< + Block, + FullBackend, + FullClient, + InnerBlockImport, + beefy_primitives::ecdsa_crypto::AuthorityId, +>; /// The transaction pool type definition. pub type TransactionPool = sc_transaction_pool::FullPool; @@ -180,7 +188,7 @@ pub fn new_partial( >, grandpa::LinkHalf, sc_consensus_babe::BabeLink, - beefy::BeefyVoterLinks, + beefy::BeefyVoterLinks, ), grandpa::SharedVoterState, Option, @@ -328,7 +336,7 @@ pub fn new_partial( subscription_executor: subscription_executor.clone(), finality_provider: finality_proof_provider.clone(), }, - beefy: node_rpc::BeefyDeps { + beefy: node_rpc::BeefyDeps:: { beefy_finality_proof_stream: beefy_rpc_links .from_voter_justif_stream .clone(), @@ -683,7 +691,7 @@ pub fn new_full_base::Hash>>( is_authority: role.is_authority(), }; - let beefy_gadget = beefy::start_beefy_gadget::<_, _, _, _, _, _, _>(beefy_params); + let beefy_gadget = beefy::start_beefy_gadget::<_, _, _, _, _, _, _, _>(beefy_params); // BEEFY is part of consensus, if it fails we'll bring the node down with it to make sure it // is noticed. 
task_manager diff --git a/substrate/bin/node/rpc/Cargo.toml b/substrate/bin/node/rpc/Cargo.toml index 894dbf0da85..6ae80eb5785 100644 --- a/substrate/bin/node/rpc/Cargo.toml +++ b/substrate/bin/node/rpc/Cargo.toml @@ -26,6 +26,7 @@ sc-consensus-babe = { path = "../../../client/consensus/babe" } sc-consensus-babe-rpc = { path = "../../../client/consensus/babe/rpc" } sc-consensus-beefy = { path = "../../../client/consensus/beefy" } sc-consensus-beefy-rpc = { path = "../../../client/consensus/beefy/rpc" } +sp-consensus-beefy = { path = "../../../primitives/consensus/beefy" } sc-consensus-grandpa = { path = "../../../client/consensus/grandpa" } sc-consensus-grandpa-rpc = { path = "../../../client/consensus/grandpa/rpc" } sc-mixnet = { path = "../../../client/mixnet" } @@ -41,6 +42,7 @@ sp-consensus = { path = "../../../primitives/consensus/common" } sp-consensus-babe = { path = "../../../primitives/consensus/babe" } sp-keystore = { path = "../../../primitives/keystore" } sp-runtime = { path = "../../../primitives/runtime" } +sp-application-crypto = { path = "../../../primitives/application-crypto" } sp-statement-store = { path = "../../../primitives/statement-store" } substrate-frame-rpc-system = { path = "../../../utils/frame/rpc/system" } substrate-state-trie-migration-rpc = { path = "../../../utils/frame/rpc/state-trie-migration-rpc" } diff --git a/substrate/bin/node/rpc/src/lib.rs b/substrate/bin/node/rpc/src/lib.rs index 4646524a25b..52cd7f9561d 100644 --- a/substrate/bin/node/rpc/src/lib.rs +++ b/substrate/bin/node/rpc/src/lib.rs @@ -47,10 +47,12 @@ pub use sc_rpc::SubscriptionTaskExecutor; pub use sc_rpc_api::DenyUnsafe; use sc_transaction_pool_api::TransactionPool; use sp_api::ProvideRuntimeApi; +use sp_application_crypto::RuntimeAppPublic; use sp_block_builder::BlockBuilder; use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; use sp_consensus::SelectChain; use sp_consensus_babe::BabeApi; +use sp_consensus_beefy::AuthorityIdBound; use sp_keystore::KeystorePtr; /// Extra dependencies for BABE. @@ -76,9 +78,9 @@ pub struct GrandpaDeps { } /// Dependencies for BEEFY -pub struct BeefyDeps { +pub struct BeefyDeps { /// Receives notifications about finality proof events from BEEFY. - pub beefy_finality_proof_stream: BeefyVersionedFinalityProofStream, + pub beefy_finality_proof_stream: BeefyVersionedFinalityProofStream, /// Receives notifications about best block events from BEEFY. pub beefy_best_block_stream: BeefyBestBlockStream, /// Executor to drive the subscription manager in the BEEFY RPC handler. @@ -86,7 +88,7 @@ pub struct BeefyDeps { } /// Full client dependencies. -pub struct FullDeps { +pub struct FullDeps { /// The client instance to use. pub client: Arc, /// Transaction pool instance. @@ -102,7 +104,7 @@ pub struct FullDeps { /// GRANDPA specific dependencies. pub grandpa: GrandpaDeps, /// BEEFY specific dependencies. - pub beefy: BeefyDeps, + pub beefy: BeefyDeps, /// Shared statement store reference. pub statement_store: Arc, /// The backend used by the node. @@ -112,7 +114,7 @@ pub struct FullDeps { } /// Instantiate all Full RPC extensions. 
-pub fn create_full( +pub fn create_full( FullDeps { client, pool, @@ -125,7 +127,7 @@ pub fn create_full( statement_store, backend, mixnet_api, - }: FullDeps, + }: FullDeps, ) -> Result, Box> where C: ProvideRuntimeApi @@ -145,6 +147,8 @@ where SC: SelectChain + 'static, B: sc_client_api::Backend + Send + Sync + 'static, B::State: sc_client_api::backend::StateBackend>, + AuthorityId: AuthorityIdBound, + ::Signature: Send + Sync, { use mmr_rpc::{Mmr, MmrApiServer}; use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; @@ -223,7 +227,7 @@ where } io.merge( - Beefy::::new( + Beefy::::new( beefy.beefy_finality_proof_stream, beefy.beefy_best_block_stream, beefy.subscription_executor, diff --git a/substrate/client/consensus/beefy/Cargo.toml b/substrate/client/consensus/beefy/Cargo.toml index 193acbe52a1..cd183f6bc8b 100644 --- a/substrate/client/consensus/beefy/Cargo.toml +++ b/substrate/client/consensus/beefy/Cargo.toml @@ -42,7 +42,6 @@ sp-keystore = { path = "../../../primitives/keystore" } sp-runtime = { path = "../../../primitives/runtime" } tokio = "1.37" - [dev-dependencies] serde = { workspace = true, default-features = true } tempfile = "3.1.0" diff --git a/substrate/client/consensus/beefy/rpc/Cargo.toml b/substrate/client/consensus/beefy/rpc/Cargo.toml index 07e46dbda15..84f90622b5c 100644 --- a/substrate/client/consensus/beefy/rpc/Cargo.toml +++ b/substrate/client/consensus/beefy/rpc/Cargo.toml @@ -24,6 +24,7 @@ sp-consensus-beefy = { path = "../../../../primitives/consensus/beefy" } sc-rpc = { path = "../../../rpc" } sp-core = { path = "../../../../primitives/core" } sp-runtime = { path = "../../../../primitives/runtime" } +sp-application-crypto = { path = "../../../../primitives/application-crypto" } [dev-dependencies] serde_json = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/beefy/rpc/src/lib.rs b/substrate/client/consensus/beefy/rpc/src/lib.rs index f01baee2d6e..66102eeb35c 100644 --- a/substrate/client/consensus/beefy/rpc/src/lib.rs +++ b/substrate/client/consensus/beefy/rpc/src/lib.rs @@ -21,9 +21,11 @@ #![warn(missing_docs)] use parking_lot::RwLock; +use sp_consensus_beefy::AuthorityIdBound; use std::sync::Arc; use sc_rpc::{utils::pipe_from_stream, SubscriptionTaskExecutor}; +use sp_application_crypto::RuntimeAppPublic; use sp_runtime::traits::Block as BlockT; use futures::{task::SpawnError, FutureExt, StreamExt}; @@ -98,19 +100,20 @@ pub trait BeefyApi { } /// Implements the BeefyApi RPC trait for interacting with BEEFY. -pub struct Beefy { - finality_proof_stream: BeefyVersionedFinalityProofStream, +pub struct Beefy { + finality_proof_stream: BeefyVersionedFinalityProofStream, beefy_best_block: Arc>>, executor: SubscriptionTaskExecutor, } -impl Beefy +impl Beefy where Block: BlockT, + AuthorityId: AuthorityIdBound, { /// Creates a new Beefy Rpc handler instance. 
pub fn new( - finality_proof_stream: BeefyVersionedFinalityProofStream, + finality_proof_stream: BeefyVersionedFinalityProofStream, best_block_stream: BeefyBestBlockStream, executor: SubscriptionTaskExecutor, ) -> Result { @@ -129,16 +132,18 @@ where } #[async_trait] -impl BeefyApiServer - for Beefy +impl BeefyApiServer + for Beefy where Block: BlockT, + AuthorityId: AuthorityIdBound, + ::Signature: Send + Sync, { fn subscribe_justifications(&self, pending: PendingSubscriptionSink) { let stream = self .finality_proof_stream .subscribe(100_000) - .map(|vfp| notification::EncodedVersionedFinalityProof::new::(vfp)); + .map(|vfp| notification::EncodedVersionedFinalityProof::new::(vfp)); sc_rpc::utils::spawn_subscription_task(&self.executor, pipe_from_stream(pending, stream)); } @@ -158,20 +163,26 @@ mod tests { communication::notification::BeefyVersionedFinalityProofSender, justification::BeefyVersionedFinalityProof, }; - use sp_consensus_beefy::{known_payloads, Payload, SignedCommitment}; + use sp_consensus_beefy::{ecdsa_crypto, known_payloads, Payload, SignedCommitment}; use sp_runtime::traits::{BlakeTwo256, Hash}; use substrate_test_runtime_client::runtime::Block; - fn setup_io_handler() -> (RpcModule>, BeefyVersionedFinalityProofSender) { + fn setup_io_handler() -> ( + RpcModule>, + BeefyVersionedFinalityProofSender, + ) { let (_, stream) = BeefyBestBlockStream::::channel(); setup_io_handler_with_best_block_stream(stream) } fn setup_io_handler_with_best_block_stream( best_block_stream: BeefyBestBlockStream, - ) -> (RpcModule>, BeefyVersionedFinalityProofSender) { + ) -> ( + RpcModule>, + BeefyVersionedFinalityProofSender, + ) { let (finality_proof_sender, finality_proof_stream) = - BeefyVersionedFinalityProofStream::::channel(); + BeefyVersionedFinalityProofStream::::channel(); let handler = Beefy::new(finality_proof_stream, best_block_stream, sc_rpc::testing::test_executor()) @@ -250,10 +261,10 @@ mod tests { assert_eq!(response, expected); } - fn create_finality_proof() -> BeefyVersionedFinalityProof { + fn create_finality_proof() -> BeefyVersionedFinalityProof { let payload = Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); - BeefyVersionedFinalityProof::::V1(SignedCommitment { + BeefyVersionedFinalityProof::::V1(SignedCommitment { commitment: sp_consensus_beefy::Commitment { payload, block_number: 5, @@ -280,7 +291,7 @@ mod tests { // Inspect what we received let (bytes, recv_sub_id) = sub.next::().await.unwrap().unwrap(); - let recv_finality_proof: BeefyVersionedFinalityProof = + let recv_finality_proof: BeefyVersionedFinalityProof = Decode::decode(&mut &bytes[..]).unwrap(); assert_eq!(&recv_sub_id, sub.subscription_id()); assert_eq!(recv_finality_proof, finality_proof); diff --git a/substrate/client/consensus/beefy/rpc/src/notification.rs b/substrate/client/consensus/beefy/rpc/src/notification.rs index 690c511b999..d4339058a69 100644 --- a/substrate/client/consensus/beefy/rpc/src/notification.rs +++ b/substrate/client/consensus/beefy/rpc/src/notification.rs @@ -19,6 +19,7 @@ use codec::Encode; use serde::{Deserialize, Serialize}; +use sp_consensus_beefy::AuthorityIdBound; use sp_runtime::traits::Block as BlockT; /// An encoded finality proof proving that the given header has been finalized. 
@@ -28,11 +29,15 @@ use sp_runtime::traits::Block as BlockT;
 pub struct EncodedVersionedFinalityProof(sp_core::Bytes);

 impl EncodedVersionedFinalityProof {
- pub fn new(
- finality_proof: sc_consensus_beefy::justification::BeefyVersionedFinalityProof,
+ pub fn new(
+ finality_proof: sc_consensus_beefy::justification::BeefyVersionedFinalityProof<
+ Block,
+ AuthorityId,
+ >,
 ) -> Self
 where
 Block: BlockT,
+ AuthorityId: AuthorityIdBound,
 {
 EncodedVersionedFinalityProof(finality_proof.encode().into())
 }
diff --git a/substrate/client/consensus/beefy/src/aux_schema.rs b/substrate/client/consensus/beefy/src/aux_schema.rs
index 534f668ae69..1922494ad11 100644
--- a/substrate/client/consensus/beefy/src/aux_schema.rs
+++ b/substrate/client/consensus/beefy/src/aux_schema.rs
@@ -20,8 +20,10 @@ use crate::{error::Error, worker::PersistedState, LOG_TARGET};

 use codec::{Decode, Encode};
-use log::{debug, trace};
+use log::{debug, trace, warn};
 use sc_client_api::{backend::AuxStore, Backend};
+use sp_blockchain::{Error as ClientError, Result as ClientResult};
+use sp_consensus_beefy::AuthorityIdBound;
 use sp_runtime::traits::Block as BlockT;

 const VERSION_KEY: &[u8] = b"beefy_auxschema_version";
@@ -36,26 +38,27 @@ pub(crate) fn write_current_version(backend: &BE) -> Result<(), Er
 }

 /// Write voter state.
-pub(crate) fn write_voter_state(
+pub(crate) fn write_voter_state(
 backend: &BE,
- state: &PersistedState,
-) -> Result<(), Error> {
+ state: &PersistedState,
+) -> ClientResult<()> {
 trace!(target: LOG_TARGET, "🥩 persisting {:?}", state);
 AuxStore::insert_aux(backend, &[(WORKER_STATE_KEY, state.encode().as_slice())], &[])
- .map_err(|e| Error::Backend(e.to_string()))
 }

-fn load_decode(backend: &BE, key: &[u8]) -> Result, Error> {
- match backend.get_aux(key).map_err(|e| Error::Backend(e.to_string()))? {
+fn load_decode(backend: &BE, key: &[u8]) -> ClientResult> {
+ match backend.get_aux(key)? {
 None => Ok(None),
 Some(t) => T::decode(&mut &t[..])
- .map_err(|e| Error::Backend(format!("BEEFY DB is corrupted: {}", e)))
+ .map_err(|e| ClientError::Backend(format!("BEEFY DB is corrupted: {}", e)))
 .map(Some),
 }
 }

 /// Load or initialize persistent data from backend.
-pub(crate) fn load_persistent(backend: &BE) -> Result>, Error>
+pub(crate) fn load_persistent(
+ backend: &BE,
+) -> ClientResult>>
 where
 B: BlockT,
 BE: Backend,
@@ -64,9 +67,14 @@ where

 match version {
 None => (),
- Some(1) | Some(2) | Some(3) => (), // versions 1, 2 & 3 are obsolete and should be ignored
- Some(4) => return load_decode::<_, PersistedState>(backend, WORKER_STATE_KEY),
- other => return Err(Error::Backend(format!("Unsupported BEEFY DB version: {:?}", other))),
+
+ Some(v) if 1 <= v && v <= 3 =>
+ // versions 1, 2 & 3 are obsolete and should be ignored
+ warn!(target: LOG_TARGET, "🥩 backend contains a BEEFY state of an obsolete version {v}. ignoring..."),
+ Some(4) =>
+ return load_decode::<_, PersistedState>(backend, WORKER_STATE_KEY),
+ other =>
+ return Err(ClientError::Backend(format!("Unsupported BEEFY DB version: {:?}", other))),
 }

 // No persistent state found in DB.
@@ -78,6 +86,7 @@ pub(crate) mod tests { use super::*; use crate::tests::BeefyTestNet; use sc_network_test::TestNetFactory; + use sp_consensus_beefy::ecdsa_crypto; // also used in tests.rs pub fn verify_persisted_version>(backend: &BE) -> bool { @@ -91,7 +100,7 @@ pub(crate) mod tests { let backend = net.peer(0).client().as_backend(); // version not available in db -> None - assert_eq!(load_persistent(&*backend).unwrap(), None); + assert_eq!(load_persistent::<_, _, ecdsa_crypto::AuthorityId>(&*backend).unwrap(), None); // populate version in db write_current_version(&*backend).unwrap(); @@ -99,7 +108,7 @@ pub(crate) mod tests { assert_eq!(load_decode(&*backend, VERSION_KEY).unwrap(), Some(CURRENT_VERSION)); // version is available in db but state isn't -> None - assert_eq!(load_persistent(&*backend).unwrap(), None); + assert_eq!(load_persistent::<_, _, ecdsa_crypto::AuthorityId>(&*backend).unwrap(), None); // full `PersistedState` load is tested in `tests.rs`. } diff --git a/substrate/client/consensus/beefy/src/communication/gossip.rs b/substrate/client/consensus/beefy/src/communication/gossip.rs index 947fe13856f..95cac250b7c 100644 --- a/substrate/client/consensus/beefy/src/communication/gossip.rs +++ b/substrate/client/consensus/beefy/src/communication/gossip.rs @@ -36,10 +36,8 @@ use crate::{ keystore::BeefyKeystore, LOG_TARGET, }; -use sp_consensus_beefy::{ - ecdsa_crypto::{AuthorityId, Signature}, - ValidatorSet, ValidatorSetId, VoteMessage, -}; +use sp_application_crypto::RuntimeAppPublic; +use sp_consensus_beefy::{AuthorityIdBound, ValidatorSet, ValidatorSetId, VoteMessage}; // Timeout for rebroadcasting messages. #[cfg(not(test))] @@ -72,16 +70,19 @@ enum Consider { /// BEEFY gossip message type that gets encoded and sent on the network. #[derive(Debug, Encode, Decode)] -pub(crate) enum GossipMessage { +pub(crate) enum GossipMessage { /// BEEFY message with commitment and single signature. - Vote(VoteMessage, AuthorityId, Signature>), + Vote(VoteMessage, AuthorityId, ::Signature>), /// BEEFY justification with commitment and signatures. - FinalityProof(BeefyVersionedFinalityProof), + FinalityProof(BeefyVersionedFinalityProof), } -impl GossipMessage { +impl GossipMessage { /// Return inner vote if this message is a Vote. - pub fn unwrap_vote(self) -> Option, AuthorityId, Signature>> { + pub fn unwrap_vote( + self, + ) -> Option, AuthorityId, ::Signature>> + { match self { GossipMessage::Vote(vote) => Some(vote), GossipMessage::FinalityProof(_) => None, @@ -89,7 +90,7 @@ impl GossipMessage { } /// Return inner finality proof if this message is a FinalityProof. 
- pub fn unwrap_finality_proof(self) -> Option> {
+ pub fn unwrap_finality_proof(self) -> Option> {
 match self {
 GossipMessage::Vote(_) => None,
 GossipMessage::FinalityProof(proof) => Some(proof),
@@ -114,33 +115,33 @@ where
 }

 #[derive(Clone, Debug)]
-pub(crate) struct GossipFilterCfg<'a, B: Block> {
+pub(crate) struct GossipFilterCfg<'a, B: Block, AuthorityId: AuthorityIdBound> {
 pub start: NumberFor,
 pub end: NumberFor,
 pub validator_set: &'a ValidatorSet,
 }

 #[derive(Clone, Debug)]
-struct FilterInner {
+struct FilterInner {
 pub start: NumberFor,
 pub end: NumberFor,
 pub validator_set: ValidatorSet,
 }

-struct Filter {
+struct Filter {
 // specifies live rounds
- inner: Option>,
+ inner: Option>,
 // cache of seen valid justifications in active rounds
 rounds_with_valid_proofs: BTreeSet>,
 }

-impl Filter {
+impl Filter {
 pub fn new() -> Self {
 Self { inner: None, rounds_with_valid_proofs: BTreeSet::new() }
 }

 /// Update filter to new `start` and `set_id`.
- fn update(&mut self, cfg: GossipFilterCfg) {
+ fn update(&mut self, cfg: GossipFilterCfg) {
 self.rounds_with_valid_proofs
 .retain(|&round| round >= cfg.start && round <= cfg.end);
 // only clone+overwrite big validator_set if set_id changed
@@ -220,21 +221,22 @@ impl Filter {
 /// rejected/expired.
 ///
 ///All messaging is handled in a single BEEFY global topic.
-pub(crate) struct GossipValidator
+pub(crate) struct GossipValidator
 where
 B: Block,
 {
 votes_topic: B::Hash,
 justifs_topic: B::Hash,
- gossip_filter: RwLock>,
+ gossip_filter: RwLock>,
 next_rebroadcast: Mutex,
 known_peers: Arc>>,
 network: Arc,
 }

-impl GossipValidator
+impl GossipValidator
 where
 B: Block,
+ AuthorityId: AuthorityIdBound,
 {
 pub(crate) fn new(known_peers: Arc>>, network: Arc) -> Self {
 Self {
@@ -250,7 +252,7 @@ where
 /// Update gossip validator filter.
 ///
 /// Only votes for `set_id` and rounds `start <= round <= end` will be accepted.
- pub(crate) fn update_filter(&self, filter: GossipFilterCfg) {
+ pub(crate) fn update_filter(&self, filter: GossipFilterCfg) {
 debug!(
 target: LOG_TARGET,
 "🥩 New gossip filter: start {:?}, end {:?}, validator set id {:?}",
@@ -260,10 +262,11 @@
 }
 }

-impl GossipValidator
+impl GossipValidator
 where
 B: Block,
 N: NetworkPeers,
+ AuthorityId: AuthorityIdBound,
 {
 fn report(&self, who: PeerId, cost_benefit: ReputationChange) {
 self.network.report_peer(who, cost_benefit);
@@ -271,7 +274,7 @@ where

 fn validate_vote(
 &self,
- vote: VoteMessage, AuthorityId, Signature>,
+ vote: VoteMessage, AuthorityId, ::Signature>,
 sender: &PeerId,
 ) -> Action {
 let round = vote.commitment.block_number;
@@ -299,7 +302,7 @@ where
 .unwrap_or(false)
 {
 debug!(target: LOG_TARGET, "Message from voter not in validator set: {}", vote.id);
- return Action::Discard(cost::UNKNOWN_VOTER)
+ return Action::Discard(cost::UNKNOWN_VOTER);
 }
 }

@@ -316,10 +319,10 @@

 fn validate_finality_proof(
 &self,
- proof: BeefyVersionedFinalityProof,
+ proof: BeefyVersionedFinalityProof,
 sender: &PeerId,
 ) -> Action {
- let (round, set_id) = proof_block_num_and_set_id::(&proof);
+ let (round, set_id) = proof_block_num_and_set_id::(&proof);
 self.known_peers.lock().note_vote_for(*sender, round);

 let action = {
@@ -336,7 +339,7 @@
 }

 if guard.is_already_proven(round) {
- return Action::Discard(benefit::NOT_INTERESTED)
+ return Action::Discard(benefit::NOT_INTERESTED);
 }

 // Verify justification signatures.
@@ -344,7 +347,7 @@ where
 .validator_set()
 .map(|validator_set| {
 if let Err((_, signatures_checked)) =
- verify_with_validator_set::(round, validator_set, &proof)
+ verify_with_validator_set::(round, validator_set, &proof)
 {
 debug!(
 target: LOG_TARGET,
@@ -369,9 +372,10 @@ where
 }
 }

-impl Validator for GossipValidator
+impl Validator for GossipValidator
 where
 B: Block,
+ AuthorityId: AuthorityIdBound,
 N: NetworkPeers + Send + Sync,
 {
 fn peer_disconnected(&self, _context: &mut dyn ValidatorContext, who: &PeerId) {
@@ -385,7 +389,7 @@ where
 mut data: &[u8],
 ) -> ValidationResult {
 let raw = data;
- let action = match GossipMessage::::decode_all(&mut data) {
+ let action = match GossipMessage::::decode_all(&mut data) {
 Ok(GossipMessage::Vote(msg)) => self.validate_vote(msg, sender),
 Ok(GossipMessage::FinalityProof(proof)) => self.validate_finality_proof(proof, sender),
 Err(e) => {
@@ -414,26 +418,28 @@ where
 fn message_expired<'a>(&'a self) -> Box bool + 'a> {
 let filter = self.gossip_filter.read();
- Box::new(move |_topic, mut data| match GossipMessage::::decode_all(&mut data) {
- Ok(GossipMessage::Vote(msg)) => {
- let round = msg.commitment.block_number;
- let set_id = msg.commitment.validator_set_id;
- let expired = filter.consider_vote(round, set_id) != Consider::Accept;
- trace!(target: LOG_TARGET, "🥩 Vote for round #{} expired: {}", round, expired);
- expired
- },
- Ok(GossipMessage::FinalityProof(proof)) => {
- let (round, set_id) = proof_block_num_and_set_id::(&proof);
- let expired = filter.consider_finality_proof(round, set_id) != Consider::Accept;
- trace!(
- target: LOG_TARGET,
- "🥩 Finality proof for round #{} expired: {}",
- round,
 expired
- );
- expired
- },
- Err(_) => true,
+ Box::new(move |_topic, mut data| {
+ match GossipMessage::::decode_all(&mut data) {
+ Ok(GossipMessage::Vote(msg)) => {
+ let round = msg.commitment.block_number;
+ let set_id = msg.commitment.validator_set_id;
+ let expired = filter.consider_vote(round, set_id) != Consider::Accept;
+ trace!(target: LOG_TARGET, "🥩 Vote for round #{} expired: {}", round, expired);
 expired
+ },
+ Ok(GossipMessage::FinalityProof(proof)) => {
+ let (round, set_id) = proof_block_num_and_set_id::(&proof);
+ let expired = filter.consider_finality_proof(round, set_id) != Consider::Accept;
+ trace!(
+ target: LOG_TARGET,
+ "🥩 Finality proof for round #{} expired: {}",
+ round,
+ expired
+ );
+ expired
+ },
+ Err(_) => true,
+ }
 })
 }

@@ -455,10 +461,10 @@ where
 let filter = self.gossip_filter.read();
 Box::new(move |_who, intent, _topic, mut data| {
 if let MessageIntent::PeriodicRebroadcast = intent {
- return do_rebroadcast
+ return do_rebroadcast;
 }

- match GossipMessage::::decode_all(&mut data) {
+ match GossipMessage::::decode_all(&mut data) {
 Ok(GossipMessage::Vote(msg)) => {
 let round = msg.commitment.block_number;
 let set_id = msg.commitment.validator_set_id;
 allowed
 },
 Ok(GossipMessage::FinalityProof(proof)) => {
- let (round, set_id) = proof_block_num_and_set_id::(&proof);
+ let (round, set_id) = proof_block_num_and_set_id::(&proof);
 let allowed = filter.consider_finality_proof(round, set_id) == Consider::Accept;
 trace!(
 target: LOG_TARGET,
@@ -490,8 +496,8 @@ pub(crate) mod tests {
 use sc_network_test::Block;
 use sp_application_crypto::key_types::BEEFY as BEEFY_KEY_TYPE;
 use sp_consensus_beefy::{
- ecdsa_crypto::Signature, known_payloads, test_utils::Keyring, Commitment, MmrRootHash,
- Payload, SignedCommitment, VoteMessage,
+ ecdsa_crypto, known_payloads, test_utils::Keyring, Commitment,
MmrRootHash, Payload, + SignedCommitment, VoteMessage, }; use sp_keystore::{testing::MemoryKeystore, Keystore}; @@ -607,16 +613,18 @@ pub(crate) mod tests { } pub fn sign_commitment( - who: &Keyring, + who: &Keyring, commitment: &Commitment, - ) -> Signature { + ) -> ecdsa_crypto::Signature { let store = MemoryKeystore::new(); store.ecdsa_generate_new(BEEFY_KEY_TYPE, Some(&who.to_seed())).unwrap(); - let beefy_keystore: BeefyKeystore = Some(store.into()).into(); + let beefy_keystore: BeefyKeystore = Some(store.into()).into(); beefy_keystore.sign(&who.public(), &commitment.encode()).unwrap() } - fn dummy_vote(block_number: u64) -> VoteMessage { + fn dummy_vote( + block_number: u64, + ) -> VoteMessage { let payload = Payload::from_single_entry( known_payloads::MMR_ROOT_ID, MmrRootHash::default().encode(), @@ -629,8 +637,8 @@ pub(crate) mod tests { pub fn dummy_proof( block_number: u64, - validator_set: &ValidatorSet, - ) -> BeefyVersionedFinalityProof { + validator_set: &ValidatorSet, + ) -> BeefyVersionedFinalityProof { let payload = Payload::from_single_entry( known_payloads::MMR_ROOT_ID, MmrRootHash::default().encode(), @@ -639,25 +647,29 @@ pub(crate) mod tests { let signatures = validator_set .validators() .iter() - .map(|validator: &AuthorityId| { + .map(|validator: &ecdsa_crypto::AuthorityId| { Some(sign_commitment( - &Keyring::::from_public(validator).unwrap(), + &Keyring::::from_public(validator).unwrap(), &commitment, )) }) .collect(); - BeefyVersionedFinalityProof::::V1(SignedCommitment { commitment, signatures }) + BeefyVersionedFinalityProof::::V1(SignedCommitment { + commitment, + signatures, + }) } #[test] fn should_validate_messages() { - let keys = vec![Keyring::::Alice.public()]; - let validator_set = ValidatorSet::::new(keys.clone(), 0).unwrap(); + let keys = vec![Keyring::::Alice.public()]; + let validator_set = + ValidatorSet::::new(keys.clone(), 0).unwrap(); let (network, mut report_stream) = TestNetwork::new(); - let gv = GossipValidator::::new( + let gv = GossipValidator::::new( Arc::new(Mutex::new(KnownPeers::new())), Arc::new(network), ); @@ -678,7 +690,8 @@ pub(crate) mod tests { // verify votes validation let vote = dummy_vote(3); - let encoded = GossipMessage::::Vote(vote.clone()).encode(); + let encoded = + GossipMessage::::Vote(vote.clone()).encode(); // filter not initialized let res = gv.validate(&mut context, &sender, &encoded); @@ -696,7 +709,7 @@ pub(crate) mod tests { // reject vote, voter not in validator set let mut bad_vote = vote.clone(); bad_vote.id = Keyring::Bob.public(); - let bad_vote = GossipMessage::::Vote(bad_vote).encode(); + let bad_vote = GossipMessage::::Vote(bad_vote).encode(); let res = gv.validate(&mut context, &sender, &bad_vote); assert!(matches!(res, ValidationResult::Discard)); expected_report.cost_benefit = cost::UNKNOWN_VOTER; @@ -726,7 +739,8 @@ pub(crate) mod tests { // reject old proof let proof = dummy_proof(5, &validator_set); - let encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); let res = gv.validate(&mut context, &sender, &encoded_proof); assert!(matches!(res, ValidationResult::Discard)); expected_report.cost_benefit = cost::OUTDATED_MESSAGE; @@ -734,7 +748,8 @@ pub(crate) mod tests { // accept next proof with good set_id let proof = dummy_proof(7, &validator_set); - let encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); let res = gv.validate(&mut 
context, &sender, &encoded_proof); assert!(matches!(res, ValidationResult::ProcessAndKeep(_))); expected_report.cost_benefit = benefit::VALIDATED_PROOF; @@ -742,16 +757,18 @@ pub(crate) mod tests { // accept future proof with good set_id let proof = dummy_proof(20, &validator_set); - let encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); let res = gv.validate(&mut context, &sender, &encoded_proof); assert!(matches!(res, ValidationResult::ProcessAndKeep(_))); expected_report.cost_benefit = benefit::VALIDATED_PROOF; assert_eq!(report_stream.try_next().unwrap().unwrap(), expected_report); // reject proof, future set_id - let bad_validator_set = ValidatorSet::::new(keys, 1).unwrap(); + let bad_validator_set = ValidatorSet::::new(keys, 1).unwrap(); let proof = dummy_proof(20, &bad_validator_set); - let encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); let res = gv.validate(&mut context, &sender, &encoded_proof); assert!(matches!(res, ValidationResult::Discard)); expected_report.cost_benefit = cost::FUTURE_MESSAGE; @@ -759,9 +776,10 @@ pub(crate) mod tests { // reject proof, bad signatures (Bob instead of Alice) let bad_validator_set = - ValidatorSet::::new(vec![Keyring::Bob.public()], 0).unwrap(); + ValidatorSet::::new(vec![Keyring::Bob.public()], 0).unwrap(); let proof = dummy_proof(21, &bad_validator_set); - let encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); let res = gv.validate(&mut context, &sender, &encoded_proof); assert!(matches!(res, ValidationResult::Discard)); expected_report.cost_benefit = cost::INVALID_PROOF; @@ -772,8 +790,9 @@ pub(crate) mod tests { #[test] fn messages_allowed_and_expired() { let keys = vec![Keyring::Alice.public()]; - let validator_set = ValidatorSet::::new(keys.clone(), 0).unwrap(); - let gv = GossipValidator::::new( + let validator_set = + ValidatorSet::::new(keys.clone(), 0).unwrap(); + let gv = GossipValidator::::new( Arc::new(Mutex::new(KnownPeers::new())), Arc::new(TestNetwork::new().0), ); @@ -793,58 +812,70 @@ pub(crate) mod tests { // inactive round 1 -> expired let vote = dummy_vote(1); - let mut encoded_vote = GossipMessage::::Vote(vote).encode(); + let mut encoded_vote = + GossipMessage::::Vote(vote).encode(); assert!(!allowed(&sender, intent, &topic, &mut encoded_vote)); assert!(expired(topic, &mut encoded_vote)); let proof = dummy_proof(1, &validator_set); - let mut encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let mut encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); assert!(!allowed(&sender, intent, &topic, &mut encoded_proof)); assert!(expired(topic, &mut encoded_proof)); // active round 2 -> !expired - concluded but still gossiped let vote = dummy_vote(2); - let mut encoded_vote = GossipMessage::::Vote(vote).encode(); + let mut encoded_vote = + GossipMessage::::Vote(vote).encode(); assert!(allowed(&sender, intent, &topic, &mut encoded_vote)); assert!(!expired(topic, &mut encoded_vote)); let proof = dummy_proof(2, &validator_set); - let mut encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let mut encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); assert!(allowed(&sender, intent, &topic, &mut encoded_proof)); assert!(!expired(topic, &mut encoded_proof)); // using wrong set_id -> !allowed, expired - let bad_validator_set = 
ValidatorSet::::new(keys.clone(), 1).unwrap(); + let bad_validator_set = + ValidatorSet::::new(keys.clone(), 1).unwrap(); let proof = dummy_proof(2, &bad_validator_set); - let mut encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let mut encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); assert!(!allowed(&sender, intent, &topic, &mut encoded_proof)); assert!(expired(topic, &mut encoded_proof)); // in progress round 3 -> !expired let vote = dummy_vote(3); - let mut encoded_vote = GossipMessage::::Vote(vote).encode(); + let mut encoded_vote = + GossipMessage::::Vote(vote).encode(); assert!(allowed(&sender, intent, &topic, &mut encoded_vote)); assert!(!expired(topic, &mut encoded_vote)); let proof = dummy_proof(3, &validator_set); - let mut encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let mut encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); assert!(allowed(&sender, intent, &topic, &mut encoded_proof)); assert!(!expired(topic, &mut encoded_proof)); // unseen round 4 -> !expired let vote = dummy_vote(4); - let mut encoded_vote = GossipMessage::::Vote(vote).encode(); + let mut encoded_vote = + GossipMessage::::Vote(vote).encode(); assert!(allowed(&sender, intent, &topic, &mut encoded_vote)); assert!(!expired(topic, &mut encoded_vote)); let proof = dummy_proof(4, &validator_set); - let mut encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let mut encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); assert!(allowed(&sender, intent, &topic, &mut encoded_proof)); assert!(!expired(topic, &mut encoded_proof)); // future round 11 -> expired let vote = dummy_vote(11); - let mut encoded_vote = GossipMessage::::Vote(vote).encode(); + let mut encoded_vote = + GossipMessage::::Vote(vote).encode(); assert!(!allowed(&sender, intent, &topic, &mut encoded_vote)); assert!(expired(topic, &mut encoded_vote)); // future proofs allowed while same set_id -> allowed let proof = dummy_proof(11, &validator_set); - let mut encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let mut encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); assert!(allowed(&sender, intent, &topic, &mut encoded_proof)); assert!(!expired(topic, &mut encoded_proof)); } @@ -852,8 +883,9 @@ pub(crate) mod tests { #[test] fn messages_rebroadcast() { let keys = vec![Keyring::Alice.public()]; - let validator_set = ValidatorSet::::new(keys.clone(), 0).unwrap(); - let gv = GossipValidator::::new( + let validator_set = + ValidatorSet::::new(keys.clone(), 0).unwrap(); + let gv = GossipValidator::::new( Arc::new(Mutex::new(KnownPeers::new())), Arc::new(TestNetwork::new().0), ); diff --git a/substrate/client/consensus/beefy/src/communication/notification.rs b/substrate/client/consensus/beefy/src/communication/notification.rs index a4486e523c3..8bb5d848b4f 100644 --- a/substrate/client/consensus/beefy/src/communication/notification.rs +++ b/substrate/client/consensus/beefy/src/communication/notification.rs @@ -32,13 +32,15 @@ pub type BeefyBestBlockStream = /// The sending half of the notifications channel(s) used to send notifications /// about versioned finality proof generated at the end of a BEEFY round. -pub type BeefyVersionedFinalityProofSender = - NotificationSender>; +pub type BeefyVersionedFinalityProofSender = + NotificationSender>; /// The receiving half of a notifications channel used to receive notifications /// about versioned finality proof generated at the end of a BEEFY round. 
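
The `allowed`/`expired` assertions in the gossip tests above all reduce to one window check: a message is kept only while its round falls inside the live interval `[start, end]` and its validator set id matches the filter's. A minimal standalone sketch of that check (plain Rust with illustrative names, not the actual `Filter`/`Consider` types; note the real validator additionally accepts finality proofs for future rounds of the same set, which is why round 11's proof above is allowed while its vote is not):

```rust
/// Outcome of checking a message against the live-rounds window.
#[derive(Debug, PartialEq)]
enum Consider {
    Accept,
    RejectPast,       // round < start -> expired
    RejectFuture,     // round > end   -> not interesting yet
    RejectOutOfScope, // wrong validator set id
}

struct RoundFilter {
    start: u64,
    end: u64,
    set_id: u64,
}

impl RoundFilter {
    fn consider_vote(&self, round: u64, set_id: u64) -> Consider {
        if set_id != self.set_id {
            Consider::RejectOutOfScope
        } else if round < self.start {
            Consider::RejectPast
        } else if round > self.end {
            Consider::RejectFuture
        } else {
            Consider::Accept
        }
    }
}

fn main() {
    let filter = RoundFilter { start: 2, end: 10, set_id: 0 };
    assert_eq!(filter.consider_vote(1, 0), Consider::RejectPast); // expired
    assert_eq!(filter.consider_vote(3, 0), Consider::Accept); // in-progress round
    assert_eq!(filter.consider_vote(11, 0), Consider::RejectFuture); // future vote rejected
    assert_eq!(filter.consider_vote(2, 1), Consider::RejectOutOfScope); // wrong set id
}
```

Keeping the interval in one place is what keeps `message_allowed` and `message_expired` consistent: both closures read the same `gossip_filter` under a read lock.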
-pub type BeefyVersionedFinalityProofStream = - NotificationStream, BeefyVersionedFinalityProofTracingKey>; +pub type BeefyVersionedFinalityProofStream = NotificationStream< + BeefyVersionedFinalityProof, + BeefyVersionedFinalityProofTracingKey, +>; /// Provides tracing key for BEEFY best block stream. #[derive(Clone)] diff --git a/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs b/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs index 7893066a01e..350e7a271bc 100644 --- a/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs +++ b/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs @@ -87,9 +87,9 @@ impl IncomingRequest { sent_feedback: None, }; if let Err(_) = pending_response.send(response) { - return Err(Error::DecodingErrorNoReputationChange(peer, err)) + return Err(Error::DecodingErrorNoReputationChange(peer, err)); } - return Err(Error::DecodingError(peer, err)) + return Err(Error::DecodingError(peer, err)); }, }; Ok(Self::new(peer, payload, pending_response)) diff --git a/substrate/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs b/substrate/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs index 2ab07296090..4d40656375e 100644 --- a/substrate/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs +++ b/substrate/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs @@ -27,7 +27,7 @@ use sc_network::{ NetworkRequest, ProtocolName, }; use sc_network_types::PeerId; -use sp_consensus_beefy::{ecdsa_crypto::AuthorityId, ValidatorSet}; +use sp_consensus_beefy::{AuthorityIdBound, ValidatorSet}; use sp_runtime::traits::{Block, NumberFor}; use std::{collections::VecDeque, result::Result, sync::Arc}; @@ -49,38 +49,38 @@ type Response = Result<(Vec, ProtocolName), RequestFailure>; type ResponseReceiver = oneshot::Receiver; #[derive(Clone, Debug)] -struct RequestInfo { +struct RequestInfo { block: NumberFor, active_set: ValidatorSet, } -enum State { +enum State { Idle, - AwaitingResponse(PeerId, RequestInfo, ResponseReceiver), + AwaitingResponse(PeerId, RequestInfo, ResponseReceiver), } /// Possible engine responses. -pub(crate) enum ResponseInfo { +pub(crate) enum ResponseInfo { /// No peer response available yet. Pending, /// Valid justification provided alongside peer reputation changes. - ValidProof(BeefyVersionedFinalityProof, PeerReport), + ValidProof(BeefyVersionedFinalityProof, PeerReport), /// No justification yet, only peer reputation changes. 
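
The on-demand engine in this hunk is a one-request-in-flight state machine: `request()` is a no-op while a response is pending, and on a bad or missing response the engine falls back to the next peer in its cache for the same block. A simplified, synchronous model of that control flow (the real engine is async, generic over `AuthorityId`, and reports peer reputation alongside each outcome):

```rust
use std::collections::VecDeque;

#[derive(Clone, Copy)]
enum State {
    Idle,
    AwaitingResponse { peer: u32, block: u64 },
}

struct OnDemandEngine {
    peers_cache: VecDeque<u32>,
    state: State,
}

impl OnDemandEngine {
    /// Start a request unless one is already pending (mirrors the early-return guard).
    fn request(&mut self, block: u64) {
        if matches!(self.state, State::AwaitingResponse { .. }) {
            return; // ignore new requests while there's already one pending
        }
        if let Some(peer) = self.peers_cache.pop_front() {
            self.state = State::AwaitingResponse { peer, block };
        }
    }

    /// On a bad response, fall back to the next cached peer for the same block.
    fn on_invalid_response(&mut self) {
        if let State::AwaitingResponse { block, .. } = self.state {
            self.state = State::Idle;
            self.request(block);
        }
    }
}

fn main() {
    let mut engine =
        OnDemandEngine { peers_cache: VecDeque::from([1, 2, 3]), state: State::Idle };
    engine.request(10);
    engine.request(11); // ignored: request for #10 still in flight
    engine.on_invalid_response(); // retries block #10 with the next peer
    assert!(matches!(engine.state, State::AwaitingResponse { peer: 2, block: 10 }));
}
```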
PeerReport(PeerReport), } -pub struct OnDemandJustificationsEngine { +pub struct OnDemandJustificationsEngine { network: Arc, protocol_name: ProtocolName, live_peers: Arc>>, peers_cache: VecDeque, - state: State, + state: State, metrics: Option, } -impl OnDemandJustificationsEngine { +impl OnDemandJustificationsEngine { pub fn new( network: Arc, protocol_name: ProtocolName, @@ -106,13 +106,13 @@ impl OnDemandJustificationsEngine { let live = self.live_peers.lock(); while let Some(peer) = self.peers_cache.pop_front() { if live.contains(&peer) { - return Some(peer) + return Some(peer); } } None } - fn request_from_peer(&mut self, peer: PeerId, req_info: RequestInfo) { + fn request_from_peer(&mut self, peer: PeerId, req_info: RequestInfo) { debug!( target: BEEFY_SYNC_LOG_TARGET, "๐Ÿฅฉ requesting justif #{:?} from peer {:?}", req_info.block, peer, @@ -140,7 +140,7 @@ impl OnDemandJustificationsEngine { pub fn request(&mut self, block: NumberFor, active_set: ValidatorSet) { // ignore new requests while there's already one pending if matches!(self.state, State::AwaitingResponse(_, _, _)) { - return + return; } self.reset_peers_cache_for_block(block); @@ -174,9 +174,9 @@ impl OnDemandJustificationsEngine { fn process_response( &mut self, peer: &PeerId, - req_info: &RequestInfo, + req_info: &RequestInfo, response: Result, - ) -> Result, Error> { + ) -> Result, Error> { response .map_err(|e| { debug!( @@ -207,7 +207,7 @@ impl OnDemandJustificationsEngine { } }) .and_then(|(encoded, _)| { - decode_and_verify_finality_proof::( + decode_and_verify_finality_proof::( &encoded[..], req_info.block, &req_info.active_set, @@ -227,11 +227,11 @@ impl OnDemandJustificationsEngine { }) } - pub(crate) async fn next(&mut self) -> ResponseInfo { + pub(crate) async fn next(&mut self) -> ResponseInfo { let (peer, req_info, resp) = match &mut self.state { State::Idle => { futures::future::pending::<()>().await; - return ResponseInfo::Pending + return ResponseInfo::Pending; }, State::AwaitingResponse(peer, req_info, receiver) => { let resp = receiver.await; diff --git a/substrate/client/consensus/beefy/src/error.rs b/substrate/client/consensus/beefy/src/error.rs index b4773f94019..9cd09cb9933 100644 --- a/substrate/client/consensus/beefy/src/error.rs +++ b/substrate/client/consensus/beefy/src/error.rs @@ -20,6 +20,7 @@ //! //! 
Used for BEEFY gadget internal error handling only +use sp_blockchain::Error as ClientError; use std::fmt::Debug; #[derive(Debug, thiserror::Error)] @@ -48,6 +49,12 @@ pub enum Error { VotesGossipStreamTerminated, } +impl From for Error { + fn from(e: ClientError) -> Self { + Self::Backend(e.to_string()) + } +} + #[cfg(test)] impl PartialEq for Error { fn eq(&self, other: &Self) -> bool { diff --git a/substrate/client/consensus/beefy/src/fisherman.rs b/substrate/client/consensus/beefy/src/fisherman.rs index a2b4c8f945d..073fee0bdbd 100644 --- a/substrate/client/consensus/beefy/src/fisherman.rs +++ b/substrate/client/consensus/beefy/src/fisherman.rs @@ -20,11 +20,11 @@ use crate::{error::Error, keystore::BeefyKeystore, round::Rounds, LOG_TARGET}; use log::{debug, error, warn}; use sc_client_api::Backend; use sp_api::ProvideRuntimeApi; +use sp_application_crypto::RuntimeAppPublic; use sp_blockchain::HeaderBackend; use sp_consensus_beefy::{ - check_equivocation_proof, - ecdsa_crypto::{AuthorityId, Signature}, - BeefyApi, BeefySignatureHasher, DoubleVotingProof, OpaqueKeyOwnershipProof, ValidatorSetId, + check_equivocation_proof, AuthorityIdBound, BeefyApi, BeefySignatureHasher, DoubleVotingProof, + OpaqueKeyOwnershipProof, ValidatorSetId, }; use sp_runtime::{ generic::BlockId, @@ -33,13 +33,13 @@ use sp_runtime::{ use std::{marker::PhantomData, sync::Arc}; /// Helper struct containing the id and the key ownership proof for a validator. -pub struct ProvedValidator<'a> { +pub struct ProvedValidator<'a, AuthorityId: AuthorityIdBound> { pub id: &'a AuthorityId, pub key_owner_proof: OpaqueKeyOwnershipProof, } /// Helper used to check and report equivocations. -pub struct Fisherman { +pub struct Fisherman { backend: Arc, runtime: Arc, key_store: Arc>, @@ -47,9 +47,11 @@ pub struct Fisherman { _phantom: PhantomData, } -impl, RuntimeApi: ProvideRuntimeApi> Fisherman +impl, RuntimeApi: ProvideRuntimeApi, AuthorityId> + Fisherman where RuntimeApi::Api: BeefyApi, + AuthorityId: AuthorityIdBound, { pub fn new( backend: Arc, @@ -64,7 +66,7 @@ where at: BlockId, offender_ids: impl Iterator, validator_set_id: ValidatorSetId, - ) -> Result>, Error> { + ) -> Result>, Error> { let hash = match at { BlockId::Hash(hash) => hash, BlockId::Number(number) => self @@ -119,8 +121,12 @@ where /// isn't necessarily the best block if there are pending authority set changes. 
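
`report_double_voting` hinges on the bookkeeping shown later in `round.rs`: the first vote a validator sends for a round is remembered, and a second vote for the same round carrying a different payload yields the `DoubleVotingProof` handed to the fisherman. The detection itself, reduced to a standalone sketch (string ids and raw payloads stand in for authority ids and signed commitments):

```rust
use std::collections::BTreeMap;

/// A (simplified) vote: who voted, for which block, and on what payload.
#[derive(Clone, PartialEq, Debug)]
struct Vote {
    id: String,
    block: u64,
    payload: Vec<u8>,
}

/// First-vote bookkeeping keyed by (voter, block): a second, conflicting vote
/// under the same key yields an equivocation (double-voting) proof.
#[derive(Default)]
struct VoteLog {
    first_votes: BTreeMap<(String, u64), Vote>,
}

impl VoteLog {
    fn note(&mut self, vote: Vote) -> Option<(Vote, Vote)> {
        let key = (vote.id.clone(), vote.block);
        if let Some(prev) = self.first_votes.get(&key) {
            if prev.payload == vote.payload {
                return None; // exact duplicate of the earlier vote, ignore
            }
            return Some((prev.clone(), vote)); // conflicting pair = the proof
        }
        self.first_votes.insert(key, vote);
        None
    }
}

fn main() {
    let mut log = VoteLog::default();
    assert!(log.note(Vote { id: "alice".into(), block: 1, payload: vec![1] }).is_none());
    assert!(log.note(Vote { id: "alice".into(), block: 1, payload: vec![1] }).is_none());
    let proof = log.note(Vote { id: "alice".into(), block: 1, payload: vec![2] });
    assert!(proof.is_some()); // two signed, conflicting votes for the same round
}
```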
pub fn report_double_voting( &self, - proof: DoubleVotingProof, AuthorityId, Signature>, - active_rounds: &Rounds, + proof: DoubleVotingProof< + NumberFor, + AuthorityId, + ::Signature, + >, + active_rounds: &Rounds, ) -> Result<(), Error> { let (validators, validator_set_id) = (active_rounds.validators(), active_rounds.validator_set_id()); @@ -128,13 +134,13 @@ where if !check_equivocation_proof::<_, _, BeefySignatureHasher>(&proof) { debug!(target: LOG_TARGET, "๐Ÿฅฉ Skipping report for bad equivocation {:?}", proof); - return Ok(()) + return Ok(()); } if let Some(local_id) = self.key_store.authority_id(validators) { if offender_id == &local_id { warn!(target: LOG_TARGET, "๐Ÿฅฉ Skipping report for own equivocation"); - return Ok(()) + return Ok(()); } } diff --git a/substrate/client/consensus/beefy/src/import.rs b/substrate/client/consensus/beefy/src/import.rs index ed8ed68c4e8..c01fb3db484 100644 --- a/substrate/client/consensus/beefy/src/import.rs +++ b/substrate/client/consensus/beefy/src/import.rs @@ -22,7 +22,7 @@ use log::debug; use sp_api::ProvideRuntimeApi; use sp_consensus::Error as ConsensusError; -use sp_consensus_beefy::{ecdsa_crypto::AuthorityId, BeefyApi, BEEFY_ENGINE_ID}; +use sp_consensus_beefy::{AuthorityIdBound, BeefyApi, BEEFY_ENGINE_ID}; use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT, NumberFor}, EncodedJustification, @@ -45,15 +45,17 @@ use crate::{ /// Wraps a `inner: BlockImport` and ultimately defers to it. /// /// When using BEEFY, the block import worker should be using this block import object. -pub struct BeefyBlockImport { +pub struct BeefyBlockImport { backend: Arc, runtime: Arc, inner: I, - justification_sender: BeefyVersionedFinalityProofSender, + justification_sender: BeefyVersionedFinalityProofSender, metrics: Option, } -impl Clone for BeefyBlockImport { +impl Clone + for BeefyBlockImport +{ fn clone(&self) -> Self { BeefyBlockImport { backend: self.backend.clone(), @@ -65,32 +67,35 @@ impl Clone for BeefyBlockImport BeefyBlockImport { +impl + BeefyBlockImport +{ /// Create a new BeefyBlockImport. pub fn new( backend: Arc, runtime: Arc, inner: I, - justification_sender: BeefyVersionedFinalityProofSender, + justification_sender: BeefyVersionedFinalityProofSender, metrics: Option, - ) -> BeefyBlockImport { + ) -> BeefyBlockImport { BeefyBlockImport { backend, runtime, inner, justification_sender, metrics } } } -impl BeefyBlockImport +impl BeefyBlockImport where Block: BlockT, BE: Backend, Runtime: ProvideRuntimeApi, Runtime::Api: BeefyApi + Send, + AuthorityId: AuthorityIdBound, { fn decode_and_verify( &self, encoded: &EncodedJustification, number: NumberFor, hash: ::Hash, - ) -> Result, ConsensusError> { + ) -> Result, ConsensusError> { use ConsensusError::ClientImport as ImportError; let beefy_genesis = self .runtime @@ -99,7 +104,7 @@ where .map_err(|e| ImportError(e.to_string()))? .ok_or_else(|| ImportError("Unknown BEEFY genesis".to_string()))?; if number < beefy_genesis { - return Err(ImportError("BEEFY genesis is set for future block".to_string())) + return Err(ImportError("BEEFY genesis is set for future block".to_string())); } let validator_set = self .runtime @@ -108,19 +113,21 @@ where .map_err(|e| ImportError(e.to_string()))? 
.ok_or_else(|| ImportError("Unknown validator set".to_string()))?; - decode_and_verify_finality_proof::(&encoded[..], number, &validator_set) + decode_and_verify_finality_proof::(&encoded[..], number, &validator_set) .map_err(|(err, _)| err) } } #[async_trait::async_trait] -impl BlockImport for BeefyBlockImport +impl BlockImport + for BeefyBlockImport where Block: BlockT, BE: Backend, I: BlockImport + Send + Sync, Runtime: ProvideRuntimeApi + Send + Sync, Runtime::Api: BeefyApi, + AuthorityId: AuthorityIdBound, { type Error = ConsensusError; @@ -148,7 +155,7 @@ where // The block is imported as part of some chain sync. // The voter doesn't need to process it now. // It will be detected and processed as part of the voter state init. - return Ok(inner_import_result) + return Ok(inner_import_result); }, } diff --git a/substrate/client/consensus/beefy/src/justification.rs b/substrate/client/consensus/beefy/src/justification.rs index 886368c9d7c..9ff7c3cf54f 100644 --- a/substrate/client/consensus/beefy/src/justification.rs +++ b/substrate/client/consensus/beefy/src/justification.rs @@ -17,18 +17,20 @@ // along with this program. If not, see . use codec::DecodeAll; +use sp_application_crypto::RuntimeAppPublic; use sp_consensus::Error as ConsensusError; use sp_consensus_beefy::{ - ecdsa_crypto::{AuthorityId, Signature}, - BeefySignatureHasher, KnownSignature, ValidatorSet, ValidatorSetId, VersionedFinalityProof, + AuthorityIdBound, BeefySignatureHasher, KnownSignature, ValidatorSet, ValidatorSetId, + VersionedFinalityProof, }; use sp_runtime::traits::{Block as BlockT, NumberFor}; /// A finality proof with matching BEEFY authorities' signatures. -pub type BeefyVersionedFinalityProof = VersionedFinalityProof, Signature>; +pub type BeefyVersionedFinalityProof = + VersionedFinalityProof, ::Signature>; -pub(crate) fn proof_block_num_and_set_id( - proof: &BeefyVersionedFinalityProof, +pub(crate) fn proof_block_num_and_set_id( + proof: &BeefyVersionedFinalityProof, ) -> (NumberFor, ValidatorSetId) { match proof { VersionedFinalityProof::V1(sc) => @@ -37,23 +39,26 @@ pub(crate) fn proof_block_num_and_set_id( } /// Decode and verify a Beefy FinalityProof. -pub(crate) fn decode_and_verify_finality_proof( +pub(crate) fn decode_and_verify_finality_proof( encoded: &[u8], target_number: NumberFor, validator_set: &ValidatorSet, -) -> Result, (ConsensusError, u32)> { - let proof = >::decode_all(&mut &*encoded) +) -> Result, (ConsensusError, u32)> { + let proof = >::decode_all(&mut &*encoded) .map_err(|_| (ConsensusError::InvalidJustification, 0))?; - verify_with_validator_set::(target_number, validator_set, &proof)?; + verify_with_validator_set::(target_number, validator_set, &proof)?; Ok(proof) } /// Verify the Beefy finality proof against the validator set at the block it was generated. 
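
Verification succeeds only when enough of the validator set signed the commitment; on failure the error also carries how many signatures were actually checked, which the gossip layer turns into a proportional reputation cost (the `(ConsensusError, u32)` pairs asserted in the tests below). A self-contained sketch of the counting logic, with a trivial stand-in for real signature verification and the usual BEEFY supermajority threshold of `n - (n - 1) / 3`:

```rust
/// BEEFY-style supermajority: with n validators and f = (n - 1) / 3 tolerated
/// faults, a proof needs n - f valid signatures.
fn threshold(n: usize) -> usize {
    let faulty = n.saturating_sub(1) / 3;
    n - faulty
}

/// Stand-in verification: a "signature" is valid here if it equals the
/// validator id (real code checks an ECDSA/BLS signature over the commitment).
fn verify(validators: &[u32], signatures: &[Option<u32>]) -> Result<usize, (&'static str, u32)> {
    let mut valid = 0usize;
    let mut checked = 0u32;
    for (validator, sig) in validators.iter().zip(signatures) {
        if let Some(sig) = sig {
            checked += 1;
            if sig == validator {
                valid += 1;
            } else {
                // One forged signature invalidates the whole proof.
                return Err(("invalid signature", checked));
            }
        }
    }
    if valid >= threshold(validators.len()) {
        Ok(valid)
    } else {
        Err(("not enough signatures", checked))
    }
}

fn main() {
    let validators = [1, 2, 3, 4]; // threshold = 3
    assert_eq!(verify(&validators, &[Some(1), Some(2), Some(3), None]), Ok(3));
    assert_eq!(verify(&validators, &[Some(1), None, None, None]), Err(("not enough signatures", 1)));
    assert_eq!(verify(&validators, &[Some(9), Some(2), Some(3), None]), Err(("invalid signature", 1)));
}
```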
-pub(crate) fn verify_with_validator_set<'a, Block: BlockT>( +pub(crate) fn verify_with_validator_set<'a, Block: BlockT, AuthorityId: AuthorityIdBound>( target_number: NumberFor, validator_set: &'a ValidatorSet, - proof: &'a BeefyVersionedFinalityProof, -) -> Result>, (ConsensusError, u32)> { + proof: &'a BeefyVersionedFinalityProof, +) -> Result< + Vec::Signature>>, + (ConsensusError, u32), +> { match proof { VersionedFinalityProof::V1(signed_commitment) => { let signatories = signed_commitment @@ -78,7 +83,7 @@ pub(crate) fn verify_with_validator_set<'a, Block: BlockT>( pub(crate) mod tests { use codec::Encode; use sp_consensus_beefy::{ - known_payloads, test_utils::Keyring, Commitment, Payload, SignedCommitment, + ecdsa_crypto, known_payloads, test_utils::Keyring, Commitment, Payload, SignedCommitment, VersionedFinalityProof, }; use substrate_test_runtime_client::runtime::Block; @@ -88,9 +93,9 @@ pub(crate) mod tests { pub(crate) fn new_finality_proof( block_num: NumberFor, - validator_set: &ValidatorSet, - keys: &[Keyring], - ) -> BeefyVersionedFinalityProof { + validator_set: &ValidatorSet, + keys: &[Keyring], + ) -> BeefyVersionedFinalityProof { let commitment = Commitment { payload: Payload::from_single_entry(known_payloads::MMR_ROOT_ID, vec![]), block_number: block_num, @@ -112,11 +117,20 @@ pub(crate) mod tests { let good_proof = proof.clone().into(); // should verify successfully - verify_with_validator_set::(block_num, &validator_set, &good_proof).unwrap(); + verify_with_validator_set::( + block_num, + &validator_set, + &good_proof, + ) + .unwrap(); // wrong block number -> should fail verification let good_proof = proof.clone().into(); - match verify_with_validator_set::(block_num + 1, &validator_set, &good_proof) { + match verify_with_validator_set::( + block_num + 1, + &validator_set, + &good_proof, + ) { Err((ConsensusError::InvalidJustification, 0)) => (), e => assert!(false, "Got unexpected {:?}", e), }; @@ -124,7 +138,11 @@ pub(crate) mod tests { // wrong validator set id -> should fail verification let good_proof = proof.clone().into(); let other = ValidatorSet::new(make_beefy_ids(keys), 1).unwrap(); - match verify_with_validator_set::(block_num, &other, &good_proof) { + match verify_with_validator_set::( + block_num, + &other, + &good_proof, + ) { Err((ConsensusError::InvalidJustification, 0)) => (), e => assert!(false, "Got unexpected {:?}", e), }; @@ -136,7 +154,11 @@ pub(crate) mod tests { VersionedFinalityProof::V1(ref mut sc) => sc, }; bad_signed_commitment.signatures.pop().flatten().unwrap(); - match verify_with_validator_set::(block_num + 1, &validator_set, &bad_proof.into()) { + match verify_with_validator_set::( + block_num + 1, + &validator_set, + &bad_proof.into(), + ) { Err((ConsensusError::InvalidJustification, 0)) => (), e => assert!(false, "Got unexpected {:?}", e), }; @@ -148,7 +170,11 @@ pub(crate) mod tests { }; // remove a signature (but same length) *bad_signed_commitment.signatures.first_mut().unwrap() = None; - match verify_with_validator_set::(block_num, &validator_set, &bad_proof.into()) { + match verify_with_validator_set::( + block_num, + &validator_set, + &bad_proof.into(), + ) { Err((ConsensusError::InvalidJustification, 2)) => (), e => assert!(false, "Got unexpected {:?}", e), }; @@ -159,9 +185,15 @@ pub(crate) mod tests { VersionedFinalityProof::V1(ref mut sc) => sc, }; // change a signature to a different key - *bad_signed_commitment.signatures.first_mut().unwrap() = - Some(Keyring::::Dave.sign(&bad_signed_commitment.commitment.encode())); 
- match verify_with_validator_set::(block_num, &validator_set, &bad_proof.into()) { + *bad_signed_commitment.signatures.first_mut().unwrap() = Some( + Keyring::::Dave + .sign(&bad_signed_commitment.commitment.encode()), + ); + match verify_with_validator_set::( + block_num, + &validator_set, + &bad_proof.into(), + ) { Err((ConsensusError::InvalidJustification, 3)) => (), e => assert!(false, "Got unexpected {:?}", e), }; @@ -175,12 +207,17 @@ pub(crate) mod tests { // build valid justification let proof = new_finality_proof(block_num, &validator_set, keys); - let versioned_proof: BeefyVersionedFinalityProof = proof.into(); + let versioned_proof: BeefyVersionedFinalityProof = + proof.into(); let encoded = versioned_proof.encode(); // should successfully decode and verify - let verified = - decode_and_verify_finality_proof::(&encoded, block_num, &validator_set).unwrap(); + let verified = decode_and_verify_finality_proof::( + &encoded, + block_num, + &validator_set, + ) + .unwrap(); assert_eq!(verified, versioned_proof); } } diff --git a/substrate/client/consensus/beefy/src/keystore.rs b/substrate/client/consensus/beefy/src/keystore.rs index 9582c2661c3..8daf3440c7d 100644 --- a/substrate/client/consensus/beefy/src/keystore.rs +++ b/substrate/client/consensus/beefy/src/keystore.rs @@ -15,19 +15,19 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use codec::Decode; +use log::warn; use sp_application_crypto::{key_types::BEEFY as BEEFY_KEY_TYPE, AppCrypto, RuntimeAppPublic}; -use sp_consensus_beefy::{AuthorityIdBound, BeefyAuthorityId, BeefySignatureHasher}; -use sp_core::ecdsa; #[cfg(feature = "bls-experimental")] use sp_core::ecdsa_bls377; -use sp_crypto_hashing::keccak_256; -use sp_keystore::KeystorePtr; +use sp_core::{ecdsa, keccak_256}; -use codec::Decode; -use log::warn; +use sp_keystore::KeystorePtr; use std::marker::PhantomData; +use sp_consensus_beefy::{AuthorityIdBound, BeefyAuthorityId, BeefySignatureHasher}; + use crate::{error, LOG_TARGET}; /// A BEEFY specific keystore implemented as a `Newtype`. This is basically a @@ -175,10 +175,7 @@ impl BeefyKeystore { } } -impl From> for BeefyKeystore -where - ::Signature: Send + Sync, -{ +impl From> for BeefyKeystore { fn from(store: Option) -> BeefyKeystore { BeefyKeystore(store, PhantomData) } diff --git a/substrate/client/consensus/beefy/src/lib.rs b/substrate/client/consensus/beefy/src/lib.rs index 0e49839f0fd..4cb014b00d5 100644 --- a/substrate/client/consensus/beefy/src/lib.rs +++ b/substrate/client/consensus/beefy/src/lib.rs @@ -43,8 +43,7 @@ use sp_api::ProvideRuntimeApi; use sp_blockchain::{Backend as BlockchainBackend, HeaderBackend}; use sp_consensus::{Error as ConsensusError, SyncOracle}; use sp_consensus_beefy::{ - ecdsa_crypto::AuthorityId, BeefyApi, ConsensusLog, PayloadProvider, ValidatorSet, - BEEFY_ENGINE_ID, + AuthorityIdBound, BeefyApi, ConsensusLog, PayloadProvider, ValidatorSet, BEEFY_ENGINE_ID, }; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block, Header as HeaderT, NumberFor, Zero}; @@ -118,50 +117,55 @@ where /// Links between the block importer, the background voter and the RPC layer, /// to be used by the voter. #[derive(Clone)] -pub struct BeefyVoterLinks { +pub struct BeefyVoterLinks { // BlockImport -> Voter links /// Stream of BEEFY signed commitments from block import to voter. 
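
The voter links being made generic in the next hunks are just typed channel halves created in pairs: block import pushes verified justifications to the voter, and the voter pushes finalized proofs out to the RPC layer. Modeled with plain `std::sync::mpsc` channels (the real code uses the `NotificationSender`/`NotificationStream` pair, now parameterized by `AuthorityId`):

```rust
use std::sync::mpsc::{channel, Receiver, Sender};

/// Stand-in for a versioned finality proof.
type Proof = (u64, &'static str);

struct VoterLinks {
    from_block_import: Receiver<Proof>, // BlockImport -> Voter
    to_rpc: Sender<Proof>,              // Voter -> RPC
}

struct RpcLinks {
    from_voter: Receiver<Proof>,
}

fn build_links() -> (Sender<Proof>, VoterLinks, RpcLinks) {
    let (import_tx, import_rx) = channel();
    let (rpc_tx, rpc_rx) = channel();
    (
        import_tx, // handed to the block-import wrapper
        VoterLinks { from_block_import: import_rx, to_rpc: rpc_tx },
        RpcLinks { from_voter: rpc_rx },
    )
}

fn main() {
    let (import_tx, voter, rpc) = build_links();
    // Block import decoded and verified a justification and forwards it.
    import_tx.send((7, "proof-for-#7")).unwrap();
    // The voter picks it up and, once processed, notifies RPC subscribers.
    let proof = voter.from_block_import.recv().unwrap();
    voter.to_rpc.send(proof).unwrap();
    assert_eq!(rpc.from_voter.recv().unwrap().0, 7);
}
```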
- pub from_block_import_justif_stream: BeefyVersionedFinalityProofStream, + pub from_block_import_justif_stream: BeefyVersionedFinalityProofStream, // Voter -> RPC links /// Sends BEEFY signed commitments from voter to RPC. - pub to_rpc_justif_sender: BeefyVersionedFinalityProofSender, + pub to_rpc_justif_sender: BeefyVersionedFinalityProofSender, /// Sends BEEFY best block hashes from voter to RPC. pub to_rpc_best_block_sender: BeefyBestBlockSender, } /// Links used by the BEEFY RPC layer, from the BEEFY background voter. #[derive(Clone)] -pub struct BeefyRPCLinks { +pub struct BeefyRPCLinks { /// Stream of signed commitments coming from the voter. - pub from_voter_justif_stream: BeefyVersionedFinalityProofStream, + pub from_voter_justif_stream: BeefyVersionedFinalityProofStream, /// Stream of BEEFY best block hashes coming from the voter. pub from_voter_best_beefy_stream: BeefyBestBlockStream, } /// Make block importer and link half necessary to tie the background voter to it. -pub fn beefy_block_import_and_links( +pub fn beefy_block_import_and_links( wrapped_block_import: I, backend: Arc, runtime: Arc, prometheus_registry: Option, -) -> (BeefyBlockImport, BeefyVoterLinks, BeefyRPCLinks) +) -> ( + BeefyBlockImport, + BeefyVoterLinks, + BeefyRPCLinks, +) where B: Block, BE: Backend, I: BlockImport + Send + Sync, RuntimeApi: ProvideRuntimeApi + Send + Sync, RuntimeApi::Api: BeefyApi, + AuthorityId: AuthorityIdBound, { // Voter -> RPC links let (to_rpc_justif_sender, from_voter_justif_stream) = - BeefyVersionedFinalityProofStream::::channel(); + BeefyVersionedFinalityProofStream::::channel(); let (to_rpc_best_block_sender, from_voter_best_beefy_stream) = BeefyBestBlockStream::::channel(); // BlockImport -> Voter links let (to_voter_justif_sender, from_block_import_justif_stream) = - BeefyVersionedFinalityProofStream::::channel(); + BeefyVersionedFinalityProofStream::::channel(); let metrics = register_metrics(prometheus_registry); // BlockImport @@ -201,7 +205,7 @@ pub struct BeefyNetworkParams { } /// BEEFY gadget initialization parameters. -pub struct BeefyParams { +pub struct BeefyParams { /// BEEFY client pub client: Arc, /// Client Backend @@ -219,7 +223,7 @@ pub struct BeefyParams { /// Prometheus metric registry pub prometheus_registry: Option, /// Links between the block importer, the background voter and the RPC layer. - pub links: BeefyVoterLinks, + pub links: BeefyVoterLinks, /// Handler for incoming BEEFY justifications requests from a remote peer. pub on_demand_justifications_handler: BeefyJustifsRequestHandler, /// Whether running under "Authority" role. @@ -228,10 +232,10 @@ pub struct BeefyParams { /// Helper object holding BEEFY worker communication/gossip components. /// /// These are created once, but will be reused if worker is restarted/reinitialized. -pub(crate) struct BeefyComms { +pub(crate) struct BeefyComms { pub gossip_engine: GossipEngine, - pub gossip_validator: Arc>, - pub on_demand_justifications: OnDemandJustificationsEngine, + pub gossip_validator: Arc>, + pub on_demand_justifications: OnDemandJustificationsEngine, } /// Helper builder object for building [worker::BeefyWorker]. @@ -240,22 +244,23 @@ pub(crate) struct BeefyComms { /// for certain chain and backend conditions, and while sleeping we still need to pump the /// GossipEngine. Once initialization is done, the GossipEngine (and other pieces) are added to get /// the complete [worker::BeefyWorker] object. 
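
The mechanical change running through this whole commit is the same everywhere: types that hard-coded `ecdsa_crypto::{AuthorityId, Signature}` now take a generic `AuthorityId: AuthorityIdBound` and name the signature type as `<AuthorityId as RuntimeAppPublic>::Signature`. The shape of that pattern in isolation (toy trait and key types, not the real substrate bounds):

```rust
use std::fmt::Debug;

/// Stand-in for `RuntimeAppPublic`/`AuthorityIdBound`: a public key type
/// that knows its own signature type.
trait AuthorityIdBound: Ord + Clone + Debug {
    type Signature: Clone + Debug;
    fn verify(&self, msg: &[u8], sig: &Self::Signature) -> bool;
}

/// Before: `struct VoteMessage { id: EcdsaId, signature: EcdsaSignature }`.
/// After: one definition shared by ECDSA and (experimental) BLS flavours.
#[derive(Debug)]
struct VoteMessage<AuthorityId: AuthorityIdBound> {
    block_number: u64,
    id: AuthorityId,
    signature: AuthorityId::Signature,
}

/// "ECDSA" instantiation of the bound, with toy key/signature types.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct EcdsaId(u8);

impl AuthorityIdBound for EcdsaId {
    type Signature = Vec<u8>;
    fn verify(&self, msg: &[u8], sig: &Self::Signature) -> bool {
        // Toy check only; real code verifies over a keccak256 digest.
        sig.first() == Some(&self.0) && &sig[1..] == msg
    }
}

fn main() {
    let vote = VoteMessage { block_number: 1, id: EcdsaId(7), signature: vec![7, 42] };
    assert_eq!(vote.block_number, 1);
    assert!(vote.id.verify(&[42], &vote.signature));
}
```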
-pub(crate) struct BeefyWorkerBuilder { +pub(crate) struct BeefyWorkerBuilder { // utilities backend: Arc, runtime: Arc, key_store: BeefyKeystore, // voter metrics metrics: Option, - persisted_state: PersistedState, + persisted_state: PersistedState, } -impl BeefyWorkerBuilder +impl BeefyWorkerBuilder where B: Block + codec::Codec, BE: Backend, R: ProvideRuntimeApi, R::Api: BeefyApi, + AuthorityId: AuthorityIdBound, { /// This will wait for the chain to enable BEEFY (if not yet enabled) and also wait for the /// backend to sync all headers required by the voter to build a contiguous chain of mandatory @@ -269,7 +274,7 @@ where key_store: BeefyKeystore, metrics: Option, min_block_delta: u32, - gossip_validator: Arc>, + gossip_validator: Arc>, finality_notifications: &mut Fuse>, is_authority: bool, ) -> Result { @@ -301,11 +306,11 @@ where self, payload_provider: P, sync: Arc, - comms: BeefyComms, - links: BeefyVoterLinks, - pending_justifications: BTreeMap, BeefyVersionedFinalityProof>, + comms: BeefyComms, + links: BeefyVoterLinks, + pending_justifications: BTreeMap, BeefyVersionedFinalityProof>, is_authority: bool, - ) -> BeefyWorker { + ) -> BeefyWorker { let key_store = Arc::new(self.key_store); BeefyWorker { backend: self.backend.clone(), @@ -334,7 +339,7 @@ where min_block_delta: u32, backend: Arc, runtime: Arc, - ) -> Result, Error> { + ) -> Result, Error> { let blockchain = backend.blockchain(); let beefy_genesis = runtime @@ -378,7 +383,7 @@ where beefy_genesis, ) .ok_or_else(|| Error::Backend("Invalid BEEFY chain".into()))?; - break state + break state; } if *header.number() == beefy_genesis { @@ -401,10 +406,10 @@ where min_block_delta, beefy_genesis, ) - .ok_or_else(|| Error::Backend("Invalid BEEFY chain".into()))? + .ok_or_else(|| Error::Backend("Invalid BEEFY chain".into()))?; } - if let Some(active) = find_authorities_change::(&header) { + if let Some(active) = find_authorities_change::(&header) { debug!( target: LOG_TARGET, "๐Ÿฅฉ Marking block {:?} as BEEFY Mandatory.", @@ -431,7 +436,7 @@ where key_store: &BeefyKeystore, metrics: &Option, is_authority: bool, - ) -> Result, Error> { + ) -> Result, Error> { // Initialize voter state from AUX DB if compatible. if let Some(mut state) = crate::aux_schema::load_persistent(backend.as_ref())? // Verify state pallet genesis matches runtime. @@ -448,7 +453,7 @@ where let mut header = best_grandpa.clone(); while *header.number() > state.best_beefy() { if state.voting_oracle().can_add_session(*header.number()) { - if let Some(active) = find_authorities_change::(&header) { + if let Some(active) = find_authorities_change::(&header) { new_sessions.push((active, *header.number())); } } @@ -471,7 +476,7 @@ where is_authority, ); } - return Ok(state) + return Ok(state); } // No valid voter-state persisted, re-initialize from pallet genesis. @@ -482,8 +487,8 @@ where /// Start the BEEFY gadget. /// /// This is a thin shim around running and awaiting a BEEFY worker. -pub async fn start_beefy_gadget( - beefy_params: BeefyParams, +pub async fn start_beefy_gadget( + beefy_params: BeefyParams, ) where B: Block, BE: Backend, @@ -493,6 +498,7 @@ pub async fn start_beefy_gadget( R::Api: BeefyApi, N: GossipNetwork + NetworkRequest + Send + Sync + 'static, S: GossipSyncing + SyncOracle + 'static, + AuthorityId: AuthorityIdBound, { let BeefyParams { client, @@ -598,15 +604,17 @@ pub async fn start_beefy_gadget( futures::future::Either::Left(((error::Error::ConsensusReset, reuse_comms), _)) => { error!(target: LOG_TARGET, "๐Ÿฅฉ Error: {:?}. 
Restarting voter.", error::Error::ConsensusReset); beefy_comms = reuse_comms; - continue + continue; }, // On other errors, bring down / finish the task. - futures::future::Either::Left(((worker_err, _), _)) => - error!(target: LOG_TARGET, "๐Ÿฅฉ Error: {:?}. Terminating.", worker_err), - futures::future::Either::Right((odj_handler_err, _)) => - error!(target: LOG_TARGET, "๐Ÿฅฉ Error: {:?}. Terminating.", odj_handler_err), + futures::future::Either::Left(((worker_err, _), _)) => { + error!(target: LOG_TARGET, "๐Ÿฅฉ Error: {:?}. Terminating.", worker_err) + }, + futures::future::Either::Right((odj_handler_err, _)) => { + error!(target: LOG_TARGET, "๐Ÿฅฉ Error: {:?}. Terminating.", odj_handler_err) + }, }; - return + return; } } @@ -651,7 +659,7 @@ where /// Wait for BEEFY runtime pallet to be available, return active validator set. /// Should be called only once during worker initialization. -async fn wait_for_runtime_pallet( +async fn wait_for_runtime_pallet( runtime: &R, finality: &mut Fuse>, ) -> Result<(NumberFor, ::Header), Error> @@ -676,7 +684,7 @@ where "๐Ÿฅฉ BEEFY pallet available: block {:?} beefy genesis {:?}", notif.header.number(), start ); - return Ok((start, notif.header)) + return Ok((start, notif.header)); } } } @@ -687,7 +695,7 @@ where /// /// Note: function will `async::sleep()` when walking back the chain if some needed header hasn't /// been synced yet (as it happens when warp syncing when headers are synced in the background). -async fn expect_validator_set( +async fn expect_validator_set( runtime: &R, backend: &BE, at_header: &B::Header, @@ -711,9 +719,9 @@ where loop { debug!(target: LOG_TARGET, "๐Ÿฅฉ Looking for auth set change at block number: {:?}", *header.number()); if let Ok(Some(active)) = runtime.runtime_api().validator_set(header.hash()) { - return Ok(active) + return Ok(active); } else { - match find_authorities_change::(&header) { + match find_authorities_change::(&header) { Some(active) => return Ok(active), // Move up the chain. Ultimately we'll get it from chain genesis state, or error out // there. @@ -728,9 +736,12 @@ where /// Scan the `header` digest log for a BEEFY validator set change. Return either the new /// validator set or `None` in case no validator set change has been signaled. -pub(crate) fn find_authorities_change(header: &B::Header) -> Option> +pub(crate) fn find_authorities_change( + header: &B::Header, +) -> Option> where B: Block, + AuthorityId: AuthorityIdBound, { let id = OpaqueDigestItemId::Consensus(&BEEFY_ENGINE_ID); diff --git a/substrate/client/consensus/beefy/src/round.rs b/substrate/client/consensus/beefy/src/round.rs index 5dae80cb183..31cfe4c10c2 100644 --- a/substrate/client/consensus/beefy/src/round.rs +++ b/substrate/client/consensus/beefy/src/round.rs @@ -20,9 +20,10 @@ use crate::LOG_TARGET; use codec::{Decode, Encode}; use log::{debug, info}; +use sp_application_crypto::RuntimeAppPublic; use sp_consensus_beefy::{ - ecdsa_crypto::{AuthorityId, Signature}, - Commitment, DoubleVotingProof, SignedCommitment, ValidatorSet, ValidatorSetId, VoteMessage, + AuthorityIdBound, Commitment, DoubleVotingProof, SignedCommitment, ValidatorSet, + ValidatorSetId, VoteMessage, }; use sp_runtime::traits::{Block, NumberFor}; use std::collections::BTreeMap; @@ -31,15 +32,24 @@ use std::collections::BTreeMap; /// whether the local `self` validator has voted/signed. /// /// Does not do any validation on votes or signatures, layers above need to handle that (gossip). 
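
Per the doc comment above, a `RoundTracker` is no more than a first-write-wins map from authority id to signature, with the round considered done once the map reaches the threshold. A standalone sketch mirroring the `round_tracker` test below (string keys stand in for authority ids and signatures):

```rust
use std::collections::BTreeMap;

#[derive(Default)]
struct RoundTracker {
    votes: BTreeMap<&'static str, &'static str>, // voter id -> signature
}

impl RoundTracker {
    /// Returns false for a repeat vote from the same id (first vote wins).
    fn add_vote(&mut self, id: &'static str, sig: &'static str) -> bool {
        if self.votes.contains_key(id) {
            return false;
        }
        self.votes.insert(id, sig);
        true
    }

    fn is_done(&self, threshold: usize) -> bool {
        self.votes.len() >= threshold
    }
}

fn main() {
    let mut rt = RoundTracker::default();
    let threshold = 2;

    assert!(rt.add_vote("bob", "bob-sig"));
    assert!(!rt.add_vote("bob", "bob-sig")); // duplicate rejected
    assert!(!rt.is_done(threshold)); // 1 of 2

    assert!(rt.add_vote("alice", "alice-sig"));
    assert!(rt.is_done(threshold)); // threshold reached, round can conclude
}
```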
-#[derive(Debug, Decode, Default, Encode, PartialEq)] -pub(crate) struct RoundTracker { - votes: BTreeMap, +#[derive(Debug, Decode, Encode, PartialEq)] +pub(crate) struct RoundTracker { + votes: BTreeMap::Signature>, +} + +impl Default for RoundTracker { + fn default() -> Self { + Self { votes: Default::default() } + } } -impl RoundTracker { - fn add_vote(&mut self, vote: (AuthorityId, Signature)) -> bool { +impl RoundTracker { + fn add_vote( + &mut self, + vote: (AuthorityId, ::Signature), + ) -> bool { if self.votes.contains_key(&vote.0) { - return false + return false; } self.votes.insert(vote.0, vote.1); @@ -58,10 +68,12 @@ pub fn threshold(authorities: usize) -> usize { } #[derive(Debug, PartialEq)] -pub enum VoteImportResult { +pub enum VoteImportResult { Ok, - RoundConcluded(SignedCommitment, Signature>), - DoubleVoting(DoubleVotingProof, AuthorityId, Signature>), + RoundConcluded(SignedCommitment, ::Signature>), + DoubleVoting( + DoubleVotingProof, AuthorityId, ::Signature>, + ), Invalid, Stale, } @@ -71,19 +83,22 @@ pub enum VoteImportResult { /// /// Does not do any validation on votes or signatures, layers above need to handle that (gossip). #[derive(Debug, Decode, Encode, PartialEq)] -pub(crate) struct Rounds { - rounds: BTreeMap>, RoundTracker>, - previous_votes: - BTreeMap<(AuthorityId, NumberFor), VoteMessage, AuthorityId, Signature>>, +pub(crate) struct Rounds { + rounds: BTreeMap>, RoundTracker>, + previous_votes: BTreeMap< + (AuthorityId, NumberFor), + VoteMessage, AuthorityId, ::Signature>, + >, session_start: NumberFor, validator_set: ValidatorSet, mandatory_done: bool, best_done: Option>, } -impl Rounds +impl Rounds where B: Block, + AuthorityId: AuthorityIdBound, { pub(crate) fn new( session_start: NumberFor, @@ -121,14 +136,14 @@ where pub(crate) fn add_vote( &mut self, - vote: VoteMessage, AuthorityId, Signature>, - ) -> VoteImportResult { + vote: VoteMessage, AuthorityId, ::Signature>, + ) -> VoteImportResult { let num = vote.commitment.block_number; let vote_key = (vote.id.clone(), num); if num < self.session_start || Some(num) <= self.best_done { debug!(target: LOG_TARGET, "๐Ÿฅฉ received vote for old stale round {:?}, ignoring", num); - return VoteImportResult::Stale + return VoteImportResult::Stale; } else if vote.commitment.validator_set_id != self.validator_set_id() { debug!( target: LOG_TARGET, @@ -136,14 +151,14 @@ where self.validator_set_id(), vote, ); - return VoteImportResult::Invalid + return VoteImportResult::Invalid; } else if !self.validators().iter().any(|id| &vote.id == id) { debug!( target: LOG_TARGET, "๐Ÿฅฉ received vote {:?} from validator that is not in the validator set, ignoring", vote ); - return VoteImportResult::Invalid + return VoteImportResult::Invalid; } if let Some(previous_vote) = self.previous_votes.get(&vote_key) { @@ -156,7 +171,7 @@ where return VoteImportResult::DoubleVoting(DoubleVotingProof { first: previous_vote.clone(), second: vote, - }) + }); } } else { // this is the first vote sent by `id` for `num`, all good @@ -169,7 +184,7 @@ where round.is_done(threshold(self.validator_set.len())) { if let Some(round) = self.rounds.remove_entry(&vote.commitment) { - return VoteImportResult::RoundConcluded(self.signed_commitment(round)) + return VoteImportResult::RoundConcluded(self.signed_commitment(round)); } } VoteImportResult::Ok @@ -177,8 +192,8 @@ where fn signed_commitment( &mut self, - round: (Commitment>, RoundTracker), - ) -> SignedCommitment, Signature> { + round: (Commitment>, RoundTracker), + ) -> SignedCommitment, 
::Signature> { let votes = round.1.votes; let signatures = self .validators() @@ -207,14 +222,14 @@ mod tests { use sc_network_test::Block; use sp_consensus_beefy::{ - known_payloads::MMR_ROOT_ID, test_utils::Keyring, Commitment, DoubleVotingProof, Payload, - SignedCommitment, ValidatorSet, VoteMessage, + ecdsa_crypto, known_payloads::MMR_ROOT_ID, test_utils::Keyring, Commitment, + DoubleVotingProof, Payload, SignedCommitment, ValidatorSet, VoteMessage, }; - use super::{threshold, AuthorityId, Block as BlockT, RoundTracker, Rounds}; + use super::{threshold, Block as BlockT, RoundTracker, Rounds}; use crate::round::VoteImportResult; - impl Rounds + impl Rounds where B: BlockT, { @@ -225,8 +240,11 @@ mod tests { #[test] fn round_tracker() { - let mut rt = RoundTracker::default(); - let bob_vote = (Keyring::Bob.public(), Keyring::::Bob.sign(b"I am committed")); + let mut rt = RoundTracker::::default(); + let bob_vote = ( + Keyring::::Bob.public(), + Keyring::::Bob.sign(b"I am committed"), + ); let threshold = 2; // adding new vote allowed @@ -237,8 +255,10 @@ mod tests { // vote is not done assert!(!rt.is_done(threshold)); - let alice_vote = - (Keyring::Alice.public(), Keyring::::Alice.sign(b"I am committed")); + let alice_vote = ( + Keyring::::Alice.public(), + Keyring::::Alice.sign(b"I am committed"), + ); // adding new vote (self vote this time) allowed assert!(rt.add_vote(alice_vote)); @@ -260,22 +280,22 @@ mod tests { fn new_rounds() { sp_tracing::try_init_simple(); - let validators = ValidatorSet::::new( + let validators = ValidatorSet::::new( vec![Keyring::Alice.public(), Keyring::Bob.public(), Keyring::Charlie.public()], 42, ) .unwrap(); let session_start = 1u64.into(); - let rounds = Rounds::::new(session_start, validators); + let rounds = Rounds::::new(session_start, validators); assert_eq!(42, rounds.validator_set_id()); assert_eq!(1, rounds.session_start()); assert_eq!( &vec![ - Keyring::::Alice.public(), - Keyring::::Bob.public(), - Keyring::::Charlie.public() + Keyring::::Alice.public(), + Keyring::::Bob.public(), + Keyring::::Charlie.public() ], rounds.validators() ); @@ -285,7 +305,7 @@ mod tests { fn add_and_conclude_votes() { sp_tracing::try_init_simple(); - let validators = ValidatorSet::::new( + let validators = ValidatorSet::::new( vec![ Keyring::Alice.public(), Keyring::Bob.public(), @@ -298,7 +318,7 @@ mod tests { let validator_set_id = validators.id(); let session_start = 1u64.into(); - let mut rounds = Rounds::::new(session_start, validators); + let mut rounds = Rounds::::new(session_start, validators); let payload = Payload::from_single_entry(MMR_ROOT_ID, vec![]); let block_number = 1; @@ -306,7 +326,7 @@ mod tests { let mut vote = VoteMessage { id: Keyring::Alice.public(), commitment: commitment.clone(), - signature: Keyring::::Alice.sign(b"I am committed"), + signature: Keyring::::Alice.sign(b"I am committed"), }; // add 1st good vote assert_eq!(rounds.add_vote(vote.clone()), VoteImportResult::Ok); @@ -315,26 +335,26 @@ mod tests { assert_eq!(rounds.add_vote(vote.clone()), VoteImportResult::Ok); vote.id = Keyring::Dave.public(); - vote.signature = Keyring::::Dave.sign(b"I am committed"); + vote.signature = Keyring::::Dave.sign(b"I am committed"); // invalid vote (Dave is not a validator) assert_eq!(rounds.add_vote(vote.clone()), VoteImportResult::Invalid); vote.id = Keyring::Bob.public(); - vote.signature = Keyring::::Bob.sign(b"I am committed"); + vote.signature = Keyring::::Bob.sign(b"I am committed"); // add 2nd good vote 
assert_eq!(rounds.add_vote(vote.clone()), VoteImportResult::Ok); vote.id = Keyring::Charlie.public(); - vote.signature = Keyring::::Charlie.sign(b"I am committed"); + vote.signature = Keyring::::Charlie.sign(b"I am committed"); // add 3rd good vote -> round concluded -> signatures present assert_eq!( rounds.add_vote(vote.clone()), VoteImportResult::RoundConcluded(SignedCommitment { commitment, signatures: vec![ - Some(Keyring::::Alice.sign(b"I am committed")), - Some(Keyring::::Bob.sign(b"I am committed")), - Some(Keyring::::Charlie.sign(b"I am committed")), + Some(Keyring::::Alice.sign(b"I am committed")), + Some(Keyring::::Bob.sign(b"I am committed")), + Some(Keyring::::Charlie.sign(b"I am committed")), None, ] }) @@ -342,7 +362,7 @@ mod tests { rounds.conclude(block_number); vote.id = Keyring::Eve.public(); - vote.signature = Keyring::::Eve.sign(b"I am committed"); + vote.signature = Keyring::::Eve.sign(b"I am committed"); // Eve is a validator, but round was concluded, adding vote disallowed assert_eq!(rounds.add_vote(vote), VoteImportResult::Stale); } @@ -351,7 +371,7 @@ mod tests { fn old_rounds_not_accepted() { sp_tracing::try_init_simple(); - let validators = ValidatorSet::::new( + let validators = ValidatorSet::::new( vec![Keyring::Alice.public(), Keyring::Bob.public(), Keyring::Charlie.public()], 42, ) @@ -360,7 +380,7 @@ mod tests { // active rounds starts at block 10 let session_start = 10u64.into(); - let mut rounds = Rounds::::new(session_start, validators); + let mut rounds = Rounds::::new(session_start, validators); // vote on round 9 let block_number = 9; @@ -369,7 +389,7 @@ mod tests { let mut vote = VoteMessage { id: Keyring::Alice.public(), commitment, - signature: Keyring::::Alice.sign(b"I am committed"), + signature: Keyring::::Alice.sign(b"I am committed"), }; // add vote for previous session, should fail assert_eq!(rounds.add_vote(vote.clone()), VoteImportResult::Stale); @@ -397,7 +417,7 @@ mod tests { fn multiple_rounds() { sp_tracing::try_init_simple(); - let validators = ValidatorSet::::new( + let validators = ValidatorSet::::new( vec![Keyring::Alice.public(), Keyring::Bob.public(), Keyring::Charlie.public()], Default::default(), ) @@ -405,29 +425,29 @@ mod tests { let validator_set_id = validators.id(); let session_start = 1u64.into(); - let mut rounds = Rounds::::new(session_start, validators); + let mut rounds = Rounds::::new(session_start, validators); let payload = Payload::from_single_entry(MMR_ROOT_ID, vec![]); let commitment = Commitment { block_number: 1, payload, validator_set_id }; let mut alice_vote = VoteMessage { id: Keyring::Alice.public(), commitment: commitment.clone(), - signature: Keyring::::Alice.sign(b"I am committed"), + signature: Keyring::::Alice.sign(b"I am committed"), }; let mut bob_vote = VoteMessage { id: Keyring::Bob.public(), commitment: commitment.clone(), - signature: Keyring::::Bob.sign(b"I am committed"), + signature: Keyring::::Bob.sign(b"I am committed"), }; let mut charlie_vote = VoteMessage { id: Keyring::Charlie.public(), commitment, - signature: Keyring::::Charlie.sign(b"I am committed"), + signature: Keyring::::Charlie.sign(b"I am committed"), }; let expected_signatures = vec![ - Some(Keyring::::Alice.sign(b"I am committed")), - Some(Keyring::::Bob.sign(b"I am committed")), - Some(Keyring::::Charlie.sign(b"I am committed")), + Some(Keyring::::Alice.sign(b"I am committed")), + Some(Keyring::::Bob.sign(b"I am committed")), + Some(Keyring::::Charlie.sign(b"I am committed")), ]; // round 1 - only 2 out of 3 vote @@ -472,14 
+492,14 @@ mod tests { fn should_provide_equivocation_proof() { sp_tracing::try_init_simple(); - let validators = ValidatorSet::::new( + let validators = ValidatorSet::::new( vec![Keyring::Alice.public(), Keyring::Bob.public()], Default::default(), ) .unwrap(); let validator_set_id = validators.id(); let session_start = 1u64.into(); - let mut rounds = Rounds::::new(session_start, validators); + let mut rounds = Rounds::::new(session_start, validators); let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![1, 1, 1, 1]); let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![2, 2, 2, 2]); @@ -489,7 +509,7 @@ mod tests { let alice_vote1 = VoteMessage { id: Keyring::Alice.public(), commitment: commitment1, - signature: Keyring::::Alice.sign(b"I am committed"), + signature: Keyring::::Alice.sign(b"I am committed"), }; let mut alice_vote2 = alice_vote1.clone(); alice_vote2.commitment = commitment2; diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs index 2bb145d660d..681e11a0c53 100644 --- a/substrate/client/consensus/beefy/src/tests.rs +++ b/substrate/client/consensus/beefy/src/tests.rs @@ -55,6 +55,7 @@ use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_application_crypto::key_types::BEEFY as BEEFY_KEY_TYPE; use sp_consensus::BlockOrigin; use sp_consensus_beefy::{ + ecdsa_crypto, ecdsa_crypto::{AuthorityId, Signature}, known_payloads, mmr::{find_mmr_root_digest, MmrRootProvider}, @@ -89,6 +90,7 @@ type BeefyBlockImport = crate::BeefyBlockImport< substrate_test_runtime_client::Backend, TestApi, BlockImportAdapter, + AuthorityId, >; pub(crate) type BeefyValidatorSet = ValidatorSet; @@ -107,8 +109,8 @@ impl BuildStorage for Genesis { #[derive(Default)] pub(crate) struct PeerData { - pub(crate) beefy_rpc_links: Mutex>>, - pub(crate) beefy_voter_links: Mutex>>, + pub(crate) beefy_rpc_links: Mutex>>, + pub(crate) beefy_voter_links: Mutex>>, pub(crate) beefy_justif_req_handler: Mutex>>, } @@ -371,7 +373,7 @@ async fn voter_init_setup( net: &mut BeefyTestNet, finality: &mut futures::stream::Fuse>, api: &TestApi, -) -> Result, Error> { +) -> Result, Error> { let backend = net.peer(0).client().as_backend(); let (beefy_genesis, best_grandpa) = wait_for_runtime_pallet(api, finality).await.unwrap(); let key_store = None.into(); @@ -446,7 +448,7 @@ where on_demand_justifications_handler: on_demand_justif_handler, is_authority: true, }; - let task = crate::start_beefy_gadget::<_, _, _, _, _, _, _>(beefy_params); + let task = crate::start_beefy_gadget::<_, _, _, _, _, _, _, _>(beefy_params); fn assert_send(_: &T) {} assert_send(&task); @@ -472,8 +474,10 @@ pub(crate) fn get_beefy_streams( net: &mut BeefyTestNet, // peer index and key peers: impl Iterator)>, -) -> (Vec>, Vec>>) -{ +) -> ( + Vec>, + Vec>>, +) { let mut best_block_streams = Vec::new(); let mut versioned_finality_proof_streams = Vec::new(); peers.for_each(|(index, _)| { @@ -511,7 +515,7 @@ async fn wait_for_best_beefy_blocks( } async fn wait_for_beefy_signed_commitments( - streams: Vec>>, + streams: Vec>>, net: &Arc>, expected_commitment_block_nums: &[u64], ) { @@ -1417,7 +1421,7 @@ async fn beefy_reports_equivocations() { for wait_ms in [250, 500, 1250, 3000] { run_for(Duration::from_millis(wait_ms), &net).await; if !api_alice.reported_equivocations.as_ref().unwrap().lock().is_empty() { - break + break; } } @@ -1457,7 +1461,7 @@ async fn gossipped_finality_proofs() { // Charlie will run just the gossip engine and not the full voter. 
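
Every gossip-message assertion in these tests round-trips through SCALE: build a message, `encode()` it, and have the validator `decode_all` it back before classifying. The round-trip in isolation, assuming the `parity-scale-codec` crate (imported as `codec`, with the `derive` feature) and a simplified two-variant message:

```rust
use codec::{Decode, DecodeAll, Encode};

#[derive(Debug, PartialEq, Encode, Decode)]
enum GossipMessage {
    /// A single validator's vote for a round.
    Vote { round: u64, set_id: u64 },
    /// An aggregated finality proof for a round.
    FinalityProof { round: u64 },
}

fn main() {
    let msg = GossipMessage::Vote { round: 3, set_id: 0 };
    let encoded = msg.encode();

    // `decode_all` fails if any trailing bytes remain, which is what the
    // gossip validator wants: a strict, exact decoding of network input.
    let decoded = GossipMessage::decode_all(&mut &encoded[..]).unwrap();
    assert_eq!(decoded, msg);

    // Proofs round-trip the same way.
    let proof = GossipMessage::FinalityProof { round: 3 }.encode();
    assert!(GossipMessage::decode_all(&mut &proof[..]).is_ok());

    // Garbage input is rejected rather than panicking.
    assert!(GossipMessage::decode_all(&mut &[0xff, 0xff][..]).is_err());
}
```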
let gossip_validator = GossipValidator::new(known_peers, Arc::new(TestNetwork::new().0)); let charlie_gossip_validator = Arc::new(gossip_validator); - charlie_gossip_validator.update_filter(GossipFilterCfg:: { + charlie_gossip_validator.update_filter(GossipFilterCfg:: { start: 1, end: 10, validator_set: &validator_set, @@ -1501,7 +1505,7 @@ async fn gossipped_finality_proofs() { let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); // Charlie gossips finality proof for #1 -> Alice and Bob also finalize. let proof = crate::communication::gossip::tests::dummy_proof(1, &validator_set); - let gossip_proof = GossipMessage::::FinalityProof(proof); + let gossip_proof = GossipMessage::::FinalityProof(proof); let encoded_proof = gossip_proof.encode(); charlie_gossip_engine.gossip_message(proofs_topic::(), encoded_proof, true); // Expect #1 is finalized. @@ -1526,7 +1530,8 @@ async fn gossipped_finality_proofs() { let commitment = Commitment { payload, block_number, validator_set_id: validator_set.id() }; let signature = sign_commitment(&BeefyKeyring::Charlie, &commitment); let vote_message = VoteMessage { commitment, id: BeefyKeyring::Charlie.public(), signature }; - let encoded_vote = GossipMessage::::Vote(vote_message).encode(); + let encoded_vote = + GossipMessage::::Vote(vote_message).encode(); charlie_gossip_engine.gossip_message(votes_topic::(), encoded_vote, true); // Expect #2 is finalized. @@ -1538,12 +1543,15 @@ async fn gossipped_finality_proofs() { charlie_gossip_engine .messages_for(proofs_topic::()) .filter_map(|notification| async move { - GossipMessage::::decode(&mut ¬ification.message[..]).ok().and_then( - |message| match message { - GossipMessage::::Vote(_) => unreachable!(), - GossipMessage::::FinalityProof(proof) => Some(proof), - }, + GossipMessage::::decode( + &mut ¬ification.message[..], ) + .ok() + .and_then(|message| match message { + GossipMessage::::Vote(_) => unreachable!(), + GossipMessage::::FinalityProof(proof) => + Some(proof), + }) }) .fuse(), ); @@ -1561,7 +1569,7 @@ async fn gossipped_finality_proofs() { // verify finality proof has been gossipped proof = charlie_gossip_proofs.next() => { let proof = proof.unwrap(); - let (round, _) = proof_block_num_and_set_id::(&proof); + let (round, _) = proof_block_num_and_set_id::(&proof); match round { 1 => continue, // finality proof generated by Charlie in the previous round 2 => break, // finality proof generated by Alice or Bob and gossiped to Charlie diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs index cfbb3d63aea..3ce4da7ecd5 100644 --- a/substrate/client/consensus/beefy/src/worker.rs +++ b/substrate/client/consensus/beefy/src/worker.rs @@ -31,6 +31,8 @@ use crate::{ round::{Rounds, VoteImportResult}, BeefyComms, BeefyVoterLinks, LOG_TARGET, }; +use sp_application_crypto::RuntimeAppPublic; + use codec::{Codec, Decode, DecodeAll, Encode}; use futures::{stream::Fuse, FutureExt, StreamExt}; use log::{debug, error, info, trace, warn}; @@ -40,9 +42,8 @@ use sp_api::ProvideRuntimeApi; use sp_arithmetic::traits::{AtLeast32Bit, Saturating}; use sp_consensus::SyncOracle; use sp_consensus_beefy::{ - ecdsa_crypto::{AuthorityId, Signature}, - BeefyApi, Commitment, DoubleVotingProof, PayloadProvider, ValidatorSet, VersionedFinalityProof, - VoteMessage, BEEFY_ENGINE_ID, + AuthorityIdBound, BeefyApi, Commitment, DoubleVotingProof, PayloadProvider, ValidatorSet, + VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID, }; use 
sp_runtime::{ generic::BlockId, @@ -52,6 +53,7 @@ use sp_runtime::{ use std::{ collections::{BTreeMap, VecDeque}, fmt::Debug, + marker::PhantomData, sync::Arc, }; @@ -72,7 +74,7 @@ pub(crate) enum RoundAction { /// Note: this is part of `PersistedState` so any changes here should also bump /// aux-db schema version. #[derive(Debug, Decode, Encode, PartialEq)] -pub(crate) struct VoterOracle { +pub(crate) struct VoterOracle { /// Queue of known sessions. Keeps track of voting rounds (block numbers) within each session. /// /// There are three voter states corresponding to three queue states: @@ -82,19 +84,23 @@ pub(crate) struct VoterOracle { /// 3. lagging behind GRANDPA: queue has [1, N] elements, where all `mandatory_done == false`. /// In this state, every time a session gets its mandatory block BEEFY finalized, it's /// popped off the queue, eventually getting to state `2. up-to-date`. - sessions: VecDeque>, + sessions: VecDeque>, /// Min delta in block numbers between two blocks, BEEFY should vote on. min_block_delta: u32, /// Best block we received a GRANDPA finality for. best_grandpa_block_header: ::Header, /// Best block a BEEFY voting round has been concluded for. best_beefy_block: NumberFor, + _phantom: PhantomData AuthorityId>, } -impl VoterOracle { +impl VoterOracle +where + AuthorityId: AuthorityIdBound, +{ /// Verify provided `sessions` satisfies requirements, then build `VoterOracle`. pub fn checked_new( - sessions: VecDeque>, + sessions: VecDeque>, min_block_delta: u32, grandpa_header: ::Header, best_beefy: NumberFor, @@ -105,24 +111,24 @@ impl VoterOracle { let mut validate = || -> bool { let best_grandpa = *grandpa_header.number(); if sessions.is_empty() || best_beefy > best_grandpa { - return false + return false; } for (idx, session) in sessions.iter().enumerate() { let start = session.session_start(); if session.validators().is_empty() { - return false + return false; } if start > best_grandpa || start <= prev_start { - return false + return false; } #[cfg(not(test))] if let Some(prev_id) = prev_validator_id { if session.validator_set_id() <= prev_id { - return false + return false; } } if idx != 0 && session.mandatory_done() { - return false + return false; } prev_start = session.session_start(); prev_validator_id = Some(session.validator_set_id()); @@ -136,6 +142,7 @@ impl VoterOracle { min_block_delta: min_block_delta.max(1), best_grandpa_block_header: grandpa_header, best_beefy_block: best_beefy, + _phantom: PhantomData, }) } else { error!( @@ -151,13 +158,13 @@ impl VoterOracle { // Return reference to rounds pertaining to first session in the queue. // Voting will always happen at the head of the queue. - fn active_rounds(&self) -> Result<&Rounds, Error> { + fn active_rounds(&self) -> Result<&Rounds, Error> { self.sessions.front().ok_or(Error::UninitSession) } // Return mutable reference to rounds pertaining to first session in the queue. // Voting will always happen at the head of the queue. - fn active_rounds_mut(&mut self) -> Result<&mut Rounds, Error> { + fn active_rounds_mut(&mut self) -> Result<&mut Rounds, Error> { self.sessions.front_mut().ok_or(Error::UninitSession) } @@ -183,7 +190,7 @@ impl VoterOracle { } /// Add new observed session to the Oracle. - pub fn add_session(&mut self, rounds: Rounds) { + pub fn add_session(&mut self, rounds: Rounds) { self.sessions.push_back(rounds); // Once we add a new session we can drop/prune previous session if it's been finalized. 
self.try_prune(); @@ -267,21 +274,21 @@ impl VoterOracle { /// /// Note: Any changes here should also bump aux-db schema version. #[derive(Debug, Decode, Encode, PartialEq)] -pub(crate) struct PersistedState { +pub(crate) struct PersistedState { /// Best block we voted on. best_voted: NumberFor, /// Chooses which incoming votes to accept and which votes to generate. /// Keeps track of voting seen for current and future rounds. - voting_oracle: VoterOracle, + voting_oracle: VoterOracle, /// Pallet-beefy genesis block - block number when BEEFY consensus started for this chain. pallet_genesis: NumberFor, } -impl PersistedState { +impl PersistedState { pub fn checked_new( grandpa_header: ::Header, best_beefy: NumberFor, - sessions: VecDeque>, + sessions: VecDeque>, min_block_delta: u32, pallet_genesis: NumberFor, ) -> Option { @@ -314,11 +321,11 @@ impl PersistedState { self.voting_oracle.best_grandpa_block_header = best_grandpa; } - pub fn voting_oracle(&self) -> &VoterOracle { + pub fn voting_oracle(&self) -> &VoterOracle { &self.voting_oracle } - pub(crate) fn gossip_filter_config(&self) -> Result, Error> { + pub(crate) fn gossip_filter_config(&self) -> Result, Error> { let (start, end) = self.voting_oracle.accepted_interval()?; let validator_set = self.voting_oracle.current_validator_set()?; Ok(GossipFilterCfg { start, end, validator_set }) @@ -373,34 +380,34 @@ impl PersistedState { } /// A BEEFY worker/voter that follows the BEEFY protocol -pub(crate) struct BeefyWorker { +pub(crate) struct BeefyWorker { // utilities pub backend: Arc, pub runtime: Arc, pub key_store: Arc>, pub payload_provider: P, pub sync: Arc, - pub fisherman: Arc>, + pub fisherman: Arc>, // communication (created once, but returned and reused if worker is restarted/reinitialized) - pub comms: BeefyComms, + pub comms: BeefyComms, // channels /// Links between the block importer, the background voter and the RPC layer. - pub links: BeefyVoterLinks, + pub links: BeefyVoterLinks, // voter state /// Buffer holding justifications for future processing. - pub pending_justifications: BTreeMap, BeefyVersionedFinalityProof>, + pub pending_justifications: BTreeMap, BeefyVersionedFinalityProof>, /// Persisted voter state. - pub persisted_state: PersistedState, + pub persisted_state: PersistedState, /// BEEFY voter metrics pub metrics: Option, /// Node runs under "Authority" role. pub is_authority: bool, } -impl BeefyWorker +impl BeefyWorker where B: Block + Codec, BE: Backend, @@ -408,17 +415,18 @@ where S: SyncOracle, R: ProvideRuntimeApi, R::Api: BeefyApi, + AuthorityId: AuthorityIdBound, { fn best_grandpa_block(&self) -> NumberFor { *self.persisted_state.voting_oracle.best_grandpa_block_header.number() } - fn voting_oracle(&self) -> &VoterOracle { + fn voting_oracle(&self) -> &VoterOracle { &self.persisted_state.voting_oracle } #[cfg(test)] - fn active_rounds(&mut self) -> Result<&Rounds, Error> { + fn active_rounds(&mut self) -> Result<&Rounds, Error> { self.persisted_state.voting_oracle.active_rounds() } @@ -476,7 +484,8 @@ where }) .chain(std::iter::once(header.clone())) { - if let Some(new_validator_set) = find_authorities_change::(&header) { + if let Some(new_validator_set) = find_authorities_change::(&header) + { self.init_session_at(new_validator_set, *header.number()); new_session_added = true; } @@ -503,13 +512,17 @@ where /// Based on [VoterOracle] this vote is either processed here or discarded. 
fn triage_incoming_vote( &mut self, - vote: VoteMessage, AuthorityId, Signature>, - ) -> Result<(), Error> { + vote: VoteMessage, AuthorityId, ::Signature>, + ) -> Result<(), Error> + where + ::Signature: Encode + Decode, + { let block_num = vote.commitment.block_number; match self.voting_oracle().triage_round(block_num)? { RoundAction::Process => if let Some(finality_proof) = self.handle_vote(vote)? { - let gossip_proof = GossipMessage::::FinalityProof(finality_proof); + let gossip_proof = + GossipMessage::::FinalityProof(finality_proof); let encoded_proof = gossip_proof.encode(); self.comms.gossip_engine.gossip_message( proofs_topic::(), @@ -528,7 +541,7 @@ where /// Expects `justification` to be valid. fn triage_incoming_justif( &mut self, - justification: BeefyVersionedFinalityProof, + justification: BeefyVersionedFinalityProof, ) -> Result<(), Error> { let signed_commitment = match justification { VersionedFinalityProof::V1(ref sc) => sc, @@ -560,8 +573,8 @@ where fn handle_vote( &mut self, - vote: VoteMessage, AuthorityId, Signature>, - ) -> Result>, Error> { + vote: VoteMessage, AuthorityId, ::Signature>, + ) -> Result>, Error> { let rounds = self.persisted_state.voting_oracle.active_rounds_mut()?; let block_number = vote.commitment.block_number; @@ -576,7 +589,7 @@ where // New state is persisted after finalization. self.finalize(finality_proof.clone())?; metric_inc!(self.metrics, beefy_good_votes_processed); - return Ok(Some(finality_proof)) + return Ok(Some(finality_proof)); }, VoteImportResult::Ok => { // Persist state after handling mandatory block vote. @@ -608,14 +621,17 @@ where /// 4. Send best block hash and `finality_proof` to RPC worker. /// /// Expects `finality proof` to be valid and for a block > current-best-beefy. - fn finalize(&mut self, finality_proof: BeefyVersionedFinalityProof) -> Result<(), Error> { + fn finalize( + &mut self, + finality_proof: BeefyVersionedFinalityProof, + ) -> Result<(), Error> { let block_num = match finality_proof { VersionedFinalityProof::V1(ref sc) => sc.commitment.block_number, }; if block_num <= self.persisted_state.voting_oracle.best_beefy_block { // we've already finalized this round before, short-circuit. - return Ok(()) + return Ok(()); } // Finalize inner round and update voting_oracle state. @@ -740,7 +756,7 @@ where hash } else { warn!(target: LOG_TARGET, "๐Ÿฅฉ No MMR root digest found for: {:?}", target_hash); - return Ok(()) + return Ok(()); }; let rounds = self.persisted_state.voting_oracle.active_rounds_mut()?; @@ -754,7 +770,7 @@ where target: LOG_TARGET, "๐Ÿฅฉ Missing validator id - can't vote for: {:?}", target_hash ); - return Ok(()) + return Ok(()); }; let commitment = Commitment { payload, block_number: target_number, validator_set_id }; @@ -764,7 +780,7 @@ where Ok(sig) => sig, Err(err) => { warn!(target: LOG_TARGET, "๐Ÿฅฉ Error signing commitment: {:?}", err); - return Ok(()) + return Ok(()); }, }; @@ -780,14 +796,15 @@ where error!(target: LOG_TARGET, "๐Ÿฅฉ Error handling self vote: {}", err); err })? 
{ - let encoded_proof = GossipMessage::::FinalityProof(finality_proof).encode(); + let encoded_proof = + GossipMessage::::FinalityProof(finality_proof).encode(); self.comms .gossip_engine .gossip_message(proofs_topic::(), encoded_proof, true); } else { metric_inc!(self.metrics, beefy_votes_sent); debug!(target: LOG_TARGET, "๐Ÿฅฉ Sent vote message: {:?}", vote); - let encoded_vote = GossipMessage::::Vote(vote).encode(); + let encoded_vote = GossipMessage::::Vote(vote).encode(); self.comms.gossip_engine.gossip_message(votes_topic::(), encoded_vote, false); } @@ -825,9 +842,11 @@ where /// Should never end, returns `Error` otherwise. pub(crate) async fn run( mut self, - block_import_justif: &mut Fuse>>, + block_import_justif: &mut Fuse< + NotificationReceiver>, + >, finality_notifications: &mut Fuse>, - ) -> (Error, BeefyComms) { + ) -> (Error, BeefyComms) { info!( target: LOG_TARGET, "๐Ÿฅฉ run BEEFY worker, best grandpa: #{:?}.", @@ -839,9 +858,10 @@ where .gossip_engine .messages_for(votes_topic::()) .filter_map(|notification| async move { - let vote = GossipMessage::::decode_all(&mut ¬ification.message[..]) - .ok() - .and_then(|message| message.unwrap_vote()); + let vote = + GossipMessage::::decode_all(&mut ¬ification.message[..]) + .ok() + .and_then(|message| message.unwrap_vote()); trace!(target: LOG_TARGET, "๐Ÿฅฉ Got vote message: {:?}", vote); vote }) @@ -852,9 +872,10 @@ where .gossip_engine .messages_for(proofs_topic::()) .filter_map(|notification| async move { - let proof = GossipMessage::::decode_all(&mut ¬ification.message[..]) - .ok() - .and_then(|message| message.unwrap_finality_proof()); + let proof = + GossipMessage::::decode_all(&mut ¬ification.message[..]) + .ok() + .and_then(|message| message.unwrap_finality_proof()); trace!(target: LOG_TARGET, "๐Ÿฅฉ Got gossip proof message: {:?}", proof); proof }) @@ -945,7 +966,11 @@ where /// Report the given equivocation to the BEEFY runtime module. 
fn report_double_voting( &self, - proof: DoubleVotingProof, AuthorityId, Signature>, + proof: DoubleVotingProof< + NumberFor, + AuthorityId, + ::Signature, + >, ) -> Result<(), Error> { let rounds = self.persisted_state.voting_oracle.active_rounds()?; self.fisherman.report_double_voting(proof, rounds) @@ -1011,7 +1036,7 @@ pub(crate) mod tests { use sc_network_test::TestNetFactory; use sp_blockchain::Backend as BlockchainBackendT; use sp_consensus_beefy::{ - known_payloads, + ecdsa_crypto, known_payloads, known_payloads::MMR_ROOT_ID, mmr::MmrRootProvider, test_utils::{generate_equivocation_proof, Keyring}, @@ -1023,8 +1048,8 @@ pub(crate) mod tests { Backend, }; - impl PersistedState { - pub fn active_round(&self) -> Result<&Rounds, Error> { + impl PersistedState { + pub fn active_round(&self) -> Result<&Rounds, Error> { self.voting_oracle.active_rounds() } @@ -1033,17 +1058,17 @@ pub(crate) mod tests { } } - impl VoterOracle { - pub fn sessions(&self) -> &VecDeque> { + impl VoterOracle { + pub fn sessions(&self) -> &VecDeque> { &self.sessions } } fn create_beefy_worker( peer: &mut BeefyPeer, - key: &Keyring, + key: &Keyring, min_block_delta: u32, - genesis_validator_set: ValidatorSet, + genesis_validator_set: ValidatorSet, ) -> BeefyWorker< Block, Backend, @@ -1051,15 +1076,16 @@ pub(crate) mod tests { TestApi, Arc>, TestNetwork, + ecdsa_crypto::AuthorityId, > { let keystore = create_beefy_keystore(key); let (to_rpc_justif_sender, from_voter_justif_stream) = - BeefyVersionedFinalityProofStream::::channel(); + BeefyVersionedFinalityProofStream::::channel(); let (to_rpc_best_block_sender, from_voter_best_beefy_stream) = BeefyBestBlockStream::::channel(); let (_, from_block_import_justif_stream) = - BeefyVersionedFinalityProofStream::::channel(); + BeefyVersionedFinalityProofStream::::channel(); let beefy_rpc_links = BeefyRPCLinks { from_voter_justif_stream, from_voter_best_beefy_stream }; @@ -1115,7 +1141,8 @@ pub(crate) mod tests { .unwrap(); let payload_provider = MmrRootProvider::new(api.clone()); let comms = BeefyComms { gossip_engine, gossip_validator, on_demand_justifications }; - let key_store: Arc> = Arc::new(Some(keystore).into()); + let key_store: Arc> = + Arc::new(Some(keystore).into()); BeefyWorker { backend: backend.clone(), runtime: api.clone(), @@ -1233,13 +1260,14 @@ pub(crate) mod tests { Default::default(), Digest::default(), ); - let mut oracle = VoterOracle:: { + let mut oracle = VoterOracle:: { best_beefy_block: 0, best_grandpa_block_header: header, min_block_delta: 1, sessions: VecDeque::new(), + _phantom: PhantomData, }; - let voting_target_with = |oracle: &mut VoterOracle, + let voting_target_with = |oracle: &mut VoterOracle, best_beefy: NumberFor, best_grandpa: NumberFor| -> Option> { @@ -1295,18 +1323,20 @@ pub(crate) mod tests { Default::default(), Digest::default(), ); - let mut oracle = VoterOracle:: { + let mut oracle = VoterOracle:: { best_beefy_block: 0, best_grandpa_block_header: header, min_block_delta: 1, sessions: VecDeque::new(), + _phantom: PhantomData, }; - let accepted_interval_with = |oracle: &mut VoterOracle, - best_grandpa: NumberFor| - -> Result<(NumberFor, NumberFor), Error> { - oracle.best_grandpa_block_header.number = best_grandpa; - oracle.accepted_interval() - }; + let accepted_interval_with = + |oracle: &mut VoterOracle, + best_grandpa: NumberFor| + -> Result<(NumberFor, NumberFor), Error> { + oracle.best_grandpa_block_header.number = best_grandpa; + oracle.accepted_interval() + }; // rounds not initialized -> should accept votes: `None` 
assert!(accepted_interval_with(&mut oracle, 1).is_err()); @@ -1377,18 +1407,19 @@ pub(crate) mod tests { ); // verify empty digest shows nothing - assert!(find_authorities_change::(&header).is_none()); + assert!(find_authorities_change::(&header).is_none()); let peers = &[Keyring::One, Keyring::Two]; let id = 42; let validator_set = ValidatorSet::new(make_beefy_ids(peers), id).unwrap(); header.digest_mut().push(DigestItem::Consensus( BEEFY_ENGINE_ID, - ConsensusLog::::AuthoritiesChange(validator_set.clone()).encode(), + ConsensusLog::::AuthoritiesChange(validator_set.clone()) + .encode(), )); // verify validator set is correctly extracted from digest - let extracted = find_authorities_change::(&header); + let extracted = find_authorities_change::(&header); assert_eq!(extracted, Some(validator_set)); } diff --git a/substrate/primitives/consensus/beefy/src/lib.rs b/substrate/primitives/consensus/beefy/src/lib.rs index f70434beab3..913184402ae 100644 --- a/substrate/primitives/consensus/beefy/src/lib.rs +++ b/substrate/primitives/consensus/beefy/src/lib.rs @@ -50,7 +50,7 @@ use alloc::vec::Vec; use codec::{Codec, Decode, Encode}; use core::fmt::{Debug, Display}; use scale_info::TypeInfo; -use sp_application_crypto::{AppCrypto, AppPublic, ByteArray, RuntimeAppPublic}; +use sp_application_crypto::{AppPublic, RuntimeAppPublic}; use sp_core::H256; use sp_runtime::{ traits::{Hash, Keccak256, NumberFor}, @@ -76,17 +76,13 @@ pub type BeefySignatureHasher = sp_runtime::traits::Keccak256; /// A trait bound which lists all traits which are required to be implemented by /// a BEEFY AuthorityId type in order to be able to be used in BEEFY Keystore pub trait AuthorityIdBound: - Codec - + Debug - + Clone - + AsRef<[u8]> - + ByteArray + Ord + AppPublic - + AppCrypto - + RuntimeAppPublic + Display - + BeefyAuthorityId + + BeefyAuthorityId { + /// Necessary bounds on the Signature associated with the AuthorityId + type BoundedSignature: Debug + Eq + PartialEq + Clone + TypeInfo + Codec + Send + Sync; } /// BEEFY cryptographic types for ECDSA crypto @@ -127,7 +123,9 @@ pub mod ecdsa_crypto { } } } - impl AuthorityIdBound for AuthorityId {} + impl AuthorityIdBound for AuthorityId { + type BoundedSignature = Signature; + } } /// BEEFY cryptographic types for BLS crypto @@ -168,7 +166,9 @@ pub mod bls_crypto { BlsPair::verify(signature.as_inner_ref(), msg, self.as_inner_ref()) } } - impl AuthorityIdBound for AuthorityId {} + impl AuthorityIdBound for AuthorityId { + type BoundedSignature = Signature; + } } /// BEEFY cryptographic types for (ECDSA,BLS) crypto pair @@ -216,7 +216,9 @@ pub mod ecdsa_bls_crypto { } } - impl AuthorityIdBound for AuthorityId {} + impl AuthorityIdBound for AuthorityId { + type BoundedSignature = Signature; + } } /// The `ConsensusEngineId` of BEEFY. 
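For illustration, a minimal sketch (not part of this patch; the helper name is hypothetical) of what the slimmed-down `AuthorityIdBound` bound enables: downstream code can stay generic over the BEEFY crypto scheme instead of hard-coding the `ecdsa_crypto` types. It assumes the trait definition above, where `AuthorityIdBound` implies `BeefyAuthorityId<BeefySignatureHasher>`:

```rust
use sp_application_crypto::RuntimeAppPublic;
use sp_consensus_beefy::{AuthorityIdBound, BeefyAuthorityId, BeefySignatureHasher};

// Hypothetical downstream helper: verify a BEEFY commitment signature for any
// supported crypto scheme (ecdsa, bls, ecdsa-bls) through the common
// `AuthorityIdBound` bound, naming the signature type via the
// `RuntimeAppPublic` supertrait instead of `ecdsa_crypto::Signature`.
fn verify_beefy_signature<AuthorityId: AuthorityIdBound>(
	authority: &AuthorityId,
	msg: &[u8],
	signature: &<AuthorityId as RuntimeAppPublic>::Signature,
) -> bool {
	// Dispatches to the scheme-specific verification implemented by
	// `BeefyAuthorityId` (e.g. keccak-hashed ECDSA recovery for `ecdsa_crypto`).
	BeefyAuthorityId::<BeefySignatureHasher>::verify(authority, signature, msg)
}
```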
diff --git a/substrate/primitives/keystore/src/testing.rs b/substrate/primitives/keystore/src/testing.rs index d8610ecfa5b..1403e4745ff 100644 --- a/substrate/primitives/keystore/src/testing.rs +++ b/substrate/primitives/keystore/src/testing.rs @@ -516,7 +516,7 @@ mod tests { let suri = "//Alice"; let pair = ecdsa_bls377::Pair::from_string(suri, None).unwrap(); - let msg = b"this should be a normal unhashed message not "; + let msg = b"this should be a normal unhashed message not a hash of a message because bls scheme comes with its own hashing"; // insert key, sign again store.insert(ECDSA_BLS377, suri, pair.public().as_ref()).unwrap(); -- GitLab From d539778c3cc4e0376769472fdad37856f4051dc5 Mon Sep 17 00:00:00 2001 From: Jeeyong Um Date: Thu, 30 May 2024 18:44:16 +0800 Subject: [PATCH 091/106] Fix broken windows build (#4636) Fixes #4625. Specifically, the `cfg` attribute `windows` refers to the compile target and not the build environment, and in the case of cross-compilation, the build environment and target can differ. However, the line modified is related to documentation generation, so there should be no critical issue with this change. --- substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs index 8f6c0c6f650..df1f5645f04 100644 --- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -57,7 +57,7 @@ type UncheckedSignaturePayload = (Address, Signature, /// could in principle be any other interaction. Transactions are either signed or unsigned. A /// sensible transaction pool should ensure that only transactions that are worthwhile are /// considered for block-building. -#[cfg_attr(feature = "std", doc = simple_mermaid::mermaid!("../../docs/mermaid/extrinsics.mmd"))] +#[cfg_attr(all(feature = "std", not(windows)), doc = simple_mermaid::mermaid!("../../docs/mermaid/extrinsics.mmd"))] /// This type is by no means enforced within Substrate, but given its genericness, it is highly /// likely that for most use-cases it will suffice. Thus, the encoding of this type will dictate /// exactly what bytes should be sent to a runtime to transact with it. -- GitLab From 78c24ec9e24ea04b2f8513b53a8d1246ff6b35ed Mon Sep 17 00:00:00 2001 From: gupnik Date: Fri, 31 May 2024 07:39:12 +0530 Subject: [PATCH 092/106] Adds ability to specify chain type in chain-spec-builder (#4542) Currently, `chain-spec-builder` only creates a spec with `Live` chain type. This PR adds the ability to specify it while keeping the same default. 
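To make the new option concrete, a hypothetical invocation sketch (the `-t` flag and its `live` default are taken from the `CreateCmd` struct in the diff below; the remaining flags are from the same struct, and the exact genesis-action subcommand syntax may differ):

```sh
# Sketch: build a spec with an explicit chain type. Omitting -t keeps the
# previous behaviour, i.e. the default "live" chain type.
chain-spec-builder create \
  -i custom-dev \
  -t development \
  --runtime-wasm-path ./runtime.compact.compressed.wasm \
  default
```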
--------- Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- Cargo.lock | 1 + prdoc/pr_4542.prdoc | 13 +++++++++++++ substrate/bin/utils/chain-spec-builder/Cargo.toml | 2 +- substrate/bin/utils/chain-spec-builder/src/lib.rs | 9 +++++++-- substrate/client/chain-spec/Cargo.toml | 1 + substrate/client/chain-spec/src/lib.rs | 2 ++ 6 files changed, 25 insertions(+), 3 deletions(-) create mode 100644 prdoc/pr_4542.prdoc diff --git a/Cargo.lock b/Cargo.lock index 781dba880cb..e50baf6e668 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16709,6 +16709,7 @@ name = "sc-chain-spec" version = "28.0.0" dependencies = [ "array-bytes", + "clap 4.5.3", "docify", "log", "memmap2 0.9.3", diff --git a/prdoc/pr_4542.prdoc b/prdoc/pr_4542.prdoc new file mode 100644 index 00000000000..faaf9dc2c28 --- /dev/null +++ b/prdoc/pr_4542.prdoc @@ -0,0 +1,13 @@ +title: "Adds ability to specify chain type in chain-spec-builder" + +doc: + - audience: Node Operator + description: | + Currently, `chain-spec-builder` only creates a spec with Live chain type. This PR adds the + ability to specify it while keeping the same default. + +crates: + - name: staging-chain-spec-builder + bump: patch + - name: sc-chain-spec + bump: patch diff --git a/substrate/bin/utils/chain-spec-builder/Cargo.toml b/substrate/bin/utils/chain-spec-builder/Cargo.toml index cc9aa402fd1..de06bbb3fff 100644 --- a/substrate/bin/utils/chain-spec-builder/Cargo.toml +++ b/substrate/bin/utils/chain-spec-builder/Cargo.toml @@ -26,6 +26,6 @@ crate-type = ["rlib"] [dependencies] clap = { version = "4.5.3", features = ["derive"] } log = { workspace = true, default-features = true } -sc-chain-spec = { path = "../../../client/chain-spec" } +sc-chain-spec = { path = "../../../client/chain-spec", features = ["clap"] } serde_json = { workspace = true, default-features = true } sp-tracing = { path = "../../../primitives/tracing" } diff --git a/substrate/bin/utils/chain-spec-builder/src/lib.rs b/substrate/bin/utils/chain-spec-builder/src/lib.rs index 167704d3633..0f7c003fc8c 100644 --- a/substrate/bin/utils/chain-spec-builder/src/lib.rs +++ b/substrate/bin/utils/chain-spec-builder/src/lib.rs @@ -120,7 +120,7 @@ use std::{fs, path::PathBuf}; use clap::{Parser, Subcommand}; -use sc_chain_spec::{GenericChainSpec, GenesisConfigBuilderRuntimeCaller}; +use sc_chain_spec::{ChainType, GenericChainSpec, GenesisConfigBuilderRuntimeCaller}; use serde_json::Value; /// A utility to easily create a chain spec definition. @@ -154,6 +154,9 @@ pub struct CreateCmd { /// The chain id. #[arg(long, short = 'i', default_value = "custom")] chain_id: String, + /// The chain type. + #[arg(value_enum, short = 't', default_value = "live")] + chain_type: ChainType, /// The path to runtime wasm blob. 
#[arg(long, short)]
 runtime_wasm_path: PathBuf,
@@ -261,10 +264,12 @@ pub fn generate_chain_spec_for_runtime(cmd: &CreateCmd) -> Result::builder(&code[..], Default::default())
 .with_name(&cmd.chain_name[..])
 .with_id(&cmd.chain_id[..])
- .with_chain_type(sc_chain_spec::ChainType::Live);
+ .with_chain_type(chain_type.clone());

 let builder = match cmd.action {
 GenesisBuildAction::NamedPreset(NamedPresetCmd { ref preset_name }) =>
diff --git a/substrate/client/chain-spec/Cargo.toml b/substrate/client/chain-spec/Cargo.toml
index 9028a2c49ee..5b411b642a0 100644
--- a/substrate/client/chain-spec/Cargo.toml
+++ b/substrate/client/chain-spec/Cargo.toml
@@ -16,6 +16,7 @@ workspace = true
targets = ["x86_64-unknown-linux-gnu"]

[dependencies]
+clap = { version = "4.5.3", features = ["derive"], optional = true }
codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
memmap2 = "0.9.3"
serde = { features = ["derive"], workspace = true, default-features = true }
diff --git a/substrate/client/chain-spec/src/lib.rs b/substrate/client/chain-spec/src/lib.rs
index 066a0ab9e2a..653c3c618b7 100644
--- a/substrate/client/chain-spec/src/lib.rs
+++ b/substrate/client/chain-spec/src/lib.rs
@@ -352,6 +352,7 @@ use sp_runtime::BuildStorage;
 /// This can be used by tools to determine the type of a chain for displaying
 /// additional information or enabling additional features.
 #[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Clone)]
+#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
 pub enum ChainType {
 /// A development chain that runs mainly on one node.
 Development,
@@ -360,6 +361,7 @@ pub enum ChainType {
 /// A live chain.
 Live,
 /// Some custom chain type.
+ #[cfg_attr(feature = "clap", clap(skip))]
 Custom(String),
 }

-- 
GitLab


From 71f4f5a80bb9ef00d651c62a58c6e8192d4d9707 Mon Sep 17 00:00:00 2001
From: Kian Paimani <5588131+kianenigma@users.noreply.github.com>
Date: Fri, 31 May 2024 12:58:05 +0800
Subject: [PATCH 093/106] Update `runtime_type` ref doc with the new
 "Associated Type Bounds" (#4624)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Bastian Köcher
---
 docs/sdk/src/reference_docs/frame_runtime_types.rs | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/docs/sdk/src/reference_docs/frame_runtime_types.rs b/docs/sdk/src/reference_docs/frame_runtime_types.rs
index 32cda5bc534..1eed9857a1d 100644
--- a/docs/sdk/src/reference_docs/frame_runtime_types.rs
+++ b/docs/sdk/src/reference_docs/frame_runtime_types.rs
@@ -102,6 +102,10 @@
 //! bounds, such as being [`frame::traits::IsSubType`]:
 #![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", custom_runtime_call_usages)]
 //!
+//! > Once Rust's "_Associated Type Bounds RFC_" is usable, this syntax can be used to
+//! > simplify the above scenario. See [this](https://github.com/paritytech/polkadot-sdk/issues/3743)
+//! > issue for more information.
+//!
 //! ### Asserting Equality of Multiple Runtime Composite Enums
 //!
 //! Recall that in the above example, `::RuntimeCall` and ` Date: Fri, 31 May 2024 09:34:43 +0300
Subject: [PATCH 094/106] collator-protocol: remove `elastic-scaling-experimental`
 feature (#4595)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Validators have already been upgraded, so they can already receive the new
`CollationWithParentHeadData` response when fetching a collation.
However, this is only sent by collators when the parachain has more than one
core assigned.

TODO:
- [x] PRDoc

---------

Signed-off-by: Andrei Sandu
Co-authored-by: Bastian Köcher
---
 cumulus/polkadot-parachain/Cargo.toml | 1 -
 .../node/network/collator-protocol/Cargo.toml | 1 -
 .../src/collator_side/mod.rs | 8 ------
 .../src/collator_side/tests/mod.rs | 1 -
 .../tests/prospective_parachains.rs | 1 -
 polkadot/node/service/Cargo.toml | 4 ---
 .../test-parachains/adder/collator/Cargo.toml | 2 +-
 .../undying/collator/Cargo.toml | 2 +-
 prdoc/pr_4595.prdoc | 25 +++++++++++++++++++
 9 files changed, 27 insertions(+), 18 deletions(-)
 create mode 100644 prdoc/pr_4595.prdoc

diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml
index a22606edb6c..def7d95fd56 100644
--- a/cumulus/polkadot-parachain/Cargo.toml
+++ b/cumulus/polkadot-parachain/Cargo.toml
@@ -172,4 +172,3 @@ try-runtime = [
 "sp-runtime/try-runtime",
 ]
 fast-runtime = ["bridge-hub-rococo-runtime/fast-runtime"]
-elastic-scaling-experimental = ["polkadot-service/elastic-scaling-experimental"]
diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml
index c02999a59b5..d7291552738 100644
--- a/polkadot/node/network/collator-protocol/Cargo.toml
+++ b/polkadot/node/network/collator-protocol/Cargo.toml
@@ -45,4 +45,3 @@ polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" }

 [features]
 default = []
-elastic-scaling-experimental = []
diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
index 88375d58309..80a85420b39 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
@@ -924,7 +924,6 @@ async fn send_collation(
 let peer_id = request.peer_id();
 let candidate_hash = receipt.hash();

- #[cfg(feature = "elastic-scaling-experimental")]
 let result = match parent_head_data {
 ParentHeadData::WithData { head_data, .. } =>
 Ok(request_v2::CollationFetchingResponse::CollationWithParentHeadData {
@@ -935,13 +934,6 @@ async fn send_collation(
 ParentHeadData::OnlyHash(_) =>
 Ok(request_v1::CollationFetchingResponse::Collation(receipt, pov)),
 };
- #[cfg(not(feature = "elastic-scaling-experimental"))]
- let result = {
- // suppress unused warning
- let _parent_head_data = parent_head_data;
-
- Ok(request_v1::CollationFetchingResponse::Collation(receipt, pov))
- };

 let response =
 OutgoingResponse { result, reputation_changes: Vec::new(), sent_feedback: Some(tx) };
diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs
index 689e03ce473..412792bbecf 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs
@@ -144,7 +144,6 @@ impl Default for TestState {
 impl TestState {
 /// Adds a few more scheduled cores to the state for the same para id
 /// compared to the default.
- #[cfg(feature = "elastic-scaling-experimental")] pub fn with_elastic_scaling() -> Self { let mut state = Self::default(); let para_id = state.para_id; diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs index 2a147aef69e..0a0a85fb1f2 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs @@ -443,7 +443,6 @@ fn distribute_collation_up_to_limit() { /// Tests that collator send the parent head data in /// case the para is assigned to multiple cores (elastic scaling). #[test] -#[cfg(feature = "elastic-scaling-experimental")] fn send_parent_head_data_for_elastic_scaling() { let test_state = TestState::with_elastic_scaling(); diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 37836f134bd..0dfdf926b1b 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -238,7 +238,3 @@ runtime-metrics = [ "rococo-runtime?/runtime-metrics", "westend-runtime?/runtime-metrics", ] - -elastic-scaling-experimental = [ - "polkadot-collator-protocol?/elastic-scaling-experimental", -] diff --git a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml index dbc8507d599..f9aaab74deb 100644 --- a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml @@ -24,7 +24,7 @@ log = { workspace = true, default-features = true } test-parachain-adder = { path = ".." } polkadot-primitives = { path = "../../../../primitives" } polkadot-cli = { path = "../../../../cli" } -polkadot-service = { path = "../../../../node/service", features = ["elastic-scaling-experimental", "rococo-native"] } +polkadot-service = { path = "../../../../node/service", features = ["rococo-native"] } polkadot-node-primitives = { path = "../../../../node/primitives" } polkadot-node-subsystem = { path = "../../../../node/subsystem" } diff --git a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml index 28efdbbf242..08d1e74d879 100644 --- a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml @@ -24,7 +24,7 @@ log = { workspace = true, default-features = true } test-parachain-undying = { path = ".." } polkadot-primitives = { path = "../../../../primitives" } polkadot-cli = { path = "../../../../cli" } -polkadot-service = { path = "../../../../node/service", features = ["elastic-scaling-experimental", "rococo-native"] } +polkadot-service = { path = "../../../../node/service", features = ["rococo-native"] } polkadot-node-primitives = { path = "../../../../node/primitives" } polkadot-node-subsystem = { path = "../../../../node/subsystem" } diff --git a/prdoc/pr_4595.prdoc b/prdoc/pr_4595.prdoc new file mode 100644 index 00000000000..8baa6e8a91f --- /dev/null +++ b/prdoc/pr_4595.prdoc @@ -0,0 +1,25 @@ +title: "Remove `elastic-scaling-experimental` feature flag" + +doc: + - audience: Node Dev + description: | + The feature was masking the ability of collators to respond with `CollationWithParentHeadData` + to validator collation fetch requests, a requirement for elastic scaling. 
+ Please note that `CollationWithParentHeadData` is only sent by collators of parachains with + multiple cores assigned, otherwise collators must respond with `CollationFetchingResponse::Collation` + - audience: Node Operator + description: | + This change enables elastic scaling support in collators. Please upgrade to latest version, + otherwise validator nodes will not be able to back elastic parachain blocks leading to + missed rewards. + +crates: + - name: polkadot-collator-protocol + bump: major + validate: false + - name: polkadot-service + bump: major + validate: false + - name: polkadot-parachain-bin + bump: minor + validate: false -- GitLab From 8d8c0e13a7dc8d067367ac55fb142b12ac8a6d13 Mon Sep 17 00:00:00 2001 From: Przemek Rzad Date: Fri, 31 May 2024 12:15:48 +0200 Subject: [PATCH 095/106] Use Unlicense for templates (#4628) Addresses [this](https://github.com/paritytech/polkadot-sdk/issues/3155#issuecomment-2134411391). --- templates/minimal/Cargo.toml | 2 +- templates/minimal/LICENSE | 24 +++++++++++++ templates/minimal/node/Cargo.toml | 2 +- templates/minimal/pallets/template/Cargo.toml | 2 +- templates/minimal/runtime/Cargo.toml | 2 +- templates/parachain/node/Cargo.toml | 2 +- .../parachain/pallets/template/Cargo.toml | 2 +- templates/parachain/runtime/Cargo.toml | 2 +- templates/solochain/LICENSE | 34 ++++++++++++------- templates/solochain/node/Cargo.toml | 2 +- .../solochain/pallets/template/Cargo.toml | 2 +- templates/solochain/runtime/Cargo.toml | 2 +- 12 files changed, 55 insertions(+), 23 deletions(-) create mode 100644 templates/minimal/LICENSE diff --git a/templates/minimal/Cargo.toml b/templates/minimal/Cargo.toml index 95656ff92d2..ca00cb84284 100644 --- a/templates/minimal/Cargo.toml +++ b/templates/minimal/Cargo.toml @@ -2,7 +2,7 @@ name = "minimal-template" description = "A minimal template built with Substrate, part of Polkadot Sdk." version = "0.0.0" -license = "MIT-0" +license = "Unlicense" authors.workspace = true homepage.workspace = true repository.workspace = true diff --git a/templates/minimal/LICENSE b/templates/minimal/LICENSE new file mode 100644 index 00000000000..cf1ab25da03 --- /dev/null +++ b/templates/minimal/LICENSE @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
+ +For more information, please refer to diff --git a/templates/minimal/node/Cargo.toml b/templates/minimal/node/Cargo.toml index f732eff445c..d07c7b6dd9b 100644 --- a/templates/minimal/node/Cargo.toml +++ b/templates/minimal/node/Cargo.toml @@ -2,7 +2,7 @@ name = "minimal-template-node" description = "A minimal Substrate-based Substrate node, ready for hacking." version = "0.0.0" -license = "MIT-0" +license = "Unlicense" authors.workspace = true homepage.workspace = true repository.workspace = true diff --git a/templates/minimal/pallets/template/Cargo.toml b/templates/minimal/pallets/template/Cargo.toml index 30962664481..f0abe3c6942 100644 --- a/templates/minimal/pallets/template/Cargo.toml +++ b/templates/minimal/pallets/template/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-minimal-template" description = "A minimal pallet built with FRAME, part of Polkadot Sdk." version = "0.0.0" -license = "MIT-0" +license = "Unlicense" authors.workspace = true homepage.workspace = true repository.workspace = true diff --git a/templates/minimal/runtime/Cargo.toml b/templates/minimal/runtime/Cargo.toml index 3581ca7c851..ab6a48b73f3 100644 --- a/templates/minimal/runtime/Cargo.toml +++ b/templates/minimal/runtime/Cargo.toml @@ -2,7 +2,7 @@ name = "minimal-template-runtime" description = "A solochain runtime template built with Substrate, part of Polkadot Sdk." version = "0.0.0" -license = "MIT-0" +license = "Unlicense" authors.workspace = true homepage.workspace = true repository.workspace = true diff --git a/templates/parachain/node/Cargo.toml b/templates/parachain/node/Cargo.toml index 4fe228f71fe..94873cf1fae 100644 --- a/templates/parachain/node/Cargo.toml +++ b/templates/parachain/node/Cargo.toml @@ -2,7 +2,7 @@ name = "parachain-template-node" description = "A parachain node template built with Substrate and Cumulus, part of Polkadot Sdk." version = "0.0.0" -license = "MIT-0" +license = "Unlicense" authors.workspace = true homepage.workspace = true repository.workspace = true diff --git a/templates/parachain/pallets/template/Cargo.toml b/templates/parachain/pallets/template/Cargo.toml index f5411c02821..6c549c2c4a9 100644 --- a/templates/parachain/pallets/template/Cargo.toml +++ b/templates/parachain/pallets/template/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-parachain-template" description = "FRAME pallet template for defining custom runtime logic." version = "0.0.0" -license = "MIT-0" +license = "Unlicense" authors.workspace = true homepage.workspace = true repository.workspace = true diff --git a/templates/parachain/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml index e88284bedb6..059c7936796 100644 --- a/templates/parachain/runtime/Cargo.toml +++ b/templates/parachain/runtime/Cargo.toml @@ -2,7 +2,7 @@ name = "parachain-template-runtime" description = "A parachain runtime template built with Substrate and Cumulus, part of Polkadot Sdk." version = "0.0.0" -license = "MIT-0" +license = "Unlicense" authors.workspace = true homepage.workspace = true repository.workspace = true diff --git a/templates/solochain/LICENSE b/templates/solochain/LICENSE index ffa0b3f2df0..cf1ab25da03 100644 --- a/templates/solochain/LICENSE +++ b/templates/solochain/LICENSE @@ -1,16 +1,24 @@ -MIT No Attribution +This is free and unencumbered software released into the public domain. -Copyright Parity Technologies (UK) Ltd. 
+Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. -Permission is hereby granted, free of charge, to any person obtaining a copy of this -software and associated documentation files (the "Software"), to deal in the Software -without restriction, including without limitation the rights to use, copy, modify, -merge, publish, distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so. +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, -INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to diff --git a/templates/solochain/node/Cargo.toml b/templates/solochain/node/Cargo.toml index 515f85e5418..4e8b8184090 100644 --- a/templates/solochain/node/Cargo.toml +++ b/templates/solochain/node/Cargo.toml @@ -2,7 +2,7 @@ name = "solochain-template-node" description = "A solochain node template built with Substrate, part of Polkadot Sdk." version = "0.0.0" -license = "MIT-0" +license = "Unlicense" authors.workspace = true homepage.workspace = true repository.workspace = true diff --git a/templates/solochain/pallets/template/Cargo.toml b/templates/solochain/pallets/template/Cargo.toml index 8c6f26d8e5d..5b8349b5d67 100644 --- a/templates/solochain/pallets/template/Cargo.toml +++ b/templates/solochain/pallets/template/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-template" description = "FRAME pallet template for defining custom runtime logic." version = "0.0.0" -license = "MIT-0" +license = "Unlicense" authors.workspace = true homepage.workspace = true repository.workspace = true diff --git a/templates/solochain/runtime/Cargo.toml b/templates/solochain/runtime/Cargo.toml index 8aeb1a6a16e..0af3899a666 100644 --- a/templates/solochain/runtime/Cargo.toml +++ b/templates/solochain/runtime/Cargo.toml @@ -2,7 +2,7 @@ name = "solochain-template-runtime" description = "A solochain runtime template built with Substrate, part of Polkadot Sdk." 
version = "0.0.0" -license = "MIT-0" +license = "Unlicense" authors.workspace = true homepage.workspace = true repository.workspace = true -- GitLab From fc6c31829fc2e24e11a02b6a2adec27bc5d8918f Mon Sep 17 00:00:00 2001 From: Francisco Aguirre Date: Fri, 31 May 2024 17:38:56 +0200 Subject: [PATCH 096/106] Implement `XcmPaymentApi` and `DryRunApi` on all system parachains (#4634) Depends on https://github.com/paritytech/polkadot-sdk/pull/4621. Implemented the [`XcmPaymentApi`](https://github.com/paritytech/polkadot-sdk/pull/3607) and [`DryRunApi`](https://github.com/paritytech/polkadot-sdk/pull/3872) on all system parachains. More scenarios can be tested on both rococo and westend if all system parachains implement this APIs. The objective is for all XCM-enabled runtimes to implement them. After demonstrating fee estimation in a UI on the testnets, come the fellowship runtimes. Step towards https://github.com/paritytech/polkadot-sdk/issues/690. --- Cargo.lock | 8 +++ .../assets/asset-hub-rococo/src/lib.rs | 13 ++--- .../assets/asset-hub-westend/src/lib.rs | 16 ++---- .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 3 ++ .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 50 +++++++++++++++++- .../bridge-hubs/bridge-hub-westend/Cargo.toml | 3 ++ .../bridge-hubs/bridge-hub-westend/src/lib.rs | 51 ++++++++++++++++++- .../collectives-westend/Cargo.toml | 3 ++ .../collectives-westend/src/lib.rs | 50 +++++++++++++++++- .../contracts/contracts-rococo/Cargo.toml | 3 ++ .../contracts/contracts-rococo/src/lib.rs | 49 +++++++++++++++++- .../coretime/coretime-rococo/Cargo.toml | 3 ++ .../coretime/coretime-rococo/src/lib.rs | 50 +++++++++++++++++- .../coretime/coretime-westend/Cargo.toml | 3 ++ .../coretime/coretime-westend/src/lib.rs | 50 +++++++++++++++++- .../runtimes/people/people-rococo/Cargo.toml | 3 ++ .../runtimes/people/people-rococo/src/lib.rs | 50 +++++++++++++++++- .../runtimes/people/people-westend/Cargo.toml | 3 ++ .../runtimes/people/people-westend/src/lib.rs | 50 +++++++++++++++++- .../runtimes/testing/penpal/src/lib.rs | 13 ++--- polkadot/runtime/rococo/src/lib.rs | 16 ++---- polkadot/runtime/westend/src/lib.rs | 16 ++---- polkadot/xcm/pallet-xcm/src/lib.rs | 15 ++++++ prdoc/pr_4634.prdoc | 34 +++++++++++++ 24 files changed, 481 insertions(+), 74 deletions(-) create mode 100644 prdoc/pr_4634.prdoc diff --git a/Cargo.lock b/Cargo.lock index e50baf6e668..e1d8f209283 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2099,6 +2099,7 @@ dependencies = [ "substrate-wasm-builder", "testnet-parachains-constants", "tuplex", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -2259,6 +2260,7 @@ dependencies = [ "testnet-parachains-constants", "tuplex", "westend-runtime-constants", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -2868,6 +2870,7 @@ dependencies = [ "substrate-wasm-builder", "testnet-parachains-constants", "westend-runtime-constants", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -3123,6 +3126,7 @@ dependencies = [ "staging-xcm-executor", "substrate-wasm-builder", "testnet-parachains-constants", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -3219,6 +3223,7 @@ dependencies = [ "staging-xcm-executor", "substrate-wasm-builder", "testnet-parachains-constants", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -3283,6 +3288,7 @@ dependencies = [ "substrate-wasm-builder", "testnet-parachains-constants", "westend-runtime-constants", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -12281,6 +12287,7 @@ dependencies = [ "staging-xcm-executor", "substrate-wasm-builder", 
"testnet-parachains-constants", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -12381,6 +12388,7 @@ dependencies = [ "substrate-wasm-builder", "testnet-parachains-constants", "westend-runtime-constants", + "xcm-fee-payment-runtime-api", ] [[package]] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index e3a106c6ab9..1fc67ba0c30 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -98,7 +98,7 @@ use xcm::latest::prelude::{ }; use xcm::{ latest::prelude::{AssetId, BodyId}, - IntoVersion, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, + VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, }; use xcm_fee_payment_runtime_api::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, @@ -1295,15 +1295,8 @@ impl_runtime_apis! { impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { - let acceptable = vec![ - // native token - VersionedAssetId::from(AssetId(xcm_config::TokenLocation::get())) - ]; - - Ok(acceptable - .into_iter() - .filter_map(|asset| asset.into_version(xcm_version).ok()) - .collect()) + let acceptable_assets = vec![AssetId(xcm_config::TokenLocation::get())]; + PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index ececae3ef0a..d9249cdfc48 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -85,10 +85,7 @@ pub use sp_runtime::BuildStorage; use assets_common::{foreign_creators::ForeignCreators, matching::FromSiblingParachain}; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; -use xcm::{ - prelude::{VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}, - IntoVersion, -}; +use xcm::prelude::{VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}; // We exclude `Assets` since it's the name of a pallet use xcm::latest::prelude::AssetId; @@ -1331,15 +1328,8 @@ impl_runtime_apis! 
{ impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { - let acceptable = vec![ - // native token - VersionedAssetId::from(AssetId(xcm_config::WestendLocation::get())) - ]; - - Ok(acceptable - .into_iter() - .filter_map(|asset| asset.into_version(xcm_version).ok()) - .collect()) + let acceptable_assets = vec![AssetId(xcm_config::WestendLocation::get())]; + PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index af243998d43..253a21f5d0b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -66,6 +66,7 @@ polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", def xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } @@ -220,6 +221,7 @@ std = [ "tuplex/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] @@ -262,6 +264,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm-fee-payment-runtime-api/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 0c72b000c2a..e7868bcbc78 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -70,7 +70,7 @@ use frame_support::{ genesis_builder_helper::{build_state, get_preset}, parameter_types, traits::{ConstBool, ConstU32, ConstU64, ConstU8, Get, TransformOrigin}, - weights::{ConstantMultiplier, Weight}, + weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, }; use frame_system::{ @@ -97,7 +97,11 @@ pub use sp_runtime::BuildStorage; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; use rococo_runtime_constants::system_parachain::{ASSET_HUB_ID, BRIDGE_HUB_ID}; -use xcm::latest::prelude::*; +use xcm::prelude::*; +use xcm_fee_payment_runtime_api::{ + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, + fees::Error as XcmPaymentApiError, +}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; @@ -962,6 +966,48 @@ impl_runtime_apis! 
{ } } + impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { + let acceptable_assets = vec![AssetId(xcm_config::TokenLocation::get())]; + PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { + match asset.try_as::() { + Ok(asset_id) if asset_id.0 == xcm_config::TokenLocation::get() => { + // for native token + Ok(WeightToFee::weight_to_fee(&weight)) + }, + Ok(asset_id) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + }, + Err(_) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_delivery_fees(destination, message) + } + } + + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_call::(origin, call) + } + + fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_xcm::(origin_location, xcm) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 4a58528498d..0f16d629fc2 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -62,6 +62,7 @@ polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", def xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } @@ -185,6 +186,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] @@ -219,6 +221,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm-fee-payment-runtime-api/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 90190da82dd..e26d490f9ac 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ 
b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -64,7 +64,7 @@ use frame_support::{ genesis_builder_helper::{build_state, get_preset}, parameter_types, traits::{ConstBool, ConstU32, ConstU64, ConstU8, Get, TransformOrigin}, - weights::{ConstantMultiplier, Weight}, + weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, }; use frame_system::{ @@ -75,13 +75,18 @@ pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; pub use sp_runtime::{MultiAddress, Perbill, Permill}; use xcm_config::{XcmOriginToTransactDispatchOrigin, XcmRouter}; +use xcm_fee_payment_runtime_api::{ + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, + fees::Error as XcmPaymentApiError, +}; + use bp_runtime::HeaderId; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; -use xcm::latest::prelude::*; +use xcm::prelude::*; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; @@ -711,6 +716,48 @@ impl_runtime_apis! { } } + impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { + let acceptable_assets = vec![AssetId(xcm_config::WestendLocation::get())]; + PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { + match asset.try_as::() { + Ok(asset_id) if asset_id.0 == xcm_config::WestendLocation::get() => { + // for native token + Ok(WeightToFee::weight_to_fee(&weight)) + }, + Ok(asset_id) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + }, + Err(_) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_delivery_fees(destination, message) + } + } + + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_call::(origin, call) + } + + fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_xcm::(origin_location, xcm) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index 58985d71a50..fe4de3114be 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -66,6 +66,7 @@ polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", def xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", 
default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false } # Cumulus @@ -130,6 +131,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm-fee-payment-runtime-api/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", @@ -236,6 +238,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 29ba88df104..5fce8e50954 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -73,7 +73,7 @@ use frame_support::{ fungible::HoldConsideration, ConstBool, ConstU16, ConstU32, ConstU64, ConstU8, EitherOfDiverse, InstanceFilter, LinearStoragePrice, TransformOrigin, }, - weights::{ConstantMultiplier, Weight}, + weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, }; use frame_system::{ @@ -103,7 +103,11 @@ use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; use polkadot_runtime_common::{ impls::VersionedLocatableAsset, BlockHashCount, SlowAdjustingFeeUpdate, }; -use xcm::latest::{prelude::*, BodyId}; +use xcm::prelude::*; +use xcm_fee_payment_runtime_api::{ + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, + fees::Error as XcmPaymentApiError, +}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; @@ -936,6 +940,48 @@ impl_runtime_apis! 
{ } } + impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { + let acceptable_assets = vec![AssetId(xcm_config::WndLocation::get())]; + PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { + match asset.try_as::() { + Ok(asset_id) if asset_id.0 == xcm_config::WndLocation::get() => { + // for native token + Ok(WeightToFee::weight_to_fee(&weight)) + }, + Ok(asset_id) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + }, + Err(_) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_delivery_fees(destination, message) + } + } + + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_call::(origin, call) + } + + fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_xcm::(origin_location, xcm) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index c9dd279e9c0..e43a69482c7 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -63,6 +63,7 @@ rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/cons xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } @@ -140,6 +141,7 @@ std = [ "testnet-parachains-constants/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] @@ -169,6 +171,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm-fee-payment-runtime-api/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 1222e11e9a6..2d346e66c6c 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -51,7 +51,7 @@ use 
frame_support::{ genesis_builder_helper::{build_state, get_preset}, parameter_types, traits::{ConstBool, ConstU16, ConstU32, ConstU64, ConstU8}, - weights::{ConstantMultiplier, Weight}, + weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, }; use frame_system::limits::{BlockLength, BlockWeights}; @@ -62,7 +62,12 @@ use parachains_common::{ }; pub use parachains_common::{AuraId, Balance}; use testnet_parachains_constants::rococo::{consensus::*, currency::*, fee::WeightToFee, time::*}; +use xcm::prelude::*; use xcm_config::CollatorSelectionUpdateOrigin; +use xcm_fee_payment_runtime_api::{ + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, + fees::Error as XcmPaymentApiError, +}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; @@ -585,6 +590,48 @@ impl_runtime_apis! { } } + impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { + let acceptable_assets = vec![AssetId(xcm_config::RelayLocation::get())]; + PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { + match asset.try_as::() { + Ok(asset_id) if asset_id.0 == xcm_config::RelayLocation::get() => { + // for native token + Ok(WeightToFee::weight_to_fee(&weight)) + }, + Ok(asset_id) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + }, + Err(_) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_delivery_fees(destination, message) + } + } + + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_call::(origin, call) + } + + fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_xcm::(origin_location, xcm) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml index ad85aab1f8a..dc99fe331f7 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml @@ -62,6 +62,7 @@ rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/cons xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features 
= false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } @@ -138,6 +139,7 @@ std = [ "testnet-parachains-constants/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] @@ -167,6 +169,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm-fee-payment-runtime-api/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index b7880279048..b3eaf3d127a 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -41,7 +41,7 @@ use frame_support::{ genesis_builder_helper::{build_state, get_preset}, parameter_types, traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin}, - weights::{ConstantMultiplier, Weight}, + weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, }; use frame_system::{ @@ -72,10 +72,14 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use testnet_parachains_constants::rococo::{consensus::*, currency::*, fee::WeightToFee, time::*}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; -use xcm::latest::prelude::*; +use xcm::prelude::*; use xcm_config::{ FellowshipLocation, GovernanceLocation, RocRelayLocation, XcmOriginToTransactDispatchOrigin, }; +use xcm_fee_payment_runtime_api::{ + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, + fees::Error as XcmPaymentApiError, +}; /// The address format for describing accounts. pub type Address = MultiAddress; @@ -656,6 +660,48 @@ impl_runtime_apis! 
{ } } + impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { + let acceptable_assets = vec![AssetId(xcm_config::RocRelayLocation::get())]; + PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { + match asset.try_as::() { + Ok(asset_id) if asset_id.0 == xcm_config::RocRelayLocation::get() => { + // for native token + Ok(WeightToFee::weight_to_fee(&weight)) + }, + Ok(asset_id) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + }, + Err(_) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_delivery_fees(destination, message) + } + } + + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_call::(origin, call) + } + + fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_xcm::(origin_location, xcm) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml index 4611228da29..78018537f5d 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml @@ -61,6 +61,7 @@ westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/co xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } @@ -137,6 +138,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] @@ -165,6 +167,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm-fee-payment-runtime-api/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 78b963e3b40..6c22702ce87 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -41,7 +41,7 @@ use 
frame_support::{ genesis_builder_helper::{build_state, get_preset}, parameter_types, traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin}, - weights::{ConstantMultiplier, Weight}, + weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, }; use frame_system::{ @@ -72,10 +72,14 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use testnet_parachains_constants::westend::{consensus::*, currency::*, fee::WeightToFee, time::*}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; -use xcm::latest::prelude::*; +use xcm::prelude::*; use xcm_config::{ FellowshipLocation, GovernanceLocation, TokenRelayLocation, XcmOriginToTransactDispatchOrigin, }; +use xcm_fee_payment_runtime_api::{ + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, + fees::Error as XcmPaymentApiError, +}; /// The address format for describing accounts. pub type Address = MultiAddress; @@ -647,6 +651,48 @@ impl_runtime_apis! { } } + impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { + let acceptable_assets = vec![AssetId(xcm_config::TokenRelayLocation::get())]; + PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { + match asset.try_as::() { + Ok(asset_id) if asset_id.0 == xcm_config::TokenRelayLocation::get() => { + // for native token + Ok(WeightToFee::weight_to_fee(&weight)) + }, + Ok(asset_id) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + }, + Err(_) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_delivery_fees(destination, message) + } + } + + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_call::(origin, call) + } + + fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_xcm::(origin_location, xcm) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml index a29d6db58fe..d4e65da3cd6 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml @@ -59,6 +59,7 @@ rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/cons xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = 
"../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } @@ -135,6 +136,7 @@ std = [ "testnet-parachains-constants/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] @@ -163,6 +165,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm-fee-payment-runtime-api/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 5cd8aa357c3..c80f6879fb3 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -32,7 +32,7 @@ use frame_support::{ traits::{ ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, TransformOrigin, }, - weights::{ConstantMultiplier, Weight}, + weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, }; use frame_system::{ @@ -65,11 +65,15 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use testnet_parachains_constants::rococo::{consensus::*, currency::*, fee::WeightToFee, time::*}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; -use xcm::latest::prelude::BodyId; +use xcm::prelude::*; use xcm_config::{ FellowshipLocation, GovernanceLocation, PriceForSiblingParachainDelivery, XcmConfig, XcmOriginToTransactDispatchOrigin, }; +use xcm_fee_payment_runtime_api::{ + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, + fees::Error as XcmPaymentApiError, +}; /// The address format for describing accounts. pub type Address = MultiAddress; @@ -621,6 +625,48 @@ impl_runtime_apis! 
{ } } + impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { + let acceptable_assets = vec![AssetId(xcm_config::RelayLocation::get())]; + PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { + match asset.try_as::() { + Ok(asset_id) if asset_id.0 == xcm_config::RelayLocation::get() => { + // for native token + Ok(WeightToFee::weight_to_fee(&weight)) + }, + Ok(asset_id) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + }, + Err(_) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_delivery_fees(destination, message) + } + } + + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_call::(origin, call) + } + + fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_xcm::(origin_location, xcm) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml index b72675900fd..b040613d19e 100644 --- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml @@ -59,6 +59,7 @@ westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/co xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } @@ -135,6 +136,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] @@ -163,6 +165,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm-fee-payment-runtime-api/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index af6b5be4469..06c938b8a40 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -32,7 +32,7 @@ use frame_support::{ traits::{ ConstBool, 
ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, TransformOrigin, }, - weights::{ConstantMultiplier, Weight}, + weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, }; use frame_system::{ @@ -65,11 +65,15 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use testnet_parachains_constants::westend::{consensus::*, currency::*, fee::WeightToFee, time::*}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; -use xcm::latest::prelude::BodyId; +use xcm::prelude::*; use xcm_config::{ FellowshipLocation, GovernanceLocation, PriceForSiblingParachainDelivery, XcmConfig, XcmOriginToTransactDispatchOrigin, }; +use xcm_fee_payment_runtime_api::{ + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, + fees::Error as XcmPaymentApiError, +}; /// The address format for describing accounts. pub type Address = MultiAddress; @@ -621,6 +625,48 @@ impl_runtime_apis! { } } + impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { + let acceptable_assets = vec![AssetId(xcm_config::RelayLocation::get())]; + PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { + match asset.try_as::() { + Ok(asset_id) if asset_id.0 == xcm_config::RelayLocation::get() => { + // for native token + Ok(WeightToFee::weight_to_fee(&weight)) + }, + Ok(asset_id) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + }, + Err(_) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_delivery_fees(destination, message) + } + } + + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_call::(origin, call) + } + + fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_xcm::(origin_location, xcm) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 7e4a013117b..e77416e6cd5 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -83,7 +83,7 @@ use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; use xcm::{ latest::prelude::{AssetId as AssetLocationId, BodyId}, - IntoVersion, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, + VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, }; use xcm_fee_payment_runtime_api::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, @@ -849,15 
+849,8 @@ impl_runtime_apis! { impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { - let acceptable = vec![ - // native token - VersionedAssetId::from(AssetLocationId(xcm_config::RelayLocation::get())) - ]; - - Ok(acceptable - .into_iter() - .filter_map(|asset| asset.into_version(xcm_version).ok()) - .collect()) + let acceptable_assets = vec![AssetLocationId(xcm_config::RelayLocation::get())]; + PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index c2614f7e96e..a77c0188a1d 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -106,10 +106,7 @@ use sp_staking::SessionIndex; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use xcm::{ - latest::prelude::*, IntoVersion, VersionedAssetId, VersionedAssets, VersionedLocation, - VersionedXcm, -}; +use xcm::{latest::prelude::*, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}; use xcm_builder::PayOverXcm; pub use frame_system::Call as SystemCall; @@ -1772,15 +1769,8 @@ sp_api::impl_runtime_apis! { impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { - let acceptable = vec![ - // native token - VersionedAssetId::from(AssetId(xcm_config::TokenLocation::get())) - ]; - - Ok(acceptable - .into_iter() - .filter_map(|asset| asset.into_version(xcm_version).ok()) - .collect()) + let acceptable_assets = vec![AssetId(xcm_config::TokenLocation::get())]; + XcmPallet::query_acceptable_payment_assets(xcm_version, acceptable_assets) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index e6790329959..71ff1255907 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -102,10 +102,7 @@ use sp_std::{ #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use xcm::{ - latest::prelude::*, IntoVersion, VersionedAssetId, VersionedAssets, VersionedLocation, - VersionedXcm, -}; +use xcm::{latest::prelude::*, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}; use xcm_builder::PayOverXcm; use xcm_fee_payment_runtime_api::{ @@ -2234,15 +2231,8 @@ sp_api::impl_runtime_apis! 
{ impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi<Block> for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result<Vec<VersionedAssetId>, XcmPaymentApiError> { - let acceptable = vec![ - // native token - VersionedAssetId::from(AssetId(xcm_config::TokenLocation::get())) - ]; - - Ok(acceptable - .into_iter() - .filter_map(|asset| asset.into_version(xcm_version).ok()) - .collect()) + let acceptable_assets = vec![AssetId(xcm_config::TokenLocation::get())]; + XcmPallet::query_acceptable_payment_assets(xcm_version, acceptable_assets) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> { diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 160d5273968..8f67e6e7d94 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -2517,6 +2517,21 @@ impl<T: Config> Pallet<T> { Ok(XcmDryRunEffects { forwarded_xcms, emitted_events: events, execution_result: result }) } + /// Given a list of asset ids, returns the correct API response for + /// `XcmPaymentApi::query_acceptable_payment_assets`. + /// + /// The assets passed in have to be supported for fee payment. + pub fn query_acceptable_payment_assets( + version: xcm::Version, + asset_ids: Vec<AssetId>, + ) -> Result<Vec<VersionedAssetId>, XcmPaymentApiError> { + Ok(asset_ids + .into_iter() + .map(|asset_id| VersionedAssetId::from(asset_id)) + .filter_map(|asset_id| asset_id.into_version(version).ok()) + .collect()) + } + pub fn query_xcm_weight(message: VersionedXcm<()>) -> Result<Weight, XcmPaymentApiError> { let message = Xcm::<()>::try_from(message) .map_err(|_| XcmPaymentApiError::VersionedConversionFailed)?;
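Editor's aside (not part of the patch): every runtime implementation above follows the same two-step shape when pricing execution in the native token. The sketch below shows how the pieces compose; `estimate_native_fee` is a hypothetical helper name, while `query_xcm_weight` and `WeightToFee::weight_to_fee` are the exact calls used in the hunks above (`PolkadotXcm` on the parachains, `XcmPallet` on the relay runtimes).

```rust
// Hedged sketch only, assuming Balance = u128 as in the runtimes above.
fn estimate_native_fee(message: VersionedXcm<()>) -> Result<u128, XcmPaymentApiError> {
    // Weigh the message, which is what `query_xcm_weight` forwards to...
    let weight = PolkadotXcm::query_xcm_weight(message)?;
    // ...then convert the weight into a fee, as the native-token arm of
    // `query_weight_to_asset_fee` does.
    Ok(WeightToFee::weight_to_fee(&weight))
}
```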
diff --git a/prdoc/pr_4634.prdoc b/prdoc/pr_4634.prdoc new file mode 100644 index 00000000000..0c16dedeae1 --- /dev/null +++ b/prdoc/pr_4634.prdoc @@ -0,0 +1,34 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Implement XcmPaymentApi and DryRunApi on all system parachains + +doc: + - audience: Runtime User + description: | + The new XcmPaymentApi and DryRunApi have been implemented on all Westend and Rococo system parachains. + You can test them out. + - audience: Runtime Dev + description: | + The new XcmPaymentApi and DryRunApi have been implemented on all Westend and Rococo system parachains. + These can be used to build UIs that estimate XCM execution and sending, using libraries like PAPI or PJS. + +crates: + - name: bridge-hub-rococo-runtime + bump: minor + - name: bridge-hub-westend-runtime + bump: minor + - name: collectives-westend-runtime + bump: minor + - name: contracts-rococo-runtime + bump: minor + - name: coretime-rococo-runtime + bump: minor + - name: coretime-westend-runtime + bump: minor + - name: people-rococo-runtime + bump: minor + - name: people-westend-runtime + bump: minor + - name: penpal-runtime + bump: minor -- GitLab From f81751e0ce56b0ef50b3a0b5aa0ff4fb16c9ea37 Mon Sep 17 00:00:00 2001 From: gupnik Date: Mon, 3 Jun 2024 00:09:47 +0530 Subject: [PATCH 097/106] Better error for missing index in CRV2 (#4643) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes https://github.com/paritytech/polkadot-sdk/issues/4552 --------- Co-authored-by: command-bot <> Co-authored-by: Bastian Köcher --- .../procedural/src/runtime/parse/mod.rs | 15 ++++++----- .../tests/runtime_ui/missing_pallet_index.rs | 27 +++++++++++++++++++ .../runtime_ui/missing_pallet_index.stderr | 5 ++++ 3 files changed, 41 insertions(+), 6 deletions(-) create mode 100644 substrate/frame/support/test/tests/runtime_ui/missing_pallet_index.rs create mode 100644 substrate/frame/support/test/tests/runtime_ui/missing_pallet_index.stderr diff --git a/substrate/frame/support/procedural/src/runtime/parse/mod.rs b/substrate/frame/support/procedural/src/runtime/parse/mod.rs index 893cb4726e2..dd83cd0da90 100644 --- a/substrate/frame/support/procedural/src/runtime/parse/mod.rs +++ b/substrate/frame/support/procedural/src/runtime/parse/mod.rs @@ -152,8 +152,7 @@ impl Def { let mut pallets = vec![]; for item in items.iter_mut() { - let mut pallet_item = None; - let mut pallet_index = 0; + let mut pallet_index_and_item = None; let mut disable_call = false; let mut disable_unsigned = false; @@ -170,9 +169,8 @@ impl Def { runtime_types = Some(types); }, RuntimeAttr::PalletIndex(span, index) => { - pallet_index = index; - pallet_item = if let syn::Item::Type(item) = item { - Some(item.clone()) + pallet_index_and_item = if let syn::Item::Type(item) = item { + Some((index, item.clone())) } else { let msg = "Invalid runtime::pallet_index, expected type definition"; return Err(syn::Error::new(span, msg)) @@ -187,7 +185,7 @@ impl Def { } } - if let Some(pallet_item) = pallet_item { + if let Some((pallet_index, pallet_item)) = pallet_index_and_item { match *pallet_item.ty.clone() { syn::Type::Path(ref path) => { let pallet_decl = @@ -230,6 +228,11 @@ impl Def { }, _ => continue, } + } else { + if let syn::Item::Type(item) = item { + let msg = "Missing pallet index for pallet declaration. Please add `#[runtime::pallet_index(...)]`"; + return Err(syn::Error::new(item.span(), &msg)) + } } } diff --git a/substrate/frame/support/test/tests/runtime_ui/missing_pallet_index.rs b/substrate/frame/support/test/tests/runtime_ui/missing_pallet_index.rs new file mode 100644 index 00000000000..469a7833e5a --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/missing_pallet_index.rs @@ -0,0 +1,27 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive(RuntimeCall)] + pub struct Runtime; + + pub type System = frame_system; +} + +fn main() {} diff --git a/substrate/frame/support/test/tests/runtime_ui/missing_pallet_index.stderr b/substrate/frame/support/test/tests/runtime_ui/missing_pallet_index.stderr new file mode 100644 index 00000000000..a2cbaa48199 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/missing_pallet_index.stderr @@ -0,0 +1,5 @@ +error: Missing pallet index for pallet declaration. Please add `#[runtime::pallet_index(...)]` + --> tests/runtime_ui/missing_pallet_index.rs:24:5 + | +24 | pub type System = frame_system; + | ^^^ -- GitLab From 5779ec5b775f86fb86be02783ab5c02efbf307ca Mon Sep 17 00:00:00 2001 From: tugy <33746108+tugytur@users.noreply.github.com> Date: Sun, 2 Jun 2024 22:11:23 +0200 Subject: [PATCH 098/106] update amforc westend and its parachain bootnodes (#4641) Tested each bootnode with `--reserved-only --reserved-nodes` --- cumulus/parachains/chain-specs/asset-hub-westend.json | 4 ++-- cumulus/parachains/chain-specs/bridge-hub-westend.json | 4 +++- cumulus/parachains/chain-specs/collectives-westend.json | 4 ++-- cumulus/parachains/chain-specs/coretime-westend.json | 4 +++- polkadot/node/service/chain-specs/paseo.json | 4 ++-- polkadot/node/service/chain-specs/westend.json | 4 ++-- 6 files changed, 14 insertions(+), 10 deletions(-) diff --git a/cumulus/parachains/chain-specs/asset-hub-westend.json b/cumulus/parachains/chain-specs/asset-hub-westend.json index 830eb2c5918..b4334bdfe12 100644 --- a/cumulus/parachains/chain-specs/asset-hub-westend.json +++ b/cumulus/parachains/chain-specs/asset-hub-westend.json @@ -19,8 +19,8 @@ "/dns/westmint-bootnode.turboflakes.io/tcp/30425/wss/p2p/12D3KooWHU4qqSyqKdbXdrCTMXUJxxueaZjqpqSaQqYiFPw6XqEx", "/dns/boot-node.helikon.io/tcp/10200/p2p/12D3KooWMRY8wb7rMT81LLuivvsy6ahUxKHQgYJw4zm1hC1uYLxb", "/dns/boot-node.helikon.io/tcp/10202/wss/p2p/12D3KooWMRY8wb7rMT81LLuivvsy6ahUxKHQgYJw4zm1hC1uYLxb", - "/dns/westmint.bootnode.amforc.com/tcp/30339/p2p/12D3KooWNjKeaANaeZxBAPctmx8jugSYzuw4vnSCJmEDPB5mtRd6", - "/dns/westmint.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWNjKeaANaeZxBAPctmx8jugSYzuw4vnSCJmEDPB5mtRd6", + "/dns/asset-hub-westend.bootnode.amforc.com/tcp/30004/p2p/12D3KooWDfepM7kqUHMXdGqJw3ZmtvAcE2CjPcnYjT2tTfAw3ZBd", + "/dns/asset-hub-westend.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWDfepM7kqUHMXdGqJw3ZmtvAcE2CjPcnYjT2tTfAw3ZBd", "/dns/westmint-boot-ng.dwellir.com/tcp/30345/p2p/12D3KooWFZ9xqApB1wnFYkbe1qJ5Jqwxe2f3i8W25F3tKNXy59ux", "/dns/westmint-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWFZ9xqApB1wnFYkbe1qJ5Jqwxe2f3i8W25F3tKNXy59ux", "/dns/westmint-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWDoq4PVdWm5nzRSvEz3DSSKjVgRhWVUaKyi5JMKwJKYbk", diff --git a/cumulus/parachains/chain-specs/bridge-hub-westend.json b/cumulus/parachains/chain-specs/bridge-hub-westend.json index c07857894f7..f98a046040f 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-westend.json +++ b/cumulus/parachains/chain-specs/bridge-hub-westend.json @@ -27,7 +27,9 @@ 
"/dns/wbr13.rotko.net/tcp/34563/ws/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD", "/dns/wbr13.rotko.net/tcp/35563/wss/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD", "/dns/bridge-hub-westend.bootnodes.polkadotters.com/tcp/30523/p2p/12D3KooWPkwgJofp4GeeRwNgXqkp2aFwdLkCWv3qodpBJLwK43Jj", - "/dns/bridge-hub-westend.bootnodes.polkadotters.com/tcp/30525/wss/p2p/12D3KooWPkwgJofp4GeeRwNgXqkp2aFwdLkCWv3qodpBJLwK43Jj" + "/dns/bridge-hub-westend.bootnodes.polkadotters.com/tcp/30525/wss/p2p/12D3KooWPkwgJofp4GeeRwNgXqkp2aFwdLkCWv3qodpBJLwK43Jj", + "/dns/bridge-hub-westend.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWDSWod2gMtHxunXot538oEMw9p42pnPrpRELdsfYyT8R6", + "/dns/bridge-hub-westend.bootnode.amforc.com/tcp/30007/p2p/12D3KooWDSWod2gMtHxunXot538oEMw9p42pnPrpRELdsfYyT8R6" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-westend.json b/cumulus/parachains/chain-specs/collectives-westend.json index 8680e3a7671..6182218d367 100644 --- a/cumulus/parachains/chain-specs/collectives-westend.json +++ b/cumulus/parachains/chain-specs/collectives-westend.json @@ -19,8 +19,8 @@ "/dns/collectives-westend-bootnode.turboflakes.io/tcp/30700/wss/p2p/12D3KooWAe9CFXp6je3TAPQJE135KRemTLSqEqQBZMFwJontrThZ", "/dns/boot-node.helikon.io/tcp/10260/p2p/12D3KooWMzfnt29VAmrJHQcJU6Vfn4RsMbqPqgyWHqt9VTTAbSrL", "/dns/boot-node.helikon.io/tcp/10262/wss/p2p/12D3KooWMzfnt29VAmrJHQcJU6Vfn4RsMbqPqgyWHqt9VTTAbSrL", - "/dns/collectives-westend.bootnode.amforc.com/tcp/30340/p2p/12D3KooWERPzUhHau6o2XZRUi3tn7544rYiaHL418Nw5t8fYWP1F", - "/dns/collectives-westend.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWERPzUhHau6o2XZRUi3tn7544rYiaHL418Nw5t8fYWP1F", + "/dns/collectives-westend.bootnode.amforc.com/tcp/30010/p2p/12D3KooWRfefWRo1AAB8LCJhVr8DDe9CvBmmKUzJpjd2RGk82pnL", + "/dns/collectives-westend.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWRfefWRo1AAB8LCJhVr8DDe9CvBmmKUzJpjd2RGk82pnL", "/dns/collectives-westend-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWMAgVm1PnsLVfxoDLCbYv1DgnN6tjcRQbrq8xhbwo4whE", "/dns/collectives-westend-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWMAgVm1PnsLVfxoDLCbYv1DgnN6tjcRQbrq8xhbwo4whE", "/dns/westend-collectives-boot-ng.dwellir.com/tcp/30340/p2p/12D3KooWPFM93jgm4pgxx8PM8WJKAJF49qia8jRB95uciUQwYh7m", diff --git a/cumulus/parachains/chain-specs/coretime-westend.json b/cumulus/parachains/chain-specs/coretime-westend.json index 586879b9abc..ca723aacd88 100644 --- a/cumulus/parachains/chain-specs/coretime-westend.json +++ b/cumulus/parachains/chain-specs/coretime-westend.json @@ -28,7 +28,9 @@ "/dns/boot.gatotech.network/tcp/33350/p2p/12D3KooWN6FJDaZvWbtX1pSc6UdHgyF2UZtYxPp3UkXQZa8ko7uS", "/dns/boot.gatotech.network/tcp/35350/wss/p2p/12D3KooWN6FJDaZvWbtX1pSc6UdHgyF2UZtYxPp3UkXQZa8ko7uS", "/dns/coretime-westend.bootnodes.polkadotters.com/tcp/30358/wss/p2p/12D3KooWDc9T2vQ8rHvX7hAt9eLWktD9Q89NDTcLm5STkuNbzUGf", - "/dns/coretime-westend.bootnodes.polkadotters.com/tcp/30356/p2p/12D3KooWDc9T2vQ8rHvX7hAt9eLWktD9Q89NDTcLm5STkuNbzUGf" + "/dns/coretime-westend.bootnodes.polkadotters.com/tcp/30356/p2p/12D3KooWDc9T2vQ8rHvX7hAt9eLWktD9Q89NDTcLm5STkuNbzUGf", + "/dns/coretime-westend.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWG9a9H9An96E3kgXL1sirHta117iuacJXnJRaUywkMiSd", + "/dns/coretime-westend.bootnode.amforc.com/tcp/30013/p2p/12D3KooWG9a9H9An96E3kgXL1sirHta117iuacJXnJRaUywkMiSd" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/polkadot/node/service/chain-specs/paseo.json b/polkadot/node/service/chain-specs/paseo.json 
index 5a67ddcd4c4..e307d5213a3 100644 --- a/polkadot/node/service/chain-specs/paseo.json +++ b/polkadot/node/service/chain-specs/paseo.json @@ -3,8 +3,8 @@ "id": "paseo", "chainType": "Live", "bootNodes": [ - "/dns/paseo.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWFD81HC9memUwuGMLvhDDEfmXjn6jC4n7zyNs3vToXapS", - "/dns/paseo.bootnode.amforc.com/tcp/30344/p2p/12D3KooWFD81HC9memUwuGMLvhDDEfmXjn6jC4n7zyNs3vToXapS", + "/dns/paseo.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWSdf63rZjtGdeWXpQwQwPh8K8c22upcB3B1VmqW8rxrjw", + "/dns/paseo.bootnode.amforc.com/tcp/30001/p2p/12D3KooWSdf63rZjtGdeWXpQwQwPh8K8c22upcB3B1VmqW8rxrjw", "/dns/boot.stake.plus/tcp/43334/wss/p2p/12D3KooWNhgAC3hjZHxaT52EpPFZohkCL1AHFAijqcN8xB9Rwud2", "/dns/boot.stake.plus/tcp/43333/p2p/12D3KooWNhgAC3hjZHxaT52EpPFZohkCL1AHFAijqcN8xB9Rwud2", "/dns/boot.metaspan.io/tcp/36017/wss/p2p/12D3KooWSW6nDfM3SS8rUtjMyjdszivK31bu4a1sRngGa2hFETz7", diff --git a/polkadot/node/service/chain-specs/westend.json b/polkadot/node/service/chain-specs/westend.json index 16bc7ff07b0..1bfb5ba334c 100644 --- a/polkadot/node/service/chain-specs/westend.json +++ b/polkadot/node/service/chain-specs/westend.json @@ -12,8 +12,8 @@ "/dns/boot.stake.plus/tcp/32334/wss/p2p/12D3KooWK8fjVoSvMq5copQYMsdYreSGPGgcMbGMgbMDPfpf3sm7", "/dns/boot-node.helikon.io/tcp/7080/p2p/12D3KooWRFDPyT8vA8mLzh6dJoyujn4QNjeqi6Ch79eSMz9beKXC", "/dns/boot-node.helikon.io/tcp/7082/wss/p2p/12D3KooWRFDPyT8vA8mLzh6dJoyujn4QNjeqi6Ch79eSMz9beKXC", - "/dns/westend.bootnode.amforc.com/tcp/30333/p2p/12D3KooWJ5y9ZgVepBQNW4aabrxgmnrApdVnscqgKWiUu4BNJbC8", - "/dns/westend.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWJ5y9ZgVepBQNW4aabrxgmnrApdVnscqgKWiUu4BNJbC8", + "/dns/westend.bootnode.amforc.com/tcp/30001/p2p/12D3KooWAPmR7rbm2axPjHzF51yvQNDM5GvWfkF5BTV44Y5vJ3ct", + "/dns/westend.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWAPmR7rbm2axPjHzF51yvQNDM5GvWfkF5BTV44Y5vJ3ct", "/dns/westend.bootnodes.polkadotters.com/tcp/30308/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", "/dns/westend.bootnodes.polkadotters.com/tcp/30310/wss/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", "/dns/boot.gatotech.network/tcp/33300/p2p/12D3KooWQGR1vUhoy6mvQorFp3bZFn6NNezhQZ6NWnVV7tpFgoPd", -- GitLab From 795bc77d662de08599670aed9556430379a66ffa Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Mon, 3 Jun 2024 09:27:00 +0200 Subject: [PATCH 099/106] [Pools] Refactors and runtime apis for DelegateStake (#4537) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Runtime Apis Introduces the following runtime apis to help dapps and wallets integrate with the `DelegateStake` functionality of pools (related: https://github.com/paritytech/polkadot-sdk/pull/3905). These apis are meant to support pool and member migration, as well as lazy application of pending slashes of pool members. ```rust fn pool_pending_slash(pool_id: PoolId) -> Balance; fn member_pending_slash(member: AccountId) -> Balance; fn pool_needs_delegate_migration(pool_id: PoolId) -> bool; fn member_needs_delegate_migration(member: AccountId) -> bool; ```
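Editor's aside (not part of the patch): a hedged sketch of the client flow those four apis enable, written against the `NominationPools::api_*` pallet helpers that the runtime api implementations below forward to. The extrinsics named in the comments (`migrate_pool_to_delegate_stake`, `migrate_delegation`, `apply_slash`) are assumed from the nomination-pools pallet; `plan_pool_maintenance` and the `u128` balance are illustrative.

```rust
// Sketch: decide which maintenance transactions a UI should submit for a
// pool and one of its members, assuming Balance = u128.
fn plan_pool_maintenance(pool_id: PoolId, member: AccountId) {
    if NominationPools::api_pool_needs_delegate_migration(pool_id) {
        // Pool still uses `TransferStake`: submit `migrate_pool_to_delegate_stake(pool_id)`.
    }
    if NominationPools::api_member_needs_delegate_migration(member.clone()) {
        // Member funds still sit with the proxy delegator: submit `migrate_delegation(member)`.
    }
    if NominationPools::api_member_pending_slash(member) > 0 {
        // A slash is pending and can be applied lazily by anyone: submit `apply_slash(member)`.
    }
}
```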
## Refactors - Introduces newtypes for `Agent`, `Delegator`, `Pool` and `[Pool]Member`, and refactors `StakeAdapter` and `DelegationInterface` to accept these types. This makes the apis type-safe against passing the wrong kind of account. - Fixes `DelegationInterface` apis to return an `Option` instead of a default value when the key does not exist. - Renames the struct `Agent` that wraps `AgentLedger` to `AgentLedgerOuter`, which is clearer (naming-wise) and distinct from the newtype `Agent`. - Cleans up the new Pool events (related to the `Delegation` feature of pools). --------- Signed-off-by: Matteo Muraca Signed-off-by: Alexandru Gheorghe Signed-off-by: Andrei Sandu Signed-off-by: Adrian Catangiu Signed-off-by: Alexandru Vasile Signed-off-by: Oliver Tale-Yazdi Signed-off-by: divdeploy Signed-off-by: dependabot[bot] Signed-off-by: hongkuang Co-authored-by: Bastian Köcher Co-authored-by: gemini132 <164285545+gemini132@users.noreply.github.com> Co-authored-by: Matteo Muraca <56828990+muraca@users.noreply.github.com> Co-authored-by: Liam Aharon Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Co-authored-by: Alessandro Siniscalchi Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Co-authored-by: Ross Bulat Co-authored-by: Serban Iorga Co-authored-by: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> Co-authored-by: Sam Johnson Co-authored-by: Adrian Catangiu Co-authored-by: Javier Viola <363911+pepoviola@users.noreply.github.com> Co-authored-by: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Co-authored-by: Niklas Adolfsson Co-authored-by: Dastan <88332432+dastansam@users.noreply.github.com> Co-authored-by: Clara van Staden Co-authored-by: Ron Co-authored-by: Vincent Geddes Co-authored-by: Svyatoslav Nikolsky Co-authored-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Co-authored-by: Dino Pačandi <3002868+Dinonard@users.noreply.github.com> Co-authored-by: Andrei Eres Co-authored-by: Alin Dima Co-authored-by: Andrei Sandu Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Bastian Köcher Co-authored-by: Branislav Kontur Co-authored-by: Sebastian Kunert Co-authored-by: gupnik Co-authored-by: Vladimir Istyufeev Co-authored-by: Lulu Co-authored-by: Juan Girini Co-authored-by: Francisco Aguirre Co-authored-by: Dónal Murray Co-authored-by: Shawn Tabrizi Co-authored-by: Kutsal Kaan Bilgin Co-authored-by: Ermal Kaleci Co-authored-by: ordian Co-authored-by: divdeploy <166095818+divdeploy@users.noreply.github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Co-authored-by: Squirrel Co-authored-by: HongKuang <166261675+HongKuang@users.noreply.github.com> Co-authored-by: Tsvetomir Dimitrov Co-authored-by: Egor_P Co-authored-by: Aaro Altonen <48052676+altonen@users.noreply.github.com> Co-authored-by: Dmitry Markin Co-authored-by: Alexandru Vasile Co-authored-by: Léa Narzis <78718413+lean-apple@users.noreply.github.com> Co-authored-by: Gonçalo Pestana Co-authored-by: georgepisaltu <52418509+georgepisaltu@users.noreply.github.com> Co-authored-by: command-bot <> Co-authored-by: PG Herveou Co-authored-by: jimwfs Co-authored-by: jimwfs <169986508+jimwfs@users.noreply.github.com> Co-authored-by: polka.dom
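Editor's aside (not part of the patch): a minimal sketch of the newtype pattern described in the refactor notes above. `DelegatedStaking` stands in for the pallet implementing `DelegationInterface`; the `from`/`get` conversions are the ones visible in the hunks below, and the surrounding types are assumed wiring.

```rust
// Sketch: the wrappers make it impossible to swap the agent and delegator
// arguments by accident at a call site.
fn delegate_typed(
    who: AccountId,
    agent_account: AccountId,
    reward_account: AccountId,
    amount: Balance,
) -> DispatchResult {
    let delegator = Delegator::from(who);      // typed: the delegating account
    let agent = Agent::from(agent_account);    // typed: the account acting as agent
    DelegatedStaking::delegate(delegator, agent, &reward_account, amount)
    // `.get()` unwraps back to a raw `AccountId` only where one is needed.
}
```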
--- polkadot/runtime/westend/src/lib.rs | 16 ++ prdoc/pr_4537.prdoc | 27 +++ substrate/bin/node/runtime/src/lib.rs | 16 ++ .../frame/delegated-staking/src/impls.rs | 75 +++--- substrate/frame/delegated-staking/src/lib.rs | 170 +++++++------ substrate/frame/delegated-staking/src/mock.rs | 14 +- .../frame/delegated-staking/src/tests.rs | 161 ++++++++---- .../frame/delegated-staking/src/types.rs | 19 +- .../benchmarking/src/inner.rs | 91 +++---- .../nomination-pools/runtime-api/src/lib.rs | 25 ++ .../frame/nomination-pools/src/adapter.rs | 207 ++++++++++------ substrate/frame/nomination-pools/src/lib.rs | 229 ++++++++++++------ .../frame/nomination-pools/src/migration.rs | 22 +- substrate/frame/nomination-pools/src/tests.rs | 6 + .../test-delegate-stake/src/lib.rs | 33 ++- .../test-delegate-stake/src/mock.rs | 35 +-- substrate/primitives/staking/src/lib.rs | 85 ++++--- 17 files changed, 800 insertions(+), 431 deletions(-) create mode 100644 prdoc/pr_4537.prdoc diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 71ff1255907..bcdb00c7633 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -2287,6 +2287,22 @@ sp_api::impl_runtime_apis! { fn balance_to_points(pool_id: pallet_nomination_pools::PoolId, new_funds: Balance) -> Balance { NominationPools::api_balance_to_points(pool_id, new_funds) } + + fn pool_pending_slash(pool_id: pallet_nomination_pools::PoolId) -> Balance { + NominationPools::api_pool_pending_slash(pool_id) + } + + fn member_pending_slash(member: AccountId) -> Balance { + NominationPools::api_member_pending_slash(member) + } + + fn pool_needs_delegate_migration(pool_id: pallet_nomination_pools::PoolId) -> bool { + NominationPools::api_pool_needs_delegate_migration(pool_id) + } + + fn member_needs_delegate_migration(member: AccountId) -> bool { + NominationPools::api_member_needs_delegate_migration(member) + } } impl pallet_staking_runtime_api::StakingApi<Block, Balance, AccountId> for Runtime { diff --git a/prdoc/pr_4537.prdoc b/prdoc/pr_4537.prdoc new file mode 100644 index 00000000000..0148c95fb4e --- /dev/null +++ b/prdoc/pr_4537.prdoc @@ -0,0 +1,27 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Runtime apis to help with delegate-stake-based Nomination Pools. + +doc: + - audience: Runtime User + description: | + Introduces a new set of runtime apis to help dapps and wallets integrate with the delegate-stake + functionalities of Nomination Pools. These apis support pool and member migration, as well as lazy application of + pool members' pending slashes. + +crates: + - name: pallet-nomination-pools + bump: minor + - name: westend-runtime + bump: minor + - name: kitchensink-runtime + bump: minor + - name: pallet-delegated-staking + bump: minor + - name: sp-staking + bump: minor + - name: pallet-nomination-pools-benchmarking + bump: patch + - name: pallet-nomination-pools-runtime-api + bump: minor diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 801abc28d3d..8fb59a9d847 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -2774,6 +2774,22 @@ impl_runtime_apis!
{ fn balance_to_points(pool_id: pallet_nomination_pools::PoolId, new_funds: Balance) -> Balance { NominationPools::api_balance_to_points(pool_id, new_funds) } + + fn pool_pending_slash(pool_id: pallet_nomination_pools::PoolId) -> Balance { + NominationPools::api_pool_pending_slash(pool_id) + } + + fn member_pending_slash(member: AccountId) -> Balance { + NominationPools::api_member_pending_slash(member) + } + + fn pool_needs_delegate_migration(pool_id: pallet_nomination_pools::PoolId) -> bool { + NominationPools::api_pool_needs_delegate_migration(pool_id) + } + + fn member_needs_delegate_migration(member: AccountId) -> bool { + NominationPools::api_member_needs_delegate_migration(member) + } } impl pallet_staking_runtime_api::StakingApi<Block, Balance, AccountId> for Runtime { diff --git a/substrate/frame/delegated-staking/src/impls.rs b/substrate/frame/delegated-staking/src/impls.rs index 032f6120642..9f5649672d7 100644 --- a/substrate/frame/delegated-staking/src/impls.rs +++ b/substrate/frame/delegated-staking/src/impls.rs @@ -19,46 +19,46 @@ //! Implementations of public traits, namely [`DelegationInterface`] and [`OnStakingUpdate`]. use super::*; -use sp_staking::{DelegationInterface, DelegationMigrator, OnStakingUpdate}; +use sp_staking::{Agent, DelegationInterface, DelegationMigrator, Delegator, OnStakingUpdate}; impl<T: Config> DelegationInterface for Pallet<T> { type Balance = BalanceOf<T>; type AccountId = T::AccountId; /// Effective balance of the `Agent` account. - fn agent_balance(who: &Self::AccountId) -> Self::Balance { - Agent::<T>::get(who) - .map(|agent| agent.ledger.effective_balance()) - .unwrap_or_default() + fn agent_balance(agent: Agent<Self::AccountId>) -> Option<Self::Balance> { + AgentLedgerOuter::<T>::get(&agent.get()) + .map(|a| a.ledger.effective_balance()) + .ok() } - fn delegator_balance(delegator: &Self::AccountId) -> Self::Balance { - Delegation::<T>::get(delegator).map(|d| d.amount).unwrap_or_default() + fn delegator_balance(delegator: Delegator<Self::AccountId>) -> Option<Self::Balance> { + Delegation::<T>::get(&delegator.get()).map(|d| d.amount) } /// Delegate funds to an `Agent`. fn delegate( - who: &Self::AccountId, - agent: &Self::AccountId, + who: Delegator<Self::AccountId>, + agent: Agent<Self::AccountId>, reward_account: &Self::AccountId, amount: Self::Balance, ) -> DispatchResult { Pallet::<T>::register_agent( - RawOrigin::Signed(agent.clone()).into(), + RawOrigin::Signed(agent.clone().get()).into(), reward_account.clone(), )?; // Delegate the funds from who to the `Agent` account. - Pallet::<T>::delegate_to_agent(RawOrigin::Signed(who.clone()).into(), agent.clone(), amount) + Pallet::<T>::delegate_to_agent(RawOrigin::Signed(who.get()).into(), agent.get(), amount) } /// Add more delegation to the `Agent` account. fn delegate_extra( - who: &Self::AccountId, - agent: &Self::AccountId, + who: Delegator<Self::AccountId>, + agent: Agent<Self::AccountId>, amount: Self::Balance, ) -> DispatchResult { - Pallet::<T>::delegate_to_agent(RawOrigin::Signed(who.clone()).into(), agent.clone(), amount) + Pallet::<T>::delegate_to_agent(RawOrigin::Signed(who.get()).into(), agent.get(), amount) } /// Withdraw delegation of `delegator` to `Agent`. /// /// If there are funds in `Agent` account that can be withdrawn, then those funds would be /// unlocked/released in the delegator's account.
fn withdraw_delegation( - delegator: &Self::AccountId, - agent: &Self::AccountId, + delegator: Delegator<Self::AccountId>, + agent: Agent<Self::AccountId>, amount: Self::Balance, num_slashing_spans: u32, ) -> DispatchResult { Pallet::<T>::release_delegation( - RawOrigin::Signed(agent.clone()).into(), - delegator.clone(), + RawOrigin::Signed(agent.get()).into(), + delegator.get(), amount, num_slashing_spans, ) } - /// Returns true if the `Agent` have any slash pending to be applied. - fn has_pending_slash(agent: &Self::AccountId) -> bool { - Agent::<T>::get(agent) - .map(|d| !d.ledger.pending_slash.is_zero()) - .unwrap_or(false) + /// Returns pending slash of the `agent`. + fn pending_slash(agent: Agent<Self::AccountId>) -> Option<Self::Balance> { + AgentLedgerOuter::<T>::get(&agent.get()).map(|d| d.ledger.pending_slash).ok() } fn delegator_slash( - agent: &Self::AccountId, - delegator: &Self::AccountId, + agent: Agent<Self::AccountId>, + delegator: Delegator<Self::AccountId>, value: Self::Balance, maybe_reporter: Option<Self::AccountId>, ) -> sp_runtime::DispatchResult { - Pallet::<T>::do_slash(agent.clone(), delegator.clone(), value, maybe_reporter) + Pallet::<T>::do_slash(agent, delegator, value, maybe_reporter) } } @@ -101,32 +99,29 @@ impl<T: Config> DelegationMigrator for Pallet<T> { type AccountId = T::AccountId; fn migrate_nominator_to_agent( - agent: &Self::AccountId, + agent: Agent<Self::AccountId>, reward_account: &Self::AccountId, ) -> DispatchResult { - Pallet::<T>::migrate_to_agent( - RawOrigin::Signed(agent.clone()).into(), - reward_account.clone(), - ) + Pallet::<T>::migrate_to_agent(RawOrigin::Signed(agent.get()).into(), reward_account.clone()) } fn migrate_delegation( - agent: &Self::AccountId, - delegator: &Self::AccountId, + agent: Agent<Self::AccountId>, + delegator: Delegator<Self::AccountId>, value: Self::Balance, ) -> DispatchResult { Pallet::<T>::migrate_delegation( - RawOrigin::Signed(agent.clone()).into(), - delegator.clone(), + RawOrigin::Signed(agent.get()).into(), + delegator.get(), value, ) } /// Only used for testing. #[cfg(feature = "runtime-benchmarks")] - fn drop_agent(agent: &T::AccountId) { - <Agents<T>>::remove(agent); + fn drop_agent(agent: Agent<T::AccountId>) { + <Agents<T>>::remove(agent.clone().get()); <Delegators<T>>::iter() - .filter(|(_, delegation)| delegation.agent == *agent) + .filter(|(_, delegation)| delegation.agent == agent.clone().get()) .for_each(|(delegator, _)| { let _ = T::Currency::release_all( &HoldReason::StakingDelegation.into(), @@ -136,7 +131,7 @@ impl<T: Config> DelegationMigrator for Pallet<T> { <Delegators<T>>::remove(&delegator); }); - T::CoreStaking::migrate_to_direct_staker(agent); + T::CoreStaking::migrate_to_direct_staker(&agent.get()); } } @@ -158,7 +153,7 @@ impl<T: Config> OnStakingUpdate<T::AccountId, BalanceOf<T>> for Pallet<T> { fn on_withdraw(stash: &T::AccountId, amount: BalanceOf<T>) { // if there is a withdraw to the agent, then add it to the unclaimed withdrawals. - let _ = Agent::<T>::get(stash) + let _ = AgentLedgerOuter::<T>::get(stash)
.and_then(|agent| agent.add_unclaimed_withdraw(amount).defensive()) .map(|agent| agent.save()); diff --git a/substrate/frame/delegated-staking/src/lib.rs b/substrate/frame/delegated-staking/src/lib.rs index 8581a4a981f..4b924bce3a5 100644 --- a/substrate/frame/delegated-staking/src/lib.rs +++ b/substrate/frame/delegated-staking/src/lib.rs @@ -153,7 +153,7 @@ use sp_runtime::{ traits::{AccountIdConversion, CheckedAdd, CheckedSub, Zero}, ArithmeticError, DispatchResult, Perbill, RuntimeDebug, Saturating, }; -use sp_staking::{EraIndex, StakingInterface, StakingUnchecked}; +use sp_staking::{Agent, Delegator, EraIndex, StakingInterface, StakingUnchecked}; use sp_std::{convert::TryInto, prelude::*}; pub type BalanceOf = @@ -345,7 +345,12 @@ pub mod pallet { num_slashing_spans: u32, ) -> DispatchResult { let who = ensure_signed(origin)?; - Self::do_release(&who, &delegator, amount, num_slashing_spans) + Self::do_release( + Agent::from(who), + Delegator::from(delegator), + amount, + num_slashing_spans, + ) } /// Migrate delegated funds that are held in `proxy_delegator` to the claiming `delegator`'s @@ -376,11 +381,11 @@ pub mod pallet { ensure!(Self::is_agent(&agent), Error::::NotAgent); // and has enough delegated balance to migrate. - let proxy_delegator = Self::generate_proxy_delegator(agent); - let balance_remaining = Self::held_balance_of(&proxy_delegator); + let proxy_delegator = Self::generate_proxy_delegator(Agent::from(agent)); + let balance_remaining = Self::held_balance_of(proxy_delegator.clone()); ensure!(balance_remaining >= amount, Error::::NotEnoughFunds); - Self::do_migrate_delegation(&proxy_delegator, &delegator, amount) + Self::do_migrate_delegation(proxy_delegator, Delegator::from(delegator), amount) } /// Delegate given `amount` of tokens to an `Agent` account. @@ -410,10 +415,10 @@ pub mod pallet { ensure!(Self::is_agent(&agent), Error::::NotAgent); // add to delegation. - Self::do_delegate(&delegator, &agent, amount)?; + Self::do_delegate(Delegator::from(delegator), Agent::from(agent.clone()), amount)?; // bond the newly delegated amount to `CoreStaking`. - Self::do_bond(&agent, amount) + Self::do_bond(Agent::from(agent), amount) } } @@ -429,18 +434,18 @@ pub mod pallet { impl Pallet { /// Derive an account from the migrating agent account where the unclaimed delegation funds /// are held. - pub fn generate_proxy_delegator(agent: T::AccountId) -> T::AccountId { - Self::sub_account(AccountType::ProxyDelegator, agent) + pub fn generate_proxy_delegator(agent: Agent) -> Delegator { + Delegator::from(Self::sub_account(AccountType::ProxyDelegator, agent.get())) } /// Derive a (keyless) pot account from the given agent account and account type. - pub(crate) fn sub_account(account_type: AccountType, agent: T::AccountId) -> T::AccountId { - T::PalletId::get().into_sub_account_truncating((account_type, agent.clone())) + fn sub_account(account_type: AccountType, acc: T::AccountId) -> T::AccountId { + T::PalletId::get().into_sub_account_truncating((account_type, acc.clone())) } /// Held balance of a delegator. - pub(crate) fn held_balance_of(who: &T::AccountId) -> BalanceOf { - T::Currency::balance_on_hold(&HoldReason::StakingDelegation.into(), who) + pub(crate) fn held_balance_of(who: Delegator) -> BalanceOf { + T::Currency::balance_on_hold(&HoldReason::StakingDelegation.into(), &who.get()) } /// Returns true if who is registered as an `Agent`. 
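The `sub_account` helper above uses the standard `PalletId`-based account derivation. Below is a minimal standalone sketch of the same idea; the `py/dlstk` pallet id and the single-variant `AccountType` enum are illustrative assumptions, the real values come from the runtime's `Config::PalletId` and the pallet's own `AccountType`:

```rust
use codec::Encode;
use frame_support::PalletId;
use sp_core::crypto::AccountId32;
use sp_runtime::traits::AccountIdConversion;

// illustrative stand-in for the pallet's own `AccountType` enum
#[derive(Encode)]
enum AccountType {
	ProxyDelegator,
}

/// Derive the keyless proxy-delegator account for `agent` by hash-truncating
/// `(pallet_id, (account_type, agent))` into an `AccountId32`.
fn proxy_delegator_of(agent: AccountId32) -> AccountId32 {
	// `py/dlstk` is an illustrative 8-byte pallet id, not necessarily the real one.
	PalletId(*b"py/dlstk").into_sub_account_truncating((AccountType::ProxyDelegator, agent))
}
```

Because the derivation is a pure hash of its inputs, the same agent always maps to the same proxy delegator, which is why `generate_proxy_delegator` needs no storage lookup.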
@@ -475,10 +480,10 @@ impl Pallet { // We create a proxy delegator that will keep all the delegation funds until funds are // transferred to actual delegator. - let proxy_delegator = Self::generate_proxy_delegator(who.clone()); + let proxy_delegator = Self::generate_proxy_delegator(Agent::from(who.clone())); // Keep proxy delegator alive until all funds are migrated. - frame_system::Pallet::::inc_providers(&proxy_delegator); + frame_system::Pallet::::inc_providers(&proxy_delegator.clone().get()); // Get current stake let stake = T::CoreStaking::stake(who)?; @@ -491,11 +496,16 @@ impl Pallet { T::Currency::reducible_balance(who, Preservation::Expendable, Fortitude::Polite); // This should never fail but if it does, it indicates bad state and we abort. - T::Currency::transfer(who, &proxy_delegator, amount_to_transfer, Preservation::Expendable)?; + T::Currency::transfer( + who, + &proxy_delegator.clone().get(), + amount_to_transfer, + Preservation::Expendable, + )?; T::CoreStaking::update_payee(who, reward_account)?; // delegate all transferred funds back to agent. - Self::do_delegate(&proxy_delegator, who, amount_to_transfer)?; + Self::do_delegate(proxy_delegator, Agent::from(who.clone()), amount_to_transfer)?; // if the transferred/delegated amount was greater than the stake, mark the extra as // unclaimed withdrawal. @@ -516,32 +526,36 @@ impl Pallet { } /// Bond `amount` to `agent_acc` in [`Config::CoreStaking`]. - fn do_bond(agent_acc: &T::AccountId, amount: BalanceOf) -> DispatchResult { - let agent = Agent::::get(agent_acc)?; + fn do_bond(agent_acc: Agent, amount: BalanceOf) -> DispatchResult { + let agent_ledger = AgentLedgerOuter::::get(&agent_acc.get())?; - let available_to_bond = agent.available_to_bond(); + let available_to_bond = agent_ledger.available_to_bond(); defensive_assert!(amount == available_to_bond, "not expected value to bond"); - if agent.is_bonded() { - T::CoreStaking::bond_extra(&agent.key, amount) + if agent_ledger.is_bonded() { + T::CoreStaking::bond_extra(&agent_ledger.key, amount) } else { - T::CoreStaking::virtual_bond(&agent.key, amount, agent.reward_account()) + T::CoreStaking::virtual_bond(&agent_ledger.key, amount, agent_ledger.reward_account()) } } /// Delegate `amount` from `delegator` to `agent`. fn do_delegate( - delegator: &T::AccountId, - agent: &T::AccountId, + delegator: Delegator, + agent: Agent, amount: BalanceOf, ) -> DispatchResult { - let mut ledger = AgentLedger::::get(agent).ok_or(Error::::NotAgent)?; + // get inner type + let agent = agent.get(); + let delegator = delegator.get(); + + let mut ledger = AgentLedger::::get(&agent).ok_or(Error::::NotAgent)?; // try to hold the funds. 
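+ // note: the delegated amount stays in the delegator's own account; it is locked under the `StakingDelegation` hold reason rather than transferred to the agent.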
- T::Currency::hold(&HoldReason::StakingDelegation.into(), delegator, amount)?; + T::Currency::hold(&HoldReason::StakingDelegation.into(), &delegator, amount)?; let new_delegation_amount = - if let Some(existing_delegation) = Delegation::::get(delegator) { - ensure!(&existing_delegation.agent == agent, Error::::InvalidDelegation); + if let Some(existing_delegation) = Delegation::::get(&delegator) { + ensure!(existing_delegation.agent == agent, Error::::InvalidDelegation); existing_delegation .amount .checked_add(&amount) @@ -550,54 +564,54 @@ impl Pallet { amount }; - Delegation::::new(agent, new_delegation_amount).update_or_kill(delegator); + Delegation::::new(&agent, new_delegation_amount).update_or_kill(&delegator); ledger.total_delegated = ledger.total_delegated.checked_add(&amount).ok_or(ArithmeticError::Overflow)?; - ledger.update(agent); + ledger.update(&agent); - Self::deposit_event(Event::::Delegated { - agent: agent.clone(), - delegator: delegator.clone(), - amount, - }); + Self::deposit_event(Event::::Delegated { agent, delegator, amount }); Ok(()) } /// Release `amount` of delegated funds from `agent` to `delegator`. fn do_release( - who: &T::AccountId, - delegator: &T::AccountId, + who: Agent, + delegator: Delegator, amount: BalanceOf, num_slashing_spans: u32, ) -> DispatchResult { - let mut agent = Agent::::get(who)?; - let mut delegation = Delegation::::get(delegator).ok_or(Error::::NotDelegator)?; + // get inner type + let agent = who.get(); + let delegator = delegator.get(); + + let mut agent_ledger = AgentLedgerOuter::::get(&agent)?; + let mut delegation = Delegation::::get(&delegator).ok_or(Error::::NotDelegator)?; // make sure delegation to be released is sound. - ensure!(&delegation.agent == who, Error::::NotAgent); + ensure!(delegation.agent == agent, Error::::NotAgent); ensure!(delegation.amount >= amount, Error::::NotEnoughFunds); // if we do not already have enough funds to be claimed, try withdraw some more. // keep track if we killed the staker in the process. - let stash_killed = if agent.ledger.unclaimed_withdrawals < amount { + let stash_killed = if agent_ledger.ledger.unclaimed_withdrawals < amount { // withdraw account. - let killed = T::CoreStaking::withdraw_unbonded(who.clone(), num_slashing_spans) + let killed = T::CoreStaking::withdraw_unbonded(agent.clone(), num_slashing_spans) .map_err(|_| Error::::WithdrawFailed)?; // reload agent from storage since withdrawal might have changed the state. - agent = agent.refresh()?; + agent_ledger = agent_ledger.reload()?; Some(killed) } else { None }; // if we still do not have enough funds to release, abort. - ensure!(agent.ledger.unclaimed_withdrawals >= amount, Error::::NotEnoughFunds); + ensure!(agent_ledger.ledger.unclaimed_withdrawals >= amount, Error::::NotEnoughFunds); // Claim withdraw from agent. Kill agent if no delegation left. // TODO: Ideally if there is a register, there should be an unregister that should // clean up the agent. Can be improved in future. - if agent.remove_unclaimed_withdraw(amount)?.update_or_kill()? { + if agent_ledger.remove_unclaimed_withdraw(amount)?.update_or_kill()? { match stash_killed { Some(killed) => { // this implies we did a `CoreStaking::withdraw` before release. Ensure @@ -607,12 +621,12 @@ impl Pallet { None => { // We did not do a `CoreStaking::withdraw` before release. Ensure staker is // already killed in `CoreStaking`. 
- ensure!(T::CoreStaking::status(who).is_err(), Error::::BadState); + ensure!(T::CoreStaking::status(&agent).is_err(), Error::::BadState); }, } // Remove provider reference for `who`. - let _ = frame_system::Pallet::::dec_providers(who).defensive(); + let _ = frame_system::Pallet::::dec_providers(&agent).defensive(); } // book keep delegation @@ -622,56 +636,56 @@ impl Pallet { .defensive_ok_or(ArithmeticError::Overflow)?; // remove delegator if nothing delegated anymore - delegation.update_or_kill(delegator); + delegation.update_or_kill(&delegator); let released = T::Currency::release( &HoldReason::StakingDelegation.into(), - delegator, + &delegator, amount, Precision::BestEffort, )?; defensive_assert!(released == amount, "hold should have been released fully"); - Self::deposit_event(Event::::Released { - agent: who.clone(), - delegator: delegator.clone(), - amount, - }); + Self::deposit_event(Event::::Released { agent, delegator, amount }); Ok(()) } /// Migrates delegation of `amount` from `source` account to `destination` account. fn do_migrate_delegation( - source_delegator: &T::AccountId, - destination_delegator: &T::AccountId, + source_delegator: Delegator, + destination_delegator: Delegator, amount: BalanceOf, ) -> DispatchResult { + // get inner type + let source_delegator = source_delegator.get(); + let destination_delegator = destination_delegator.get(); + let mut source_delegation = - Delegators::::get(source_delegator).defensive_ok_or(Error::::BadState)?; + Delegators::::get(&source_delegator).defensive_ok_or(Error::::BadState)?; // some checks that must have already been checked before. ensure!(source_delegation.amount >= amount, Error::::NotEnoughFunds); debug_assert!( - !Self::is_delegator(destination_delegator) && !Self::is_agent(destination_delegator) + !Self::is_delegator(&destination_delegator) && !Self::is_agent(&destination_delegator) ); let agent = source_delegation.agent.clone(); // update delegations - Delegation::::new(&agent, amount).update_or_kill(destination_delegator); + Delegation::::new(&agent, amount).update_or_kill(&destination_delegator); source_delegation.amount = source_delegation .amount .checked_sub(&amount) .defensive_ok_or(Error::::BadState)?; - source_delegation.update_or_kill(source_delegator); + source_delegation.update_or_kill(&source_delegator); // release funds from source let released = T::Currency::release( &HoldReason::StakingDelegation.into(), - source_delegator, + &source_delegator, amount, Precision::BestEffort, )?; @@ -680,8 +694,8 @@ impl Pallet { // transfer the released amount to `destination_delegator`. let post_balance = T::Currency::transfer( - source_delegator, - destination_delegator, + &source_delegator, + &destination_delegator, amount, Preservation::Expendable, ) @@ -689,15 +703,15 @@ impl Pallet { // if balance is zero, clear provider for source (proxy) delegator. if post_balance == Zero::zero() { - let _ = frame_system::Pallet::::dec_providers(source_delegator).defensive(); + let _ = frame_system::Pallet::::dec_providers(&source_delegator).defensive(); } // hold the funds again in the new delegator account. 
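+ // the hold follows the funds: released from the source delegator, transferred, and then re-applied on the destination delegator.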
- T::Currency::hold(&HoldReason::StakingDelegation.into(), destination_delegator, amount)?; + T::Currency::hold(&HoldReason::StakingDelegation.into(), &destination_delegator, amount)?; Self::deposit_event(Event::::MigratedDelegation { agent, - delegator: destination_delegator.clone(), + delegator: destination_delegator, amount, }); @@ -706,17 +720,21 @@ impl Pallet { /// Take slash `amount` from agent's `pending_slash`counter and apply it to `delegator` account. pub fn do_slash( - agent_acc: T::AccountId, - delegator: T::AccountId, + agent: Agent, + delegator: Delegator, amount: BalanceOf, maybe_reporter: Option, ) -> DispatchResult { - let agent = Agent::::get(&agent_acc)?; + // get inner type + let agent = agent.get(); + let delegator = delegator.get(); + + let agent_ledger = AgentLedgerOuter::::get(&agent)?; // ensure there is something to slash - ensure!(agent.ledger.pending_slash > Zero::zero(), Error::::NothingToSlash); + ensure!(agent_ledger.ledger.pending_slash > Zero::zero(), Error::::NothingToSlash); let mut delegation = >::get(&delegator).ok_or(Error::::NotDelegator)?; - ensure!(delegation.agent == agent_acc, Error::::NotAgent); + ensure!(delegation.agent == agent.clone(), Error::::NotAgent); ensure!(delegation.amount >= amount, Error::::NotEnoughFunds); // slash delegator @@ -728,7 +746,7 @@ impl Pallet { let actual_slash = credit.peek(); // remove the applied slashed amount from agent. - agent.remove_slash(actual_slash).save(); + agent_ledger.remove_slash(actual_slash).save(); delegation.amount = delegation.amount.checked_sub(&actual_slash).ok_or(ArithmeticError::Overflow)?; delegation.update_or_kill(&delegator); @@ -746,15 +764,15 @@ impl Pallet { T::OnSlash::on_unbalanced(credit); - Self::deposit_event(Event::::Slashed { agent: agent_acc, delegator, amount }); + Self::deposit_event(Event::::Slashed { agent, delegator, amount }); Ok(()) } /// Total balance that is available for stake. Includes already staked amount. #[cfg(test)] - pub(crate) fn stakeable_balance(who: &T::AccountId) -> BalanceOf { - Agent::::get(who) + pub(crate) fn stakeable_balance(who: Agent) -> BalanceOf { + AgentLedgerOuter::::get(&who.get()) .map(|agent| agent.ledger.stakeable_balance()) .unwrap_or_default() } diff --git a/substrate/frame/delegated-staking/src/mock.rs b/substrate/frame/delegated-staking/src/mock.rs index b9eaffb970e..c1875055f2f 100644 --- a/substrate/frame/delegated-staking/src/mock.rs +++ b/substrate/frame/delegated-staking/src/mock.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::{self as delegated_staking, types::Agent}; +use crate::{self as delegated_staking, types::AgentLedgerOuter}; use frame_support::{ assert_ok, derive_impl, pallet_prelude::*, @@ -34,7 +34,7 @@ use frame_support::dispatch::RawOrigin; use pallet_staking::{ActiveEra, ActiveEraInfo, CurrentEra}; use sp_core::U256; use sp_runtime::traits::Convert; -use sp_staking::{Stake, StakingInterface}; +use sp_staking::{Agent, Stake, StakingInterface}; pub type T = Runtime; type Block = frame_system::mocking::MockBlock; @@ -309,8 +309,8 @@ pub(crate) fn setup_delegation_stake( } // sanity checks - assert_eq!(DelegatedStaking::stakeable_balance(&agent), delegated_amount); - assert_eq!(Agent::::get(&agent).unwrap().available_to_bond(), 0); + assert_eq!(DelegatedStaking::stakeable_balance(Agent::from(agent)), delegated_amount); + assert_eq!(AgentLedgerOuter::::get(&agent).unwrap().available_to_bond(), 0); delegated_amount } @@ -322,11 +322,11 @@ pub(crate) fn start_era(era: sp_staking::EraIndex) { pub(crate) fn eq_stake(who: AccountId, total: Balance, active: Balance) -> bool { Staking::stake(&who).unwrap() == Stake { total, active } && - get_agent(&who).ledger.stakeable_balance() == total + get_agent_ledger(&who).ledger.stakeable_balance() == total } -pub(crate) fn get_agent(agent: &AccountId) -> Agent { - Agent::::get(agent).expect("delegate should exist") +pub(crate) fn get_agent_ledger(agent: &AccountId) -> AgentLedgerOuter { + AgentLedgerOuter::::get(agent).expect("delegate should exist") } parameter_types! { diff --git a/substrate/frame/delegated-staking/src/tests.rs b/substrate/frame/delegated-staking/src/tests.rs index 6b68726b274..d40539d40dd 100644 --- a/substrate/frame/delegated-staking/src/tests.rs +++ b/substrate/frame/delegated-staking/src/tests.rs @@ -22,7 +22,7 @@ use crate::mock::*; use frame_support::{assert_noop, assert_ok, traits::fungible::InspectHold}; use pallet_nomination_pools::{Error as PoolsError, Event as PoolsEvent}; use pallet_staking::Error as StakingError; -use sp_staking::{DelegationInterface, StakerStatus}; +use sp_staking::{Agent, DelegationInterface, Delegator, StakerStatus}; #[test] fn create_an_agent_with_first_delegator() { @@ -48,12 +48,12 @@ fn create_an_agent_with_first_delegator() { // verify assert!(DelegatedStaking::is_agent(&agent)); - assert_eq!(DelegatedStaking::stakeable_balance(&agent), 100); + assert_eq!(DelegatedStaking::stakeable_balance(Agent::from(agent)), 100); assert_eq!( Balances::balance_on_hold(&HoldReason::StakingDelegation.into(), &delegator), 100 ); - assert_eq!(DelegatedStaking::held_balance_of(&delegator), 100); + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(delegator)), 100); }); } @@ -102,7 +102,7 @@ fn create_multiple_delegators() { // stakeable balance is 0 for non agent fund(&agent, 1000); assert!(!DelegatedStaking::is_agent(&agent)); - assert_eq!(DelegatedStaking::stakeable_balance(&agent), 0); + assert_eq!(DelegatedStaking::stakeable_balance(Agent::from(agent)), 0); // set intention to accept delegation. assert_ok!(DelegatedStaking::register_agent( @@ -124,7 +124,7 @@ fn create_multiple_delegators() { // verify assert!(DelegatedStaking::is_agent(&agent)); - assert_eq!(DelegatedStaking::stakeable_balance(&agent), 100 * 100); + assert_eq!(DelegatedStaking::stakeable_balance(Agent::from(agent)), 100 * 100); }); } @@ -260,39 +260,55 @@ fn apply_pending_slash() { // agent cannot slash an account that is not its delegator. 
setup_delegation_stake(210, 211, (351..=352).collect(), 100, 0); assert_noop!( - ::delegator_slash(&agent, &351, 1, Some(400)), + ::delegator_slash( + Agent::from(agent), + Delegator::from(351), + 1, + Some(400) + ), Error::::NotAgent ); // or a non delegator account fund(&353, 100); assert_noop!( - ::delegator_slash(&agent, &353, 1, Some(400)), + ::delegator_slash( + Agent::from(agent), + Delegator::from(353), + 1, + Some(400) + ), Error::::NotDelegator ); // ensure bookkept pending slash is correct. - assert_eq!(get_agent(&agent).ledger.pending_slash, total_staked / 2); + assert_eq!(get_agent_ledger(&agent).ledger.pending_slash, total_staked / 2); let mut old_reporter_balance = Balances::free_balance(reporter); // lets apply the pending slash on delegators. for i in delegators { // balance before slash - let initial_pending_slash = get_agent(&agent).ledger.pending_slash; + let initial_pending_slash = get_agent_ledger(&agent).ledger.pending_slash; assert!(initial_pending_slash > 0); - let unslashed_balance = DelegatedStaking::held_balance_of(&i); + let unslashed_balance = DelegatedStaking::held_balance_of(Delegator::from(i)); let slash = unslashed_balance / 2; // slash half of delegator's delegation. assert_ok!(::delegator_slash( - &agent, - &i, + Agent::from(agent), + Delegator::from(i), slash, Some(400) )); // balance after slash. - assert_eq!(DelegatedStaking::held_balance_of(&i), unslashed_balance - slash); + assert_eq!( + DelegatedStaking::held_balance_of(Delegator::from(i)), + unslashed_balance - slash + ); // pending slash is reduced by the amount slashed. - assert_eq!(get_agent(&agent).ledger.pending_slash, initial_pending_slash - slash); + assert_eq!( + get_agent_ledger(&agent).ledger.pending_slash, + initial_pending_slash - slash + ); // reporter get 10% of the slash amount. 
assert_eq!( Balances::free_balance(reporter) - old_reporter_balance, @@ -303,11 +319,16 @@ fn apply_pending_slash() { } // nothing to slash anymore - assert_eq!(get_agent(&agent).ledger.pending_slash, 0); + assert_eq!(get_agent_ledger(&agent).ledger.pending_slash, 0); // cannot slash anymore assert_noop!( - ::delegator_slash(&agent, &350, 1, None), + ::delegator_slash( + Agent::from(agent), + Delegator::from(350), + 1, + None + ), Error::::NothingToSlash ); }); @@ -332,7 +353,7 @@ mod staking_integration { RawOrigin::Signed(agent).into(), reward_acc )); - assert_eq!(DelegatedStaking::stakeable_balance(&agent), 0); + assert_eq!(DelegatedStaking::stakeable_balance(Agent::from(agent)), 0); let mut delegated_balance: Balance = 0; @@ -349,9 +370,12 @@ mod staking_integration { Balances::balance_on_hold(&HoldReason::StakingDelegation.into(), &delegator), 100 ); - assert_eq!(DelegatedStaking::delegator_balance(&delegator), 100); + assert_eq!( + DelegatedStaking::delegator_balance(Delegator::from(delegator)).unwrap(), + 100 + ); - let agent_obj = get_agent(&agent); + let agent_obj = get_agent_ledger(&agent); assert_eq!(agent_obj.ledger.stakeable_balance(), delegated_balance); assert_eq!(agent_obj.available_to_bond(), 0); assert_eq!(agent_obj.bonded_stake(), delegated_balance); @@ -403,9 +427,9 @@ mod staking_integration { Error::::NotEnoughFunds ); - assert_eq!(get_agent(&agent).available_to_bond(), 0); + assert_eq!(get_agent_ledger(&agent).available_to_bond(), 0); // full amount is still delegated - assert_eq!(get_agent(&agent).ledger.effective_balance(), total_staked); + assert_eq!(get_agent_ledger(&agent).ledger.effective_balance(), total_staked); start_era(5); // at era 5, 50 tokens are withdrawable, cannot withdraw more. @@ -491,7 +515,7 @@ mod staking_integration { start_era(i); assert_ok!(Staking::unbond(RawOrigin::Signed(agent).into(), 10)); // no withdrawals from core staking yet. - assert_eq!(get_agent(&agent).ledger.unclaimed_withdrawals, 0); + assert_eq!(get_agent_ledger(&agent).ledger.unclaimed_withdrawals, 0); } // another unbond would trigger withdrawal @@ -500,7 +524,7 @@ mod staking_integration { // 8 previous unbonds would be withdrawn as they were already unlocked. Unlocking period // is 3 eras. - assert_eq!(get_agent(&agent).ledger.unclaimed_withdrawals, 8 * 10); + assert_eq!(get_agent_ledger(&agent).ledger.unclaimed_withdrawals, 8 * 10); // release some delegation now. 
assert_ok!(DelegatedStaking::release_delegation( @@ -509,7 +533,7 @@ mod staking_integration { 40, 0 )); - assert_eq!(get_agent(&agent).ledger.unclaimed_withdrawals, 80 - 40); + assert_eq!(get_agent_ledger(&agent).ledger.unclaimed_withdrawals, 80 - 40); // cannot release more than available assert_noop!( @@ -523,7 +547,7 @@ mod staking_integration { 0 )); - assert_eq!(DelegatedStaking::held_balance_of(&300), 100 - 80); + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(300)), 100 - 80); }); } @@ -561,8 +585,8 @@ mod staking_integration { // amount is staked correctly assert!(eq_stake(200, 100, 100)); - assert_eq!(get_agent(&200).available_to_bond(), 0); - assert_eq!(get_agent(&200).ledger.effective_balance(), 100); + assert_eq!(get_agent_ledger(&200).available_to_bond(), 0); + assert_eq!(get_agent_ledger(&200).ledger.effective_balance(), 100); // free balance of delegate is untouched assert_eq!(Balances::free_balance(200), balance_200); @@ -624,7 +648,8 @@ mod staking_integration { // to migrate, nominator needs to set an account as a proxy delegator where staked funds // will be moved and delegated back to this old nominator account. This should be funded // with at least ED. - let proxy_delegator = DelegatedStaking::generate_proxy_delegator(200); + let proxy_delegator = + DelegatedStaking::generate_proxy_delegator(Agent::from(200)).get(); assert_ok!(DelegatedStaking::migrate_to_agent(RawOrigin::Signed(200).into(), 201)); @@ -637,9 +662,12 @@ mod staking_integration { // stake amount is transferred from delegate to proxy delegator account. assert_eq!(Balances::free_balance(200), 0); assert_eq!(Staking::stake(&200).unwrap(), init_stake); - assert_eq!(get_agent(&200).ledger.effective_balance(), agent_amount); - assert_eq!(get_agent(&200).available_to_bond(), 0); - assert_eq!(get_agent(&200).ledger.unclaimed_withdrawals, agent_amount - staked_amount); + assert_eq!(get_agent_ledger(&200).ledger.effective_balance(), agent_amount); + assert_eq!(get_agent_ledger(&200).available_to_bond(), 0); + assert_eq!( + get_agent_ledger(&200).ledger.unclaimed_withdrawals, + agent_amount - staked_amount + ); // now lets migrate the delegators let delegator_share = agent_amount / 4; @@ -668,10 +696,10 @@ mod staking_integration { // delegate stake is unchanged. assert_eq!(Staking::stake(&200).unwrap(), init_stake); - assert_eq!(get_agent(&200).ledger.effective_balance(), agent_amount); - assert_eq!(get_agent(&200).available_to_bond(), 0); + assert_eq!(get_agent_ledger(&200).ledger.effective_balance(), agent_amount); + assert_eq!(get_agent_ledger(&200).available_to_bond(), 0); assert_eq!( - get_agent(&200).ledger.unclaimed_withdrawals, + get_agent_ledger(&200).ledger.unclaimed_withdrawals, agent_amount - staked_amount ); } @@ -697,7 +725,7 @@ mod pool_integration { let delegate_amount = 200; // nothing held initially - assert_eq!(DelegatedStaking::held_balance_of(&creator), 0); + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(creator)), 0); // create pool assert_ok!(Pools::create( @@ -709,10 +737,13 @@ mod pool_integration { )); // correct amount is locked in depositor's account. 
- assert_eq!(DelegatedStaking::held_balance_of(&creator), delegate_amount); + assert_eq!( + DelegatedStaking::held_balance_of(Delegator::from(creator)), + delegate_amount + ); let pool_account = Pools::generate_bonded_account(1); - let agent = get_agent(&pool_account); + let agent = get_agent_ledger(&pool_account); // verify state assert_eq!(agent.ledger.effective_balance(), delegate_amount); @@ -733,19 +764,19 @@ let delegator: AccountId = 300; fund(&delegator, 500); // nothing held initially - assert_eq!(DelegatedStaking::held_balance_of(&delegator), 0); + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(delegator)), 0); // delegator joins pool assert_ok!(Pools::join(RawOrigin::Signed(delegator).into(), 100, pool_id)); staked_amount += 100; // correct amount is locked in depositor's account. - assert_eq!(DelegatedStaking::held_balance_of(&delegator), 100); + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(delegator)), 100); // delegator is not actively exposed to core staking. assert_eq!(Staking::status(&delegator), Err(StakingError::::NotStash.into())); - let pool_agent = get_agent(&Pools::generate_bonded_account(1)); + let pool_agent = get_agent_ledger(&Pools::generate_bonded_account(1)); // verify state assert_eq!(pool_agent.ledger.effective_balance(), staked_amount); assert_eq!(pool_agent.bonded_stake(), staked_amount); @@ -763,10 +794,10 @@ fund(&i, 500); assert_ok!(Pools::join(RawOrigin::Signed(i).into(), 100 + i, pool_id)); staked_amount += 100 + i; - assert_eq!(DelegatedStaking::held_balance_of(&i), 100 + i); + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(i)), 100 + i); } - let pool_agent = pool_agent.refresh().unwrap(); + let pool_agent = pool_agent.reload().unwrap(); assert_eq!(pool_agent.ledger.effective_balance(), staked_amount); assert_eq!(pool_agent.bonded_stake(), staked_amount); assert_eq!(pool_agent.available_to_bond(), 0); @@ -812,7 +843,8 @@ // claim rewards for i in 300..320 { let pre_balance = Balances::free_balance(i); - let delegator_staked_balance = DelegatedStaking::held_balance_of(&i); + let delegator_staked_balance = + DelegatedStaking::held_balance_of(Delegator::from(i)); // payout reward assert_ok!(Pools::claim_payout(RawOrigin::Signed(i).into())); @@ -880,7 +912,7 @@ // at era 5, 301 can withdraw. System::reset_events(); - let held_301 = DelegatedStaking::held_balance_of(&301); + let held_301 = DelegatedStaking::held_balance_of(Delegator::from(301)); let free_301 = Balances::free_balance(301); assert_ok!(Pools::withdraw_unbonded(RawOrigin::Signed(301).into(), 301, 0)); @@ -892,7 +924,7 @@ pool_events_since_last_call(), vec![PoolsEvent::Withdrawn { member: 301, pool_id, balance: 50, points: 50 }] ); - assert_eq!(DelegatedStaking::held_balance_of(&301), held_301 - 50); + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(301)), held_301 - 50); assert_eq!(Balances::free_balance(301), free_301 + 50); start_era(7); @@ -1069,6 +1101,11 @@ BondedPools::::get(1).unwrap().points, creator_stake + delegator_stake * 6 - delegator_stake * 3 ); + + // pool currently has no pending slash + assert_eq!(Pools::api_pool_pending_slash(pool_id), 0); + + // slash the pool partially pallet_staking::slashing::do_slash::( &pool_acc, 500, @@ -1077,6 +1114,9 @@ 3, ); + // pool now has a pending slash of 500.
+ assert_eq!(Pools::api_pool_pending_slash(pool_id), 500); + assert_eq!( pool_events_since_last_call(), vec![ @@ -1091,9 +1131,9 @@ ); // slash is lazy and balance is still locked in user's accounts. - assert_eq!(DelegatedStaking::held_balance_of(&creator), creator_stake); + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(creator)), creator_stake); for i in 300..306 { - assert_eq!(DelegatedStaking::held_balance_of(&i), delegator_stake); + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(i)), delegator_stake); } assert_eq!( get_pool_agent(pool_id).ledger.effective_balance(), @@ -1128,7 +1168,7 @@ ] ); assert_eq!(get_pool_agent(pool_id).ledger.pending_slash, pre_pending_slash - 50); - assert_eq!(DelegatedStaking::held_balance_of(&i), 0); + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(i)), 0); assert_eq!(Balances::free_balance(i) - pre_balance, 50); } @@ -1139,19 +1179,38 @@ for i in 303..306 { let pre_pending_slash = get_pool_agent(pool_id).ledger.pending_slash; + // pool api returns the correct pending slash. + assert_eq!(Pools::api_pool_pending_slash(pool_id), pre_pending_slash); + // delegator has a pending slash of 50. + assert_eq!(Pools::api_member_pending_slash(i), 50); + // apply slash assert_ok!(Pools::apply_slash(RawOrigin::Signed(slash_reporter).into(), i)); + // nothing pending anymore. + assert_eq!(Pools::api_member_pending_slash(i), 0); // each member is slashed 50% of 100 = 50. assert_eq!(get_pool_agent(pool_id).ledger.pending_slash, pre_pending_slash - 50); + // pool api reflects the reduced value as well. + assert_eq!(Pools::api_pool_pending_slash(pool_id), pre_pending_slash - 50); // left with 50. - assert_eq!(DelegatedStaking::held_balance_of(&i), 50); + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(i)), 50); } + + // pool still has the creator's pending slash. + assert_eq!(Pools::api_pool_pending_slash(pool_id), 250); + // reporter is paid SlashRewardFraction of the slash, i.e. 10% of 50 = 5 assert_eq!(Balances::free_balance(slash_reporter), 100 + 5 * 3); + // creator has pending slash. + assert_eq!(Pools::api_member_pending_slash(creator), 250); // slash creator assert_ok!(Pools::apply_slash(RawOrigin::Signed(slash_reporter).into(), creator)); + // no pending slash anymore. + assert_eq!(Pools::api_member_pending_slash(creator), 0); + // all slash should be applied now. assert_eq!(get_pool_agent(pool_id).ledger.pending_slash, 0); + assert_eq!(Pools::api_pool_pending_slash(pool_id), 0); // for creator, 50% of stake should be slashed (250), 10% of which should go to reporter // (25). assert_eq!(Balances::free_balance(slash_reporter), 115 + 25); @@ -1178,7 +1237,7 @@ } } - fn get_pool_agent(pool_id: u32) -> Agent { - get_agent(&Pools::generate_bonded_account(pool_id)) + fn get_pool_agent(pool_id: u32) -> AgentLedgerOuter { + get_agent_ledger(&Pools::generate_bonded_account(pool_id)) } } diff --git a/substrate/frame/delegated-staking/src/types.rs b/substrate/frame/delegated-staking/src/types.rs index 958d81c294a..24b45735654 100644 --- a/substrate/frame/delegated-staking/src/types.rs +++ b/substrate/frame/delegated-staking/src/types.rs @@ -143,18 +143,18 @@ impl AgentLedger { /// Wrapper around `AgentLedger` to provide some helper functions to mutate the ledger.
#[derive(Clone)] -pub struct Agent { +pub struct AgentLedgerOuter { /// storage key pub key: T::AccountId, /// storage value pub ledger: AgentLedger, } -impl Agent { +impl AgentLedgerOuter { /// Get `Agent` from storage if it exists or return an error. - pub(crate) fn get(agent: &T::AccountId) -> Result, DispatchError> { + pub(crate) fn get(agent: &T::AccountId) -> Result, DispatchError> { let ledger = AgentLedger::::get(agent).ok_or(Error::::NotAgent)?; - Ok(Agent { key: agent.clone(), ledger }) + Ok(AgentLedgerOuter { key: agent.clone(), ledger }) } /// Remove funds that are withdrawn from [Config::CoreStaking] but not claimed by a delegator. @@ -176,7 +176,7 @@ impl Agent { .checked_sub(&amount) .defensive_ok_or(ArithmeticError::Overflow)?; - Ok(Agent { + Ok(AgentLedgerOuter { ledger: AgentLedger { total_delegated: new_total_delegated, unclaimed_withdrawals: new_unclaimed_withdrawals, @@ -197,7 +197,7 @@ impl Agent { .checked_add(&amount) .defensive_ok_or(ArithmeticError::Overflow)?; - Ok(Agent { + Ok(AgentLedgerOuter { ledger: AgentLedger { unclaimed_withdrawals: new_unclaimed_withdrawals, ..self.ledger }, ..self }) @@ -224,7 +224,10 @@ impl Agent { let pending_slash = self.ledger.pending_slash.defensive_saturating_sub(amount); let total_delegated = self.ledger.total_delegated.defensive_saturating_sub(amount); - Agent { ledger: AgentLedger { pending_slash, total_delegated, ..self.ledger }, ..self } + AgentLedgerOuter { + ledger: AgentLedger { pending_slash, total_delegated, ..self.ledger }, + ..self + } } /// Get the total stake of agent bonded in [`Config::CoreStaking`]. @@ -270,7 +273,7 @@ impl Agent { } /// Reloads self from storage. - pub(crate) fn refresh(self) -> Result, DispatchError> { + pub(crate) fn reload(self) -> Result, DispatchError> { Self::get(&self.key) } diff --git a/substrate/frame/nomination-pools/benchmarking/src/inner.rs b/substrate/frame/nomination-pools/benchmarking/src/inner.rs index 43de0fddb8b..b8c978945e9 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/inner.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/inner.rs @@ -29,7 +29,7 @@ use frame_support::{ }; use frame_system::RawOrigin as RuntimeOrigin; use pallet_nomination_pools::{ - adapter::{StakeStrategy, StakeStrategyType}, + adapter::{Member, Pool, StakeStrategy, StakeStrategyType}, BalanceOf, BondExtra, BondedPoolInner, BondedPools, ClaimPermission, ClaimPermissions, Commission, CommissionChangeRate, CommissionClaimPermission, ConfigOp, GlobalMaxCommission, MaxPoolMembers, MaxPoolMembersPerPool, MaxPools, Metadata, MinCreateBond, MinJoinBond, @@ -116,7 +116,7 @@ fn migrate_to_transfer_stake(pool_id: PoolId) { } let pool_acc = Pools::::generate_bonded_account(pool_id); // drop the agent and its associated delegators . - T::StakeAdapter::remove_as_agent(&pool_acc); + T::StakeAdapter::remove_as_agent(Pool::from(pool_acc.clone())); // tranfer funds from all members to the pool account. PoolMembers::::iter() @@ -139,8 +139,12 @@ fn vote_to_balance( vote.try_into().map_err(|_| "could not convert u64 to Balance") } -fn is_transfer_stake_strategy() -> bool { - T::StakeAdapter::strategy_type() == StakeStrategyType::Transfer +/// `assertion` should strictly be true if the adapter is using `Delegate` strategy and strictly +/// false if the adapter is not using `Delegate` strategy. +fn assert_if_delegate(assertion: bool) { + let legacy_adapter_used = T::StakeAdapter::strategy_type() != StakeStrategyType::Delegate; + // one and only one of the two should be true. 
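+ // with the `Delegate` adapter, `assertion` must be true and `legacy_adapter_used` is false; with a legacy adapter it is the reverse.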
+ assert!(assertion ^ legacy_adapter_used); } #[allow(unused)] @@ -182,7 +186,7 @@ impl ListScenario { create_pool_account::(USER_SEED + 1, origin_weight, Some(Perbill::from_percent(50))); T::StakeAdapter::nominate( - &pool_origin1, + Pool::from(pool_origin1.clone()), // NOTE: these don't really need to be validators. vec![account("random_validator", 0, USER_SEED)], )?; @@ -191,7 +195,7 @@ impl ListScenario { create_pool_account::(USER_SEED + 2, origin_weight, Some(Perbill::from_percent(50))); T::StakeAdapter::nominate( - &pool_origin2, + Pool::from(pool_origin2.clone()), vec![account("random_validator", 0, USER_SEED)].clone(), )?; @@ -208,7 +212,10 @@ impl ListScenario { let (_, pool_dest1) = create_pool_account::(USER_SEED + 3, dest_weight, Some(Perbill::from_percent(50))); - T::StakeAdapter::nominate(&pool_dest1, vec![account("random_validator", 0, USER_SEED)])?; + T::StakeAdapter::nominate( + Pool::from(pool_dest1.clone()), + vec![account("random_validator", 0, USER_SEED)], + )?; let weight_of = pallet_staking::Pallet::::weight_of_fn(); assert_eq!(vote_to_balance::(weight_of(&pool_origin1)).unwrap(), origin_weight); @@ -234,11 +241,11 @@ impl ListScenario { self.origin1_member = Some(joiner.clone()); CurrencyOf::::set_balance(&joiner, amount * 2u32.into()); - let original_bonded = T::StakeAdapter::active_stake(&self.origin1); + let original_bonded = T::StakeAdapter::active_stake(Pool::from(self.origin1.clone())); // Unbond `amount` from the underlying pool account so when the member joins // we will maintain `current_bonded`. - T::StakeAdapter::unbond(&self.origin1, amount) + T::StakeAdapter::unbond(Pool::from(self.origin1.clone()), amount) .expect("the pool was created in `Self::new`."); // Account pool points for the unbonded balance. @@ -275,7 +282,7 @@ frame_benchmarking::benchmarks! { // setup the worst case list scenario. let scenario = ListScenario::::new(origin_weight, true)?; assert_eq!( - T::StakeAdapter::active_stake(&scenario.origin1), + T::StakeAdapter::active_stake(Pool::from(scenario.origin1.clone())), origin_weight ); @@ -290,7 +297,7 @@ frame_benchmarking::benchmarks! { verify { assert_eq!(CurrencyOf::::balance(&joiner), joiner_free - max_additional); assert_eq!( - T::StakeAdapter::active_stake(&scenario.origin1), + T::StakeAdapter::active_stake(Pool::from(scenario.origin1)), scenario.dest_weight ); } @@ -305,7 +312,7 @@ frame_benchmarking::benchmarks! { }: bond_extra(RuntimeOrigin::Signed(scenario.creator1.clone()), BondExtra::FreeBalance(extra)) verify { assert!( - T::StakeAdapter::active_stake(&scenario.origin1) >= + T::StakeAdapter::active_stake(Pool::from(scenario.origin1)) >= scenario.dest_weight ); } @@ -329,7 +336,7 @@ frame_benchmarking::benchmarks! { verify { // commission of 50% deducted here. assert!( - T::StakeAdapter::active_stake(&scenario.origin1) >= + T::StakeAdapter::active_stake(Pool::from(scenario.origin1)) >= scenario.dest_weight / 2u32.into() ); } @@ -383,7 +390,7 @@ frame_benchmarking::benchmarks! { whitelist_account!(member_id); }: _(RuntimeOrigin::Signed(member_id.clone()), member_id_lookup, all_points) verify { - let bonded_after = T::StakeAdapter::active_stake(&scenario.origin1); + let bonded_after = T::StakeAdapter::active_stake(Pool::from(scenario.origin1)); // We at least went down to the destination bag assert!(bonded_after <= scenario.dest_weight); let member = PoolMembers::::get( @@ -414,7 +421,7 @@ frame_benchmarking::benchmarks! 
{ // Sanity check join worked assert_eq!( - T::StakeAdapter::active_stake(&pool_account), + T::StakeAdapter::active_stake(Pool::from(pool_account.clone())), min_create_bond + min_join_bond ); assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); @@ -424,7 +431,7 @@ frame_benchmarking::benchmarks! { // Sanity check that unbond worked assert_eq!( - T::StakeAdapter::active_stake(&pool_account), + T::StakeAdapter::active_stake(Pool::from(pool_account.clone())), min_create_bond ); assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); @@ -457,7 +464,7 @@ frame_benchmarking::benchmarks! { // Sanity check join worked assert_eq!( - T::StakeAdapter::active_stake(&pool_account), + T::StakeAdapter::active_stake(Pool::from(pool_account.clone())), min_create_bond + min_join_bond ); assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); @@ -468,7 +475,7 @@ frame_benchmarking::benchmarks! { // Sanity check that unbond worked assert_eq!( - T::StakeAdapter::active_stake(&pool_account), + T::StakeAdapter::active_stake(Pool::from(pool_account.clone())), min_create_bond ); assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); @@ -514,12 +521,12 @@ frame_benchmarking::benchmarks! { // Sanity check that unbond worked assert_eq!( - T::StakeAdapter::active_stake(&pool_account), + T::StakeAdapter::active_stake(Pool::from(pool_account.clone())), Zero::zero() ); assert_eq!( - T::StakeAdapter::total_balance(&pool_account), - min_create_bond + T::StakeAdapter::total_balance(Pool::from(pool_account.clone())), + Some(min_create_bond) ); assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); @@ -594,7 +601,7 @@ frame_benchmarking::benchmarks! { } ); assert_eq!( - T::StakeAdapter::active_stake(&Pools::::generate_bonded_account(1)), + T::StakeAdapter::active_stake(Pool::from(Pools::::generate_bonded_account(1))), min_create_bond ); } @@ -634,7 +641,7 @@ frame_benchmarking::benchmarks! { } ); assert_eq!( - T::StakeAdapter::active_stake(&Pools::::generate_bonded_account(1)), + T::StakeAdapter::active_stake(Pool::from(Pools::::generate_bonded_account(1))), min_create_bond ); } @@ -719,13 +726,13 @@ frame_benchmarking::benchmarks! { .map(|i| account("stash", USER_SEED, i)) .collect(); - assert_ok!(T::StakeAdapter::nominate(&pool_account, validators)); - assert!(T::StakeAdapter::nominations(&Pools::::generate_bonded_account(1)).is_some()); + assert_ok!(T::StakeAdapter::nominate(Pool::from(pool_account.clone()), validators)); + assert!(T::StakeAdapter::nominations(Pool::from(pool_account.clone())).is_some()); whitelist_account!(depositor); }:_(RuntimeOrigin::Signed(depositor.clone()), 1) verify { - assert!(T::StakeAdapter::nominations(&Pools::::generate_bonded_account(1)).is_none()); + assert!(T::StakeAdapter::nominations(Pool::from(pool_account.clone())).is_none()); } set_commission { @@ -824,7 +831,7 @@ frame_benchmarking::benchmarks! { // Sanity check join worked assert_eq!( - T::StakeAdapter::active_stake(&pool_account), + T::StakeAdapter::active_stake(Pool::from(pool_account.clone())), min_create_bond + min_join_bond ); }:_(RuntimeOrigin::Signed(joiner.clone()), ClaimPermission::Permissioned) @@ -888,7 +895,7 @@ frame_benchmarking::benchmarks! { // verify user balance in the pool. assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount); // verify delegated balance. 
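+ // under `DelegateStake` the member's delegation equals the deposit; a legacy adapter has no delegation to report and returns `None`.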
- assert!(is_transfer_stake_strategy::() || T::StakeAdapter::member_delegation_balance(&depositor) == deposit_amount); + assert_if_delegate::(T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == Some(deposit_amount)); // ugly type conversion between balances of pallet staking and pools (which really are same // type). Maybe there is a better way? @@ -906,7 +913,7 @@ frame_benchmarking::benchmarks! { // verify user balance is slashed in the pool. assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount/2u32.into()); // verify delegated balance are not yet slashed. - assert!(is_transfer_stake_strategy::() || T::StakeAdapter::member_delegation_balance(&depositor) == deposit_amount); + assert_if_delegate::(T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == Some(deposit_amount)); // Fill member's sub pools for the worst case. for i in 1..(T::MaxUnbonding::get() + 1) { @@ -920,14 +927,12 @@ frame_benchmarking::benchmarks! { whitelist_account!(depositor); }: { - let res = Pools::::apply_slash(RuntimeOrigin::Signed(slash_reporter.clone()).into(), depositor_lookup.clone()); - // for transfer stake strategy, apply slash would error, otherwise success. - assert!(is_transfer_stake_strategy::() ^ res.is_ok()); + assert_if_delegate::(Pools::::apply_slash(RuntimeOrigin::Signed(slash_reporter.clone()).into(), depositor_lookup.clone()).is_ok()); } verify { // verify balances are correct and slash applied. assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount/2u32.into()); - assert!(is_transfer_stake_strategy::() || T::StakeAdapter::member_delegation_balance(&depositor) == deposit_amount/2u32.into()); + assert_if_delegate::(T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == Some(deposit_amount/2u32.into())); } apply_slash_fail { @@ -979,13 +984,11 @@ frame_benchmarking::benchmarks! { // migrate pool to transfer stake. let _ = migrate_to_transfer_stake::(1); }: { - // Try migrate to `DelegateStake`. Would succeed only if `DelegateStake` strategy is used. - let res = Pools::::migrate_pool_to_delegate_stake(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into()); - assert!(is_transfer_stake_strategy::() ^ res.is_ok()); + assert_if_delegate::(Pools::::migrate_pool_to_delegate_stake(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into()).is_ok()); } verify { // this queries agent balance if `DelegateStake` strategy. - assert!(T::StakeAdapter::total_balance(&pool_account) == deposit_amount); + assert!(T::StakeAdapter::total_balance(Pool::from(pool_account.clone())) == Some(deposit_amount)); } migrate_delegation { @@ -998,22 +1001,20 @@ frame_benchmarking::benchmarks! { let _ = migrate_to_transfer_stake::(1); // Now migrate pool to delegate stake keeping delegators unmigrated. - let migration_res = Pools::::migrate_pool_to_delegate_stake(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into()); - assert!(is_transfer_stake_strategy::() ^ migration_res.is_ok()); + assert_if_delegate::(Pools::::migrate_pool_to_delegate_stake(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into()).is_ok()); - // verify balances that we will check again later. - assert!(T::StakeAdapter::member_delegation_balance(&depositor) == Zero::zero()); + // delegation does not exist. + assert!(T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())).is_none()); + // contribution exists in the pool. 
assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount); whitelist_account!(depositor); }: { - let res = Pools::::migrate_delegation(RuntimeOrigin::Signed(depositor.clone()).into(), depositor_lookup.clone()); - // for transfer stake strategy, apply slash would error, otherwise success. - assert!(is_transfer_stake_strategy::() ^ res.is_ok()); + assert_if_delegate::(Pools::::migrate_delegation(RuntimeOrigin::Signed(depositor.clone()).into(), depositor_lookup.clone()).is_ok()); } verify { // verify balances once more. - assert!(is_transfer_stake_strategy::() || T::StakeAdapter::member_delegation_balance(&depositor) == deposit_amount); + assert_if_delegate::(T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == Some(deposit_amount)); assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount); } diff --git a/substrate/frame/nomination-pools/runtime-api/src/lib.rs b/substrate/frame/nomination-pools/runtime-api/src/lib.rs index 881c3c36331..67627e0acb1 100644 --- a/substrate/frame/nomination-pools/runtime-api/src/lib.rs +++ b/substrate/frame/nomination-pools/runtime-api/src/lib.rs @@ -38,5 +38,30 @@ sp_api::decl_runtime_apis! { /// Returns the equivalent points of `new_funds` for a given pool. fn balance_to_points(pool_id: PoolId, new_funds: Balance) -> Balance; + + /// Returns the pending slash for a given pool. + fn pool_pending_slash(pool_id: PoolId) -> Balance; + + /// Returns the pending slash for a given pool member. + fn member_pending_slash(member: AccountId) -> Balance; + + /// Returns true if the pool with `pool_id` needs migration. + /// + /// This can happen when `pallet-nomination-pools` has switched to using the strategy + /// [`DelegateStake`](pallet_nomination_pools::adapter::DelegateStake) but the pool + /// still has funds that were staked using the older strategy + /// [TransferStake](pallet_nomination_pools::adapter::TransferStake). Use + /// [`migrate_pool_to_delegate_stake`](pallet_nomination_pools::Call::migrate_pool_to_delegate_stake) + /// to migrate the pool. + fn pool_needs_delegate_migration(pool_id: PoolId) -> bool; + + /// Returns true if the delegated funds of the pool `member` need migration. + /// + /// Once a pool has successfully migrated to the strategy + /// [`DelegateStake`](pallet_nomination_pools::adapter::DelegateStake), the funds of the + /// member can be migrated from the pool account to the member's account. Use + /// [`migrate_delegation`](pallet_nomination_pools::Call::migrate_delegation) + /// to migrate the funds of the pool member. + fn member_needs_delegate_migration(member: AccountId) -> bool; } } diff --git a/substrate/frame/nomination-pools/src/adapter.rs b/substrate/frame/nomination-pools/src/adapter.rs index caf4671191d..4809fbc0e9d 100644 --- a/substrate/frame/nomination-pools/src/adapter.rs +++ b/substrate/frame/nomination-pools/src/adapter.rs @@ -16,7 +16,7 @@ // limitations under the License. use crate::*; -use sp_staking::{DelegationInterface, DelegationMigrator}; +use sp_staking::{Agent, DelegationInterface, DelegationMigrator, Delegator}; /// Types of stake strategies. /// @@ -32,6 +32,50 @@ pub enum StakeStrategyType { Delegate, } +/// A type that only belongs in the context of a pool. +/// +/// Maps directly to an [`Agent`] account.
+#[derive(Clone, Debug)] +pub struct Pool(T); +impl Into> for Pool { + fn into(self) -> Agent { + Agent::from(self.0) + } +} +impl From for Pool { + fn from(acc: T) -> Self { + Pool(acc) + } +} + +impl Pool { + pub fn get(self) -> T { + self.0 + } +} + +/// A type that only belongs in the context of a pool member. +/// +/// Maps directly to a [`Delegator`] account. +#[derive(Clone, Debug)] +pub struct Member(T); +impl Into> for Member { + fn into(self) -> Delegator { + Delegator::from(self.0) + } +} +impl From for Member { + fn from(acc: T) -> Self { + Member(acc) + } +} + +impl Member { + pub fn get(self) -> T { + self.0 + } +} + /// An adapter trait that can support multiple staking strategies. /// /// Depending on which staking strategy we want to use, the staking logic can be slightly @@ -64,30 +108,30 @@ pub trait StakeStrategy { /// /// This is part of the pool balance that is not actively staked. That is, tokens that are /// in unbonding period or unbonded. - fn transferable_balance(pool_account: &Self::AccountId) -> Self::Balance; + fn transferable_balance(pool_account: Pool) -> Self::Balance; /// Total balance of the pool including amount that is actively staked. - fn total_balance(pool_account: &Self::AccountId) -> Self::Balance; + fn total_balance(pool_account: Pool) -> Option; /// Amount of tokens delegated by the member. - fn member_delegation_balance(member_account: &Self::AccountId) -> Self::Balance; + fn member_delegation_balance(member_account: Member) -> Option; /// See [`StakingInterface::active_stake`]. - fn active_stake(pool_account: &Self::AccountId) -> Self::Balance { - Self::CoreStaking::active_stake(pool_account).unwrap_or_default() + fn active_stake(pool_account: Pool) -> Self::Balance { + Self::CoreStaking::active_stake(&pool_account.0).unwrap_or_default() } /// See [`StakingInterface::total_stake`]. - fn total_stake(pool_account: &Self::AccountId) -> Self::Balance { - Self::CoreStaking::total_stake(pool_account).unwrap_or_default() + fn total_stake(pool_account: Pool) -> Self::Balance { + Self::CoreStaking::total_stake(&pool_account.0).unwrap_or_default() } /// Which strategy the pool account is using. /// /// This can be different from the [`Self::strategy_type`] of the adapter if the pool has not /// migrated to the new strategy yet. - fn pool_strategy(pool_account: &Self::AccountId) -> StakeStrategyType { - match Self::CoreStaking::is_virtual_staker(pool_account) { + fn pool_strategy(pool_account: Pool) -> StakeStrategyType { + match Self::CoreStaking::is_virtual_staker(&pool_account.0) { true => StakeStrategyType::Delegate, false => StakeStrategyType::Transfer, } @@ -95,55 +139,55 @@ pub trait StakeStrategy { /// See [`StakingInterface::nominate`]. fn nominate( - pool_account: &Self::AccountId, + pool_account: Pool, validators: Vec, ) -> DispatchResult { - Self::CoreStaking::nominate(pool_account, validators) + Self::CoreStaking::nominate(&pool_account.0, validators) } /// See [`StakingInterface::chill`]. - fn chill(pool_account: &Self::AccountId) -> DispatchResult { - Self::CoreStaking::chill(pool_account) + fn chill(pool_account: Pool) -> DispatchResult { + Self::CoreStaking::chill(&pool_account.0) } /// Pledge `amount` towards `pool_account` and update the pool bond. Also see /// [`StakingInterface::bond`]. fn pledge_bond( - who: &Self::AccountId, - pool_account: &Self::AccountId, + who: Member, + pool_account: Pool, reward_account: &Self::AccountId, amount: Self::Balance, bond_type: BondType, ) -> DispatchResult; /// See [`StakingInterface::unbond`].
- fn unbond(pool_account: &Self::AccountId, amount: Self::Balance) -> DispatchResult { - Self::CoreStaking::unbond(pool_account, amount) + fn unbond(pool_account: Pool, amount: Self::Balance) -> DispatchResult { + Self::CoreStaking::unbond(&pool_account.0, amount) } /// See [`StakingInterface::withdraw_unbonded`]. fn withdraw_unbonded( - pool_account: &Self::AccountId, + pool_account: Pool, num_slashing_spans: u32, ) -> Result { - Self::CoreStaking::withdraw_unbonded(pool_account.clone(), num_slashing_spans) + Self::CoreStaking::withdraw_unbonded(pool_account.0, num_slashing_spans) } /// Withdraw funds from pool account to member account. fn member_withdraw( - who: &Self::AccountId, - pool_account: &Self::AccountId, + who: Member, + pool_account: Pool, amount: Self::Balance, num_slashing_spans: u32, ) -> DispatchResult; /// Check if there is any pending slash for the pool. - fn has_pending_slash(pool_account: &Self::AccountId) -> bool; + fn pending_slash(pool_account: Pool) -> Self::Balance; /// Slash the member account with `amount` against pending slashes for the pool. fn member_slash( - who: &Self::AccountId, - pool_account: &Self::AccountId, + who: Member, + pool_account: Pool, amount: Self::Balance, maybe_reporter: Option, ) -> DispatchResult; @@ -153,7 +197,7 @@ pub trait StakeStrategy { /// This is useful for migrating a pool account from [`StakeStrategyType::Transfer`] to /// [`StakeStrategyType::Delegate`]. fn migrate_nominator_to_agent( - pool_account: &Self::AccountId, + pool_account: Pool, reward_account: &Self::AccountId, ) -> DispatchResult; @@ -166,15 +210,15 @@ pub trait StakeStrategy { /// Internally, the member funds that are locked in the pool account are transferred back and /// locked in the member account. fn migrate_delegation( - pool: &Self::AccountId, - delegator: &Self::AccountId, + pool: Pool, + delegator: Member, value: Self::Balance, ) -> DispatchResult; /// List of validators nominated by the pool account. #[cfg(feature = "runtime-benchmarks")] - fn nominations(pool_account: &Self::AccountId) -> Option> { - Self::CoreStaking::nominations(pool_account) + fn nominations(pool_account: Pool) -> Option> { + Self::CoreStaking::nominations(&pool_account.0) } /// Remove the pool account as agent. @@ -182,7 +226,7 @@ pub trait StakeStrategy { /// Useful for migrating pool account from a delegated agent to a direct nominator. Only used /// in tests and benchmarks. #[cfg(feature = "runtime-benchmarks")] - fn remove_as_agent(_pool: &Self::AccountId) { + fn remove_as_agent(_pool: Pool) { // noop by default } } @@ -209,22 +253,24 @@ impl, AccountId = T: StakeStrategyType::Transfer } - fn transferable_balance(pool_account: &Self::AccountId) -> BalanceOf { - T::Currency::balance(pool_account).saturating_sub(Self::active_stake(pool_account)) + fn transferable_balance(pool_account: Pool) -> BalanceOf { + T::Currency::balance(&pool_account.0).saturating_sub(Self::active_stake(pool_account)) } - fn total_balance(pool_account: &Self::AccountId) -> BalanceOf { - T::Currency::total_balance(pool_account) + fn total_balance(pool_account: Pool) -> Option> { + Some(T::Currency::total_balance(&pool_account.0)) } - fn member_delegation_balance(_member_account: &T::AccountId) -> Staking::Balance { - // for transfer stake, delegation balance is always zero. - Zero::zero() + fn member_delegation_balance( + _member_account: Member, + ) -> Option { + // for transfer stake, no delegation exists. 
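+ // (member funds were transferred into the pool account, so nothing is held under a delegation)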
+ None } fn pledge_bond( - who: &T::AccountId, - pool_account: &Self::AccountId, + who: Member, + pool_account: Pool, reward_account: &Self::AccountId, amount: BalanceOf, bond_type: BondType, @@ -232,36 +278,36 @@ impl, AccountId = T: match bond_type { BondType::Create => { // first bond - T::Currency::transfer(who, pool_account, amount, Preservation::Expendable)?; - Staking::bond(pool_account, amount, &reward_account) + T::Currency::transfer(&who.0, &pool_account.0, amount, Preservation::Expendable)?; + Staking::bond(&pool_account.0, amount, &reward_account) }, BondType::Extra => { // additional bond - T::Currency::transfer(who, pool_account, amount, Preservation::Preserve)?; - Staking::bond_extra(pool_account, amount) + T::Currency::transfer(&who.0, &pool_account.0, amount, Preservation::Preserve)?; + Staking::bond_extra(&pool_account.0, amount) }, } } fn member_withdraw( - who: &T::AccountId, - pool_account: &Self::AccountId, + who: Member, + pool_account: Pool, amount: BalanceOf, _num_slashing_spans: u32, ) -> DispatchResult { - T::Currency::transfer(pool_account, &who, amount, Preservation::Expendable)?; + T::Currency::transfer(&pool_account.0, &who.0, amount, Preservation::Expendable)?; Ok(()) } - fn has_pending_slash(_: &Self::AccountId) -> bool { + fn pending_slash(_: Pool) -> Self::Balance { // for transfer stake strategy, slashing is greedy and never deferred. - false + Zero::zero() } fn member_slash( - _who: &T::AccountId, - _pool: &Self::AccountId, + _who: Member, + _pool: Pool, _amount: Staking::Balance, _maybe_reporter: Option, ) -> DispatchResult { @@ -269,15 +315,15 @@ impl, AccountId = T: } fn migrate_nominator_to_agent( - _pool: &Self::AccountId, + _pool: Pool, _reward_account: &Self::AccountId, ) -> DispatchResult { Err(Error::::Defensive(DefensiveError::DelegationUnsupported).into()) } fn migrate_delegation( - _pool: &Self::AccountId, - _delegator: &Self::AccountId, + _pool: Pool, + _delegator: Member, _value: Self::Balance, ) -> DispatchResult { Err(Error::::Defensive(DefensiveError::DelegationUnsupported).into()) @@ -314,21 +360,24 @@ impl< StakeStrategyType::Delegate } - fn transferable_balance(pool_account: &Self::AccountId) -> BalanceOf { - Delegation::agent_balance(pool_account).saturating_sub(Self::active_stake(pool_account)) + fn transferable_balance(pool_account: Pool) -> BalanceOf { + Delegation::agent_balance(pool_account.clone().into()) + // pool should always be an agent. 
+ .defensive_unwrap_or_default() + .saturating_sub(Self::active_stake(pool_account)) } - fn total_balance(pool_account: &Self::AccountId) -> BalanceOf { - Delegation::agent_balance(pool_account) + fn total_balance(pool_account: Pool) -> Option> { + Delegation::agent_balance(pool_account.into()) } - fn member_delegation_balance(member_account: &T::AccountId) -> BalanceOf { - Delegation::delegator_balance(member_account) + fn member_delegation_balance(member_account: Member) -> Option> { + Delegation::delegator_balance(member_account.into()) } fn pledge_bond( - who: &T::AccountId, - pool_account: &Self::AccountId, + who: Member, + pool_account: Pool, reward_account: &Self::AccountId, amount: BalanceOf, bond_type: BondType, @@ -336,54 +385,54 @@ impl< match bond_type { BondType::Create => { // first delegation - Delegation::delegate(who, pool_account, reward_account, amount) + Delegation::delegate(who.into(), pool_account.into(), reward_account, amount) }, BondType::Extra => { // additional delegation - Delegation::delegate_extra(who, pool_account, amount) + Delegation::delegate_extra(who.into(), pool_account.into(), amount) }, } } fn member_withdraw( - who: &T::AccountId, - pool_account: &Self::AccountId, + who: Member, + pool_account: Pool, amount: BalanceOf, num_slashing_spans: u32, ) -> DispatchResult { - Delegation::withdraw_delegation(&who, pool_account, amount, num_slashing_spans) + Delegation::withdraw_delegation(who.into(), pool_account.into(), amount, num_slashing_spans) } - fn has_pending_slash(pool_account: &Self::AccountId) -> bool { - Delegation::has_pending_slash(pool_account) + fn pending_slash(pool_account: Pool) -> Self::Balance { + Delegation::pending_slash(pool_account.into()).defensive_unwrap_or_default() } fn member_slash( - who: &T::AccountId, - pool_account: &Self::AccountId, + who: Member, + pool_account: Pool, amount: BalanceOf, maybe_reporter: Option, ) -> DispatchResult { - Delegation::delegator_slash(pool_account, who, amount, maybe_reporter) + Delegation::delegator_slash(pool_account.into(), who.into(), amount, maybe_reporter) } fn migrate_nominator_to_agent( - pool: &Self::AccountId, + pool: Pool, reward_account: &Self::AccountId, ) -> DispatchResult { - Delegation::migrate_nominator_to_agent(pool, reward_account) + Delegation::migrate_nominator_to_agent(pool.into(), reward_account) } fn migrate_delegation( - pool: &Self::AccountId, - delegator: &Self::AccountId, + pool: Pool, + delegator: Member, value: Self::Balance, ) -> DispatchResult { - Delegation::migrate_delegation(pool, delegator, value) + Delegation::migrate_delegation(pool.into(), delegator.into(), value) } #[cfg(feature = "runtime-benchmarks")] - fn remove_as_agent(pool: &Self::AccountId) { - Delegation::drop_agent(pool) + fn remove_as_agent(pool: Pool) { + Delegation::drop_agent(pool.into()) } } diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index 816334c1a08..2aaea044636 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -351,7 +351,7 @@ #![cfg_attr(not(feature = "std"), no_std)] -use adapter::StakeStrategy; +use adapter::{Member, Pool, StakeStrategy}; use codec::Codec; use frame_support::{ defensive, defensive_assert, ensure, @@ -1007,7 +1007,7 @@ impl BondedPool { /// /// This is often used for bonding and issuing new funds into the pool. 
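`balance_to_point` and `points_to_balance` below are the pool's exchange-rate helpers. As a hedged reminder of the arithmetic involved (a simplified sketch; the real implementation works in wider integers and handles the empty-pool edge cases separately):

```rust
// Points minted for `new_funds` scale with the current points-per-token
// ratio; edge cases (empty pool) are simplified to 1:1 here.
fn balance_to_point(bonded_balance: u128, points: u128, new_funds: u128) -> u128 {
	if bonded_balance == 0 || points == 0 {
		new_funds
	} else {
		new_funds.saturating_mul(points) / bonded_balance
	}
}

fn main() {
	// a pool slashed from 100 to 50 with 100 points outstanding now issues
	// 2 points per newly bonded token
	assert_eq!(balance_to_point(50, 100, 10), 20);
}
```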
fn balance_to_point(&self, new_funds: BalanceOf) -> BalanceOf { - let bonded_balance = T::StakeAdapter::active_stake(&self.bonded_account()); + let bonded_balance = T::StakeAdapter::active_stake(Pool::from(self.bonded_account())); Pallet::::balance_to_point(bonded_balance, self.points, new_funds) } @@ -1015,7 +1015,7 @@ impl BondedPool { /// /// This is often used for unbonding. fn points_to_balance(&self, points: BalanceOf) -> BalanceOf { - let bonded_balance = T::StakeAdapter::active_stake(&self.bonded_account()); + let bonded_balance = T::StakeAdapter::active_stake(Pool::from(self.bonded_account())); Pallet::::point_to_balance(bonded_balance, self.points, points) } @@ -1125,7 +1125,7 @@ impl BondedPool { fn ok_to_be_open(&self) -> Result<(), DispatchError> { ensure!(!self.is_destroying(), Error::::CanNotChangeState); - let bonded_balance = T::StakeAdapter::active_stake(&self.bonded_account()); + let bonded_balance = T::StakeAdapter::active_stake(Pool::from(self.bonded_account())); ensure!(!bonded_balance.is_zero(), Error::::OverflowRisk); let points_to_balance_ratio_floor = self @@ -1259,8 +1259,8 @@ impl BondedPool { let points_issued = self.issue(amount); T::StakeAdapter::pledge_bond( - who, - &self.bonded_account(), + Member::from(who.clone()), + Pool::from(self.bonded_account()), &self.reward_account(), amount, ty, @@ -1940,12 +1940,10 @@ pub mod pallet { NothingToAdjust, /// No slash pending that can be applied to the member. NothingToSlash, - /// No delegation to migrate. - NoDelegationToMigrate, - /// The pool has already migrated to enable delegation. - PoolAlreadyMigrated, - /// The pool has not migrated yet to enable delegation. - PoolNotMigrated, + /// The pool or member delegation has already migrated to delegate stake. + AlreadyMigrated, + /// The pool or member delegation has not migrated yet to delegate stake. + NotMigrated, /// This call is not allowed in the current state of the pallet. NotSupported, } @@ -2148,7 +2146,7 @@ pub mod pallet { // Unbond in the actual underlying nominator. let unbonding_balance = bonded_pool.dissolve(unbonding_points); - T::StakeAdapter::unbond(&bonded_pool.bonded_account(), unbonding_balance)?; + T::StakeAdapter::unbond(Pool::from(bonded_pool.bonded_account()), unbonding_balance)?; // Note that we lazily create the unbonding pools here if they don't already exist let mut sub_pools = SubPoolsStorage::::get(member.pool_id) @@ -2211,7 +2209,10 @@ pub mod pallet { // For now we only allow a pool to withdraw unbonded if its not destroying. If the pool // is destroying then `withdraw_unbonded` can be used. ensure!(pool.state != PoolState::Destroying, Error::::NotDestroying); - T::StakeAdapter::withdraw_unbonded(&pool.bonded_account(), num_slashing_spans)?; + T::StakeAdapter::withdraw_unbonded( + Pool::from(pool.bonded_account()), + num_slashing_spans, + )?; Ok(()) } @@ -2285,7 +2286,7 @@ pub mod pallet { // Before calculating the `balance_to_unbond`, we call withdraw unbonded to ensure the // `transferable_balance` is correct. let stash_killed = T::StakeAdapter::withdraw_unbonded( - &bonded_pool.bonded_account(), + Pool::from(bonded_pool.bonded_account()), num_slashing_spans, )?; @@ -2334,13 +2335,15 @@ pub mod pallet { // don't exist. This check is also defensive in cases where the unbond pool does not // update its balance (e.g. a bug in the slashing hook.) We gracefully proceed in // order to ensure members can leave the pool and it can be destroyed. 
- .min(T::StakeAdapter::transferable_balance(&bonded_pool.bonded_account())); + .min(T::StakeAdapter::transferable_balance(Pool::from( + bonded_pool.bonded_account(), + ))); // this can fail if the pool uses `DelegateStake` strategy and the member delegation // is not claimed yet. See `Call::migrate_delegation()`. T::StakeAdapter::member_withdraw( - &member_account, - &bonded_pool.bonded_account(), + Member::from(member_account.clone()), + Pool::from(bonded_pool.bonded_account()), balance_to_unbond, num_slashing_spans, )?; @@ -2473,7 +2476,7 @@ pub mod pallet { Error::::MinimumBondNotMet ); - T::StakeAdapter::nominate(&bonded_pool.bonded_account(), validators) + T::StakeAdapter::nominate(Pool::from(bonded_pool.bonded_account()), validators) } /// Set a new state for the pool. @@ -2666,7 +2669,7 @@ pub mod pallet { ensure!(bonded_pool.can_nominate(&who), Error::::NotNominator); } - T::StakeAdapter::chill(&bonded_pool.bonded_account()) + T::StakeAdapter::chill(Pool::from(bonded_pool.bonded_account())) } /// `origin` bonds funds from `extra` for some pool member `member` into their respective @@ -2918,25 +2921,26 @@ pub mod pallet { // ensure pool is migrated. ensure!( - T::StakeAdapter::pool_strategy(&Self::generate_bonded_account(member.pool_id)) == - adapter::StakeStrategyType::Delegate, - Error::::PoolNotMigrated + T::StakeAdapter::pool_strategy(Pool::from(Self::generate_bonded_account( + member.pool_id + ))) == adapter::StakeStrategyType::Delegate, + Error::::NotMigrated ); let pool_contribution = member.total_balance(); ensure!(pool_contribution >= MinJoinBond::::get(), Error::::MinimumBondNotMet); // the member must have some contribution to be migrated. - ensure!(pool_contribution > Zero::zero(), Error::::NoDelegationToMigrate); + ensure!(pool_contribution > Zero::zero(), Error::::AlreadyMigrated); - let delegation = T::StakeAdapter::member_delegation_balance(&member_account); - // delegation can be claimed only once. - ensure!(delegation == Zero::zero(), Error::::NoDelegationToMigrate); + let delegation = + T::StakeAdapter::member_delegation_balance(Member::from(member_account.clone())); + // delegation should not exist. + ensure!(delegation.is_none(), Error::::AlreadyMigrated); - let diff = pool_contribution.defensive_saturating_sub(delegation); T::StakeAdapter::migrate_delegation( - &Pallet::::generate_bonded_account(member.pool_id), - &member_account, - diff, + Pool::from(Pallet::::generate_bonded_account(member.pool_id)), + Member::from(member_account), + pool_contribution, )?; // if successful, we refund the fee. @@ -2968,9 +2972,9 @@ pub mod pallet { // ensure pool exists. 
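Earlier in this hunk, the member withdrawal was capped by the pool's transferable balance. A hedged sketch of that rule (simplified, unsigned math):

```rust
// Whatever the unbonding pools say the member is owed, the pool can only
// release what it can actually transfer, which guards against accounting
// drift (e.g. a missed slash in the unbond pools).
fn balance_to_unbond(accounted: u128, transferable: u128) -> u128 {
	accounted.min(transferable)
}

fn main() {
	// books say 50, but only 40 is left transferable after a slash
	assert_eq!(balance_to_unbond(50, 40), 40);
}
```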
let bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; ensure!( - T::StakeAdapter::pool_strategy(&bonded_pool.bonded_account()) == + T::StakeAdapter::pool_strategy(Pool::from(bonded_pool.bonded_account())) == adapter::StakeStrategyType::Transfer, - Error::::PoolAlreadyMigrated + Error::::AlreadyMigrated ); Self::migrate_to_delegate_stake(pool_id)?; @@ -3045,7 +3049,7 @@ impl Pallet { "bonded account of dissolving pool should have no consumers" ); defensive_assert!( - T::StakeAdapter::total_stake(&bonded_pool.bonded_account()) == Zero::zero(), + T::StakeAdapter::total_stake(Pool::from(bonded_pool.bonded_account())) == Zero::zero(), "dissolving pool should not have any stake in the staking pallet" ); @@ -3068,12 +3072,14 @@ impl Pallet { "could not transfer all amount to depositor while dissolving pool" ); defensive_assert!( - T::StakeAdapter::total_balance(&bonded_pool.bonded_account()) == Zero::zero(), + T::StakeAdapter::total_balance(Pool::from(bonded_pool.bonded_account())) + .unwrap_or_default() == + Zero::zero(), "dissolving pool should not have any balance" ); // NOTE: Defensively force set balance to zero. T::Currency::set_balance(&reward_account, Zero::zero()); - // With `DelegateStake` strategy, this won't do anything. + // NOTE: With `DelegateStake` strategy, this won't do anything. T::Currency::set_balance(&bonded_pool.bonded_account(), Zero::zero()); Self::deposit_event(Event::::Destroyed { pool_id: bonded_pool.id }); @@ -3090,7 +3096,7 @@ impl Pallet { fn migrate_to_delegate_stake(id: PoolId) -> DispatchResult { T::StakeAdapter::migrate_nominator_to_agent( - &Self::generate_bonded_account(id), + Pool::from(Self::generate_bonded_account(id)), &Self::generate_reward_account(id), ) } @@ -3468,31 +3474,59 @@ impl Pallet { member_account: &T::AccountId, reporter: Option, ) -> DispatchResult { - // calculate points to be slashed. - let member = - PoolMembers::::get(&member_account).ok_or(Error::::PoolMemberNotFound)?; + let member = PoolMembers::::get(member_account).ok_or(Error::::PoolMemberNotFound)?; - let pool_account = Pallet::::generate_bonded_account(member.pool_id); - ensure!(T::StakeAdapter::has_pending_slash(&pool_account), Error::::NothingToSlash); - - let unslashed_balance = T::StakeAdapter::member_delegation_balance(&member_account); - let slashed_balance = member.total_balance(); - defensive_assert!( - unslashed_balance >= slashed_balance, - "unslashed balance should always be greater or equal to the slashed" - ); + let pending_slash = + Self::member_pending_slash(Member::from(member_account.clone()), member.clone())?; // if nothing to slash, return error. - ensure!(unslashed_balance > slashed_balance, Error::::NothingToSlash); + ensure!(!pending_slash.is_zero(), Error::::NothingToSlash); T::StakeAdapter::member_slash( - &member_account, - &pool_account, - unslashed_balance.defensive_saturating_sub(slashed_balance), + Member::from(member_account.clone()), + Pool::from(Pallet::::generate_bonded_account(member.pool_id)), + pending_slash, reporter, ) } + /// Pending slash for a member. + /// + /// Takes the pool_member object corresponding to the `member_account`. + fn member_pending_slash( + member_account: Member, + pool_member: PoolMember, + ) -> Result, DispatchError> { + // only executed in tests: ensure the member account is correct. 
+ debug_assert!( + PoolMembers::::get(member_account.clone().get()).expect("member must exist") == + pool_member + ); + + let pool_account = Pallet::::generate_bonded_account(pool_member.pool_id); + // if the pool doesn't have any pending slash, it implies the member also does not have any + // pending slash. + if T::StakeAdapter::pending_slash(Pool::from(pool_account.clone())).is_zero() { + return Ok(Zero::zero()) + } + + // this is their actual held balance that may or may not have been slashed. + let actual_balance = T::StakeAdapter::member_delegation_balance(member_account) + // no delegation implies the member delegation is not migrated yet to `DelegateStake`. + .ok_or(Error::::NotMigrated)?; + + // this is their balance in the pool + let expected_balance = pool_member.total_balance(); + + defensive_assert!( + actual_balance >= expected_balance, + "actual balance should always be greater or equal to the expected" + ); + + // return the amount to be slashed. + Ok(actual_balance.defensive_saturating_sub(expected_balance)) + } + /// Apply freeze on reward account to restrict it from going below ED. pub(crate) fn freeze_pool_deposit(reward_acc: &T::AccountId) -> DispatchResult { T::Currency::set_freeze( @@ -3656,7 +3690,7 @@ impl Pallet { pool is being destroyed and the depositor is the last member", ); - expected_tvl += T::StakeAdapter::total_stake(&bonded_pool.bonded_account()); + expected_tvl += T::StakeAdapter::total_stake(Pool::from(bonded_pool.bonded_account())); Ok(()) })?; @@ -3685,24 +3719,18 @@ impl Pallet { let subs = SubPoolsStorage::::get(pool_id).unwrap_or_default(); let sum_unbonding_balance = subs.sum_unbonding_balance(); - let bonded_balance = T::StakeAdapter::active_stake(&pool_account); - let total_balance = T::StakeAdapter::total_balance(&pool_account); - - // At the time when StakeAdapter is changed but migration is not yet done, the new - // adapter would return zero balance (as it is not an agent yet). We handle that by - // falling back to reading actual balance of the pool account. - let pool_balance = if total_balance.is_zero() { - T::Currency::total_balance(&pool_account) - } else { - total_balance - }; + let bonded_balance = T::StakeAdapter::active_stake(Pool::from(pool_account.clone())); + let total_balance = T::StakeAdapter::total_balance(Pool::from(pool_account.clone())) + // At the time when StakeAdapter is changed to `DelegateStake` but pool is not yet + // migrated, the total balance would be none. + .unwrap_or(T::Currency::total_balance(&pool_account)); assert!( - pool_balance >= bonded_balance + sum_unbonding_balance, - "faulty pool: {:?} / {:?}, pool_balance {:?} >= bonded_balance {:?} + sum_unbonding_balance {:?}", + total_balance >= bonded_balance + sum_unbonding_balance, + "faulty pool: {:?} / {:?}, total_balance {:?} >= bonded_balance {:?} + sum_unbonding_balance {:?}", pool_id, _pool, - pool_balance, + total_balance, bonded_balance, sum_unbonding_balance ); @@ -3799,12 +3827,75 @@ impl Pallet { pub fn api_balance_to_points(pool_id: PoolId, new_funds: BalanceOf) -> BalanceOf { if let Some(pool) = BondedPool::::get(pool_id) { let bonded_balance = - T::StakeAdapter::active_stake(&Self::generate_bonded_account(pool_id)); + T::StakeAdapter::active_stake(Pool::from(Self::generate_bonded_account(pool_id))); Pallet::::balance_to_point(bonded_balance, pool.points, new_funds) } else { Zero::zero() } } + + /// Returns the unapplied slash of the pool. + /// + /// Pending slash is only applicable with [`adapter::DelegateStake`] strategy. 
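`member_pending_slash` above reduces to arithmetic over two views of the same stake; a hedged sketch of it, before the runtime APIs that expose the result:

```rust
fn member_pending_slash(
	pool_pending_slash: u128,
	held_balance: u128,      // what is actually held for the member
	pool_side_balance: u128, // what the pool's books say the member owns
) -> u128 {
	if pool_pending_slash == 0 {
		// no pool-level slash implies no member-level slash
		return 0;
	}
	held_balance.saturating_sub(pool_side_balance)
}

fn main() {
	// pool slashed; the books show 7 but 20 is still held for the member
	assert_eq!(member_pending_slash(50, 20, 7), 13);
	// after `apply_slash`, both views agree and nothing is pending
	assert_eq!(member_pending_slash(50, 7, 7), 0);
}
```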
+ pub fn api_pool_pending_slash(pool_id: PoolId) -> BalanceOf { + T::StakeAdapter::pending_slash(Pool::from(Self::generate_bonded_account(pool_id))) + } + + /// Returns the unapplied slash of a member. + /// + /// Pending slash is only applicable with [`adapter::DelegateStake`] strategy. + pub fn api_member_pending_slash(who: T::AccountId) -> BalanceOf { + PoolMembers::::get(who.clone()) + .map(|pool_member| { + Self::member_pending_slash(Member::from(who), pool_member).unwrap_or_default() + }) + .unwrap_or_default() + } + + /// Checks whether pool needs to be migrated to [`adapter::StakeStrategyType::Delegate`]. Only + /// applicable when the [`Config::StakeAdapter`] is [`adapter::DelegateStake`]. + /// + /// Useful to check this before calling [`Call::migrate_pool_to_delegate_stake`]. + pub fn api_pool_needs_delegate_migration(pool_id: PoolId) -> bool { + // if the `Delegate` strategy is not used in the pallet, then no migration required. + if T::StakeAdapter::strategy_type() != adapter::StakeStrategyType::Delegate { + return false + } + + let pool_account = Self::generate_bonded_account(pool_id); + // true if pool is still not migrated to `DelegateStake`. + T::StakeAdapter::pool_strategy(Pool::from(pool_account)) != + adapter::StakeStrategyType::Delegate + } + + /// Checks whether member delegation needs to be migrated to + /// [`adapter::StakeStrategyType::Delegate`]. Only applicable when the [`Config::StakeAdapter`] + /// is [`adapter::DelegateStake`]. + /// + /// Useful to check this before calling [`Call::migrate_delegation`]. + pub fn api_member_needs_delegate_migration(who: T::AccountId) -> bool { + // if the `Delegate` strategy is not used in the pallet, then no migration required. + if T::StakeAdapter::strategy_type() != adapter::StakeStrategyType::Delegate { + return false + } + + PoolMembers::::get(who.clone()) + .map(|pool_member| { + if Self::api_pool_needs_delegate_migration(pool_member.pool_id) { + // the pool needs to be migrated before members can be migrated. + return false + } + + let member_balance = pool_member.total_balance(); + let delegated_balance = + T::StakeAdapter::member_delegation_balance(Member::from(who.clone())); + + // if the member has no delegation but has some balance in the pool, then it needs + // to be migrated. + delegated_balance.is_none() && !member_balance.is_zero() + }) + .unwrap_or_default() + } } impl sp_staking::OnStakingUpdate> for Pallet { diff --git a/substrate/frame/nomination-pools/src/migration.rs b/substrate/frame/nomination-pools/src/migration.rs index a3989559dfb..a9222ea53d7 100644 --- a/substrate/frame/nomination-pools/src/migration.rs +++ b/substrate/frame/nomination-pools/src/migration.rs @@ -135,7 +135,8 @@ pub mod unversioned { let pool_acc = Pallet::::generate_bonded_account(id); // only migrate if the pool is in Transfer Strategy. - if T::StakeAdapter::pool_strategy(&pool_acc) == adapter::StakeStrategyType::Transfer + if T::StakeAdapter::pool_strategy(Pool::from(pool_acc)) == + adapter::StakeStrategyType::Transfer { let _ = Pallet::::migrate_to_delegate_stake(id).map_err(|err| { log!( @@ -178,14 +179,11 @@ pub mod unversioned { let mut pool_balances: Vec> = Vec::new(); BondedPools::::iter_keys().take(MaxPools::get() as usize).for_each(|id| { let pool_account = Pallet::::generate_bonded_account(id); - let current_strategy = T::StakeAdapter::pool_strategy(&pool_account); // we ensure migration is idempotent. 
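`api_member_needs_delegate_migration` above composes three conditions. Spelled out as a self-contained predicate (a hedged simplification of the runtime API):

```rust
fn member_needs_delegate_migration(
	adapter_is_delegate: bool, // pallet configured with `DelegateStake`
	pool_migrated: bool,       // pool already moved off `TransferStake`
	member_balance: u128,
	delegated_balance: Option<u128>,
) -> bool {
	adapter_is_delegate &&
		pool_migrated && // the pool must be migrated before its members
		delegated_balance.is_none() &&
		member_balance > 0
}

fn main() {
	assert!(member_needs_delegate_migration(true, true, 10, None));
	// pool not migrated yet: members cannot be migrated either
	assert!(!member_needs_delegate_migration(true, false, 10, None));
	// delegation already exists: nothing left to migrate
	assert!(!member_needs_delegate_migration(true, true, 10, Some(10)));
}
```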
- let pool_balance = if current_strategy == adapter::StakeStrategyType::Transfer { - T::Currency::total_balance(&pool_account) - } else { - T::StakeAdapter::total_balance(&pool_account) - }; + let pool_balance = T::StakeAdapter::total_balance(Pool::from(pool_account.clone())) + // we check actual account balance if pool has not migrated yet. + .unwrap_or(T::Currency::total_balance(&pool_account)); pool_balances.push(pool_balance); }); @@ -201,14 +199,16 @@ pub mod unversioned { BondedPools::::iter_keys().take(MaxPools::get() as usize).enumerate() { let pool_account = Pallet::::generate_bonded_account(id); - if T::StakeAdapter::pool_strategy(&pool_account) == + if T::StakeAdapter::pool_strategy(Pool::from(pool_account.clone())) == adapter::StakeStrategyType::Transfer { log!(error, "Pool {} failed to migrate", id,); return Err(TryRuntimeError::Other("Pool failed to migrate")); } - let actual_balance = T::StakeAdapter::total_balance(&pool_account); + let actual_balance = + T::StakeAdapter::total_balance(Pool::from(pool_account.clone())) + .expect("after migration, this should return a value"); let expected_balance = expected_pool_balances.get(index).unwrap(); if actual_balance != *expected_balance { @@ -1154,7 +1154,9 @@ mod helpers { pub(crate) fn calculate_tvl_by_total_stake() -> BalanceOf { BondedPools::::iter_keys() - .map(|id| T::StakeAdapter::total_stake(&Pallet::::generate_bonded_account(id))) + .map(|id| { + T::StakeAdapter::total_stake(Pool::from(Pallet::::generate_bonded_account(id))) + }) .reduce(|acc, total_balance| acc + total_balance) .unwrap_or_default() } diff --git a/substrate/frame/nomination-pools/src/tests.rs b/substrate/frame/nomination-pools/src/tests.rs index 8fc339c695b..28063c2ecae 100644 --- a/substrate/frame/nomination-pools/src/tests.rs +++ b/substrate/frame/nomination-pools/src/tests.rs @@ -5021,6 +5021,10 @@ mod set_state { Error::::NotSupported ); + // pending slash api should return zero as well. + assert_eq!(Pools::api_pool_pending_slash(1), 0); + assert_eq!(Pools::api_member_pending_slash(10), 0); + // When assert_ok!(Pools::set_state(RuntimeOrigin::signed(11), 1, PoolState::Destroying)); // Then @@ -7518,12 +7522,14 @@ mod delegate_stake { ); // ensure pool 1 cannot be migrated. + assert!(!Pools::api_pool_needs_delegate_migration(1)); assert_noop!( Pools::migrate_pool_to_delegate_stake(RuntimeOrigin::signed(10), 1), Error::::NotSupported ); // members cannot be migrated either. + assert!(!Pools::api_member_needs_delegate_migration(10)); assert_noop!( Pools::migrate_delegation(RuntimeOrigin::signed(10), 11), Error::::NotSupported diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs index d3235760ed2..51f6470f90d 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs +++ b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs @@ -35,6 +35,7 @@ use pallet_staking::{ use pallet_delegated_staking::{Error as DelegatedStakingError, Event as DelegatedStakingEvent}; use sp_runtime::{bounded_btree_map, traits::Zero}; +use sp_staking::Agent; #[test] fn pool_lifecycle_e2e() { @@ -666,6 +667,10 @@ fn pool_slash_proportional() { // Apply a slash that happened in era 100. This is typically applied with a delay. // Of the total 100, 50 is slashed. assert_eq!(BondedPools::::get(1).unwrap().points, 40); + + // no pending slash yet. 
+ assert_eq!(Pools::api_pool_pending_slash(1), 0); + pallet_staking::slashing::do_slash::( &POOL1_BONDED, 50, @@ -674,6 +679,9 @@ fn pool_slash_proportional() { 100, ); + // Pools api returns correct slash amount. + assert_eq!(Pools::api_pool_pending_slash(1), 50); + assert_eq!( staking_events_since_last_call(), vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 50 }] @@ -695,10 +703,14 @@ fn pool_slash_proportional() { assert_eq!(PoolMembers::::get(21).unwrap().total_balance(), 7); // But their actual balance is still unslashed. assert_eq!(Balances::total_balance_on_hold(&21), bond); + // 21 has pending slash + assert_eq!(Pools::api_member_pending_slash(21), bond - 7); // apply slash permissionlessly. assert_ok!(Pools::apply_slash(RuntimeOrigin::signed(10), 21)); // member balance is slashed. assert_eq!(Balances::total_balance_on_hold(&21), 7); + // 21 has no pending slash anymore + assert_eq!(Pools::api_member_pending_slash(21), 0); assert_eq!( delegated_staking_events_since_last_call(), @@ -977,6 +989,7 @@ fn pool_migration_e2e() { ); // with `TransferStake`, we can't migrate. + assert!(!Pools::api_pool_needs_delegate_migration(1)); assert_noop!( Pools::migrate_pool_to_delegate_stake(RuntimeOrigin::signed(10), 1), PoolsError::::NotSupported @@ -986,22 +999,26 @@ fn pool_migration_e2e() { LegacyAdapter::set(false); // cannot migrate the member delegation unless pool is migrated first. + assert!(!Pools::api_member_needs_delegate_migration(20)); assert_noop!( Pools::migrate_delegation(RuntimeOrigin::signed(10), 20), - PoolsError::::PoolNotMigrated + PoolsError::::NotMigrated ); // migrate the pool. + assert!(Pools::api_pool_needs_delegate_migration(1)); assert_ok!(Pools::migrate_pool_to_delegate_stake(RuntimeOrigin::signed(10), 1)); // migrate again does not work. + assert!(!Pools::api_pool_needs_delegate_migration(1)); assert_noop!( Pools::migrate_pool_to_delegate_stake(RuntimeOrigin::signed(10), 1), - PoolsError::::PoolAlreadyMigrated + PoolsError::::AlreadyMigrated ); // unclaimed delegations to the pool are stored in this account. - let proxy_delegator_1 = DelegatedStaking::generate_proxy_delegator(POOL1_BONDED); + let proxy_delegator_1 = + DelegatedStaking::generate_proxy_delegator(Agent::from(POOL1_BONDED)).get(); assert_eq!( delegated_staking_events_since_last_call(), @@ -1027,6 +1044,7 @@ fn pool_migration_e2e() { assert_eq!(Balances::total_balance_on_hold(&20), 0); // migrate delegation for 20. This is permissionless and can be called by anyone. + assert!(Pools::api_member_needs_delegate_migration(20)); assert_ok!(Pools::migrate_delegation(RuntimeOrigin::signed(10), 20)); // tokens moved to 20's account and held there. @@ -1071,6 +1089,7 @@ fn pool_migration_e2e() { assert_eq!(Balances::total_balance_on_hold(&21), 0); // migrate delegation for 21. + assert!(Pools::api_member_needs_delegate_migration(21)); assert_ok!(Pools::migrate_delegation(RuntimeOrigin::signed(10), 21)); // tokens moved to 21's account and held there. @@ -1098,8 +1117,16 @@ fn pool_migration_e2e() { assert_eq!(Balances::total_balance_on_hold(&22), 0); // migrate delegation for 22. + assert!(Pools::api_member_needs_delegate_migration(22)); assert_ok!(Pools::migrate_delegation(RuntimeOrigin::signed(10), 22)); + // cannot migrate a pool member again. + assert!(!Pools::api_member_needs_delegate_migration(22)); + assert_noop!( + Pools::migrate_delegation(RuntimeOrigin::signed(10), 22), + PoolsError::::AlreadyMigrated + ); + // tokens moved to 22's account and held there. 
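The `pool_migration_e2e` test above pins down a strict ordering: a pool on `TransferStake` must be migrated before any of its members, and both operations refuse to run twice. A toy model of that state machine (not pallet code) that reproduces the test's assertions:

```rust
#[derive(Debug, PartialEq)]
enum Error {
	NotMigrated,
	AlreadyMigrated,
}

struct State {
	pool_migrated: bool,
	member_migrated: bool,
}

fn migrate_pool(s: &mut State) -> Result<(), Error> {
	if s.pool_migrated {
		return Err(Error::AlreadyMigrated);
	}
	s.pool_migrated = true;
	Ok(())
}

fn migrate_member(s: &mut State) -> Result<(), Error> {
	if !s.pool_migrated {
		return Err(Error::NotMigrated); // pool first
	}
	if s.member_migrated {
		return Err(Error::AlreadyMigrated); // delegation claimed only once
	}
	s.member_migrated = true;
	Ok(())
}

fn main() {
	let mut s = State { pool_migrated: false, member_migrated: false };
	assert_eq!(migrate_member(&mut s), Err(Error::NotMigrated));
	assert!(migrate_pool(&mut s).is_ok());
	assert_eq!(migrate_pool(&mut s), Err(Error::AlreadyMigrated));
	assert!(migrate_member(&mut s).is_ok());
	assert_eq!(migrate_member(&mut s), Err(Error::AlreadyMigrated));
}
```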
assert_eq!(Balances::total_balance(&22), pre_migrate_balance_22 + 10); assert_eq!(Balances::total_balance_on_hold(&22), 10); diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs index 1c0a0166fd9..50182326359 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs +++ b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs @@ -24,7 +24,10 @@ use frame_support::{ PalletId, }; use frame_system::EnsureRoot; -use pallet_nomination_pools::{adapter::StakeStrategyType, BondType}; +use pallet_nomination_pools::{ + adapter::{Member, Pool, StakeStrategyType}, + BondType, +}; use sp_runtime::{ traits::{Convert, IdentityLookup}, BuildStorage, FixedU128, Perbill, @@ -190,21 +193,21 @@ impl pallet_nomination_pools::adapter::StakeStrategy for MockAdapter { } DelegateStake::strategy_type() } - fn transferable_balance(pool_account: &Self::AccountId) -> Self::Balance { + fn transferable_balance(pool_account: Pool) -> Self::Balance { if LegacyAdapter::get() { return TransferStake::transferable_balance(pool_account) } DelegateStake::transferable_balance(pool_account) } - fn total_balance(pool_account: &Self::AccountId) -> Self::Balance { + fn total_balance(pool_account: Pool) -> Option { if LegacyAdapter::get() { return TransferStake::total_balance(pool_account) } DelegateStake::total_balance(pool_account) } - fn member_delegation_balance(member_account: &Self::AccountId) -> Self::Balance { + fn member_delegation_balance(member_account: Member) -> Option { if LegacyAdapter::get() { return TransferStake::member_delegation_balance(member_account) } @@ -212,8 +215,8 @@ impl pallet_nomination_pools::adapter::StakeStrategy for MockAdapter { } fn pledge_bond( - who: &Self::AccountId, - pool_account: &Self::AccountId, + who: Member, + pool_account: Pool, reward_account: &Self::AccountId, amount: Self::Balance, bond_type: BondType, @@ -225,8 +228,8 @@ impl pallet_nomination_pools::adapter::StakeStrategy for MockAdapter { } fn member_withdraw( - who: &Self::AccountId, - pool_account: &Self::AccountId, + who: Member, + pool_account: Pool, amount: Self::Balance, num_slashing_spans: u32, ) -> DispatchResult { @@ -236,16 +239,16 @@ impl pallet_nomination_pools::adapter::StakeStrategy for MockAdapter { DelegateStake::member_withdraw(who, pool_account, amount, num_slashing_spans) } - fn has_pending_slash(pool_account: &Self::AccountId) -> bool { + fn pending_slash(pool_account: Pool) -> Self::Balance { if LegacyAdapter::get() { - return TransferStake::has_pending_slash(pool_account) + return TransferStake::pending_slash(pool_account) } - DelegateStake::has_pending_slash(pool_account) + DelegateStake::pending_slash(pool_account) } fn member_slash( - who: &Self::AccountId, - pool_account: &Self::AccountId, + who: Member, + pool_account: Pool, amount: Self::Balance, maybe_reporter: Option, ) -> DispatchResult { @@ -256,7 +259,7 @@ impl pallet_nomination_pools::adapter::StakeStrategy for MockAdapter { } fn migrate_nominator_to_agent( - agent: &Self::AccountId, + agent: Pool, reward_account: &Self::AccountId, ) -> DispatchResult { if LegacyAdapter::get() { @@ -266,8 +269,8 @@ impl pallet_nomination_pools::adapter::StakeStrategy for MockAdapter { } fn migrate_delegation( - agent: &Self::AccountId, - delegator: &Self::AccountId, + agent: Pool, + delegator: Member, value: Self::Balance, ) -> DispatchResult { if LegacyAdapter::get() { diff --git a/substrate/primitives/staking/src/lib.rs 
b/substrate/primitives/staking/src/lib.rs index 28a61cd4331..c1cf7f2778f 100644 --- a/substrate/primitives/staking/src/lib.rs +++ b/substrate/primitives/staking/src/lib.rs @@ -463,17 +463,48 @@ pub struct PagedExposureMetadata { pub page_count: Page, } -/// Trait to provide delegation functionality for stakers. +/// A type that belongs only in the context of an `Agent`. /// -/// Introduces two new terms to the staking system: -/// - `Delegator`: An account that delegates funds to an `Agent`. -/// - `Agent`: An account that receives delegated funds from `Delegators`. It can then use these -/// funds to participate in the staking system. It can never use its own funds to stake. They -/// (virtually bond)[`StakingUnchecked::virtual_bond`] into the staking system and can also be -/// termed as `Virtual Nominators`. +/// `Agent` is someone that manages delegated funds from [`Delegator`] accounts. It can +/// then use these funds to participate in the staking system. It can never use its own funds to +/// stake. They instead (virtually bond)[`StakingUnchecked::virtual_bond`] into the staking system +/// and are also called `Virtual Stakers`. /// -/// The `Agent` is responsible for managing rewards and slashing for all the `Delegators` that +/// The `Agent` is also responsible for managing rewards and slashing for all the `Delegators` that /// have delegated funds to it. +#[derive(Clone, Debug)] +pub struct Agent(T); +impl From for Agent { + fn from(acc: T) -> Self { + Agent(acc) + } +} + +impl Agent { + pub fn get(self) -> T { + self.0 + } +} + +/// A type that belongs only in the context of a `Delegator`. +/// +/// `Delegator` is someone that delegates funds to an `Agent`, allowing them to pool funds +/// along with other delegators and participate in the staking system. +#[derive(Clone, Debug)] +pub struct Delegator(T); +impl From for Delegator { + fn from(acc: T) -> Self { + Delegator(acc) + } +} + +impl Delegator { + pub fn get(self) -> T { + self.0 + } +} + +/// Trait to provide delegation functionality for stakers. pub trait DelegationInterface { /// Balance type used by the staking system. type Balance: Sub @@ -489,20 +520,20 @@ pub trait DelegationInterface { /// AccountId type used by the staking system. type AccountId: Clone + core::fmt::Debug; - /// Effective balance of the `Agent` account. + /// Returns effective balance of the `Agent` account. `None` if not an `Agent`. /// - /// This takes into account any pending slashes to `Agent`. - fn agent_balance(agent: &Self::AccountId) -> Self::Balance; + /// This takes into account any pending slashes to `Agent` against the delegated balance. + fn agent_balance(agent: Agent) -> Option; - /// Returns the total amount of funds delegated by a `delegator`. - fn delegator_balance(delegator: &Self::AccountId) -> Self::Balance; + /// Returns the total amount of funds delegated. `None` if not a `Delegator`. + fn delegator_balance(delegator: Delegator) -> Option; /// Delegate funds to `Agent`. /// /// Only used for the initial delegation. Use [`Self::delegate_extra`] to add more delegation. fn delegate( - delegator: &Self::AccountId, - agent: &Self::AccountId, + delegator: Delegator, + agent: Agent, reward_account: &Self::AccountId, amount: Self::Balance, ) -> DispatchResult; @@ -511,8 +542,8 @@ pub trait DelegationInterface { /// /// If this is the first delegation, use [`Self::delegate`] instead. 
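The `delegate` / `delegate_extra` split documented here is create-vs-extend. A toy model of the dispatch rule (the exact error behaviour is an assumption; the doc comments above only prescribe which call to use when):

```rust
#[derive(Debug, PartialEq)]
enum Error {
	AlreadyDelegating,
	NotDelegating,
}

// `delegate` must create the record, `delegate_extra` must extend it.
fn delegate(record: &mut Option<u128>, amount: u128) -> Result<(), Error> {
	if record.is_some() {
		return Err(Error::AlreadyDelegating);
	}
	*record = Some(amount);
	Ok(())
}

fn delegate_extra(record: &mut Option<u128>, amount: u128) -> Result<(), Error> {
	match record {
		Some(total) => {
			*total += amount;
			Ok(())
		},
		None => Err(Error::NotDelegating),
	}
}

fn main() {
	let mut record = None;
	assert_eq!(delegate_extra(&mut record, 5), Err(Error::NotDelegating));
	assert!(delegate(&mut record, 10).is_ok());
	assert_eq!(delegate(&mut record, 1), Err(Error::AlreadyDelegating));
	assert!(delegate_extra(&mut record, 5).is_ok());
	assert_eq!(record, Some(15));
}
```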
fn delegate_extra( - delegator: &Self::AccountId, - agent: &Self::AccountId, + delegator: Delegator, + agent: Agent, amount: Self::Balance, ) -> DispatchResult; @@ -521,25 +552,25 @@ pub trait DelegationInterface { /// If there are `Agent` funds upto `amount` available to withdraw, then those funds would /// be released to the `delegator` fn withdraw_delegation( - delegator: &Self::AccountId, - agent: &Self::AccountId, + delegator: Delegator, + agent: Agent, amount: Self::Balance, num_slashing_spans: u32, ) -> DispatchResult; - /// Returns true if there are pending slashes posted to the `Agent` account. + /// Returns pending slashes posted to the `Agent` account. None if not an `Agent`. /// /// Slashes to `Agent` account are not immediate and are applied lazily. Since `Agent` /// has an unbounded number of delegators, immediate slashing is not possible. - fn has_pending_slash(agent: &Self::AccountId) -> bool; + fn pending_slash(agent: Agent) -> Option; /// Apply a pending slash to an `Agent` by slashing `value` from `delegator`. /// /// A reporter may be provided (if one exists) in order for the implementor to reward them, /// if applicable. fn delegator_slash( - agent: &Self::AccountId, - delegator: &Self::AccountId, + agent: Agent, + delegator: Delegator, value: Self::Balance, maybe_reporter: Option, ) -> DispatchResult; @@ -567,7 +598,7 @@ pub trait DelegationMigrator { /// The implementation should ensure the `Nominator` account funds are moved to an escrow /// from which `Agents` can later release funds to its `Delegators`. fn migrate_nominator_to_agent( - agent: &Self::AccountId, + agent: Agent, reward_account: &Self::AccountId, ) -> DispatchResult; @@ -576,8 +607,8 @@ pub trait DelegationMigrator { /// When a direct `Nominator` migrates to `Agent`, the funds are kept in escrow. This function /// allows the `Agent` to release the funds to the `delegator`. fn migrate_delegation( - agent: &Self::AccountId, - delegator: &Self::AccountId, + agent: Agent, + delegator: Delegator, value: Self::Balance, ) -> DispatchResult; @@ -585,7 +616,7 @@ pub trait DelegationMigrator { /// /// Also removed from [`StakingUnchecked`] as a Virtual Staker. Useful for testing. #[cfg(feature = "runtime-benchmarks")] - fn drop_agent(agent: &Self::AccountId); + fn drop_agent(agent: Agent); } sp_core::generate_feature_enabled_macro!(runtime_benchmarks_enabled, feature = "runtime-benchmarks", $); -- GitLab From 27a57324d1c5b9ba640f20e4c9bbab1f2d8faab2 Mon Sep 17 00:00:00 2001 From: girazoki Date: Mon, 3 Jun 2024 10:13:26 +0200 Subject: [PATCH 100/106] make all storage items in parachain-system public (#4645) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Makes all storage items in parachain-system public so that these can be used by other pallets &/or runtimes. --------- Co-authored-by: Bastian Kรถcher Co-authored-by: command-bot <> --- cumulus/pallets/parachain-system/src/lib.rs | 51 ++++++++++----------- prdoc/pr_4645.prdoc | 16 +++++++ 2 files changed, 40 insertions(+), 27 deletions(-) create mode 100644 prdoc/pr_4645.prdoc diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 3b609a675db..bbb74a1b053 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -743,14 +743,13 @@ pub mod pallet { /// The segment length is limited by the capacity returned from the [`ConsensusHook`] configured /// in the pallet. 
#[pallet::storage] - pub(super) type UnincludedSegment = - StorageValue<_, Vec>, ValueQuery>; + pub type UnincludedSegment = StorageValue<_, Vec>, ValueQuery>; /// Storage field that keeps track of bandwidth used by the unincluded segment along with the /// latest HRMP watermark. Used for limiting the acceptance of new blocks with /// respect to relay chain constraints. #[pallet::storage] - pub(super) type AggregatedUnincludedSegment = + pub type AggregatedUnincludedSegment = StorageValue<_, SegmentTracker, OptionQuery>; /// In case of a scheduled upgrade, this storage field contains the validation code to be @@ -760,7 +759,7 @@ pub mod pallet { /// [`:code`][sp_core::storage::well_known_keys::CODE] which will result the next block process /// with the new validation code. This concludes the upgrade process. #[pallet::storage] - pub(super) type PendingValidationCode = StorageValue<_, Vec, ValueQuery>; + pub type PendingValidationCode = StorageValue<_, Vec, ValueQuery>; /// Validation code that is set by the parachain and is to be communicated to collator and /// consequently the relay-chain. @@ -768,23 +767,23 @@ pub mod pallet { /// This will be cleared in `on_initialize` of each new block if no other pallet already set /// the value. #[pallet::storage] - pub(super) type NewValidationCode = StorageValue<_, Vec, OptionQuery>; + pub type NewValidationCode = StorageValue<_, Vec, OptionQuery>; /// The [`PersistedValidationData`] set for this block. /// This value is expected to be set only once per block and it's never stored /// in the trie. #[pallet::storage] - pub(super) type ValidationData = StorageValue<_, PersistedValidationData>; + pub type ValidationData = StorageValue<_, PersistedValidationData>; /// Were the validation data set to notify the relay chain? #[pallet::storage] - pub(super) type DidSetValidationCode = StorageValue<_, bool, ValueQuery>; + pub type DidSetValidationCode = StorageValue<_, bool, ValueQuery>; /// The relay chain block number associated with the last parachain block. /// /// This is updated in `on_finalize`. #[pallet::storage] - pub(super) type LastRelayChainBlockNumber = + pub type LastRelayChainBlockNumber = StorageValue<_, RelayChainBlockNumber, ValueQuery>; /// An option which indicates if the relay-chain restricts signalling a validation code upgrade. @@ -795,7 +794,7 @@ pub mod pallet { /// relay-chain. This value is ephemeral which means it doesn't hit the storage. This value is /// set after the inherent. #[pallet::storage] - pub(super) type UpgradeRestrictionSignal = + pub type UpgradeRestrictionSignal = StorageValue<_, Option, ValueQuery>; /// Optional upgrade go-ahead signal from the relay-chain. @@ -804,7 +803,7 @@ pub mod pallet { /// relay-chain. This value is ephemeral which means it doesn't hit the storage. This value is /// set after the inherent. #[pallet::storage] - pub(super) type UpgradeGoAhead = + pub type UpgradeGoAhead = StorageValue<_, Option, ValueQuery>; /// The state proof for the last relay parent block. @@ -814,7 +813,7 @@ pub mod pallet { /// /// This data is also absent from the genesis. #[pallet::storage] - pub(super) type RelayStateProof = StorageValue<_, sp_trie::StorageProof>; + pub type RelayStateProof = StorageValue<_, sp_trie::StorageProof>; /// The snapshot of some state related to messaging relevant to the current parachain as per /// the relay parent. @@ -824,7 +823,7 @@ pub mod pallet { /// /// This data is also absent from the genesis. 
#[pallet::storage] - pub(super) type RelevantMessagingState = StorageValue<_, MessagingStateSnapshot>; + pub type RelevantMessagingState = StorageValue<_, MessagingStateSnapshot>; /// The parachain host configuration that was obtained from the relay parent. /// @@ -834,53 +833,51 @@ pub mod pallet { /// This data is also absent from the genesis. #[pallet::storage] #[pallet::disable_try_decode_storage] - pub(super) type HostConfiguration = StorageValue<_, AbridgedHostConfiguration>; + pub type HostConfiguration = StorageValue<_, AbridgedHostConfiguration>; /// The last downward message queue chain head we have observed. /// /// This value is loaded before and saved after processing inbound downward messages carried /// by the system inherent. #[pallet::storage] - pub(super) type LastDmqMqcHead = StorageValue<_, MessageQueueChain, ValueQuery>; + pub type LastDmqMqcHead = StorageValue<_, MessageQueueChain, ValueQuery>; /// The message queue chain heads we have observed per each channel incoming channel. /// /// This value is loaded before and saved after processing inbound downward messages carried /// by the system inherent. #[pallet::storage] - pub(super) type LastHrmpMqcHeads = + pub type LastHrmpMqcHeads = StorageValue<_, BTreeMap, ValueQuery>; /// Number of downward messages processed in a block. /// /// This will be cleared in `on_initialize` of each new block. #[pallet::storage] - pub(super) type ProcessedDownwardMessages = StorageValue<_, u32, ValueQuery>; + pub type ProcessedDownwardMessages = StorageValue<_, u32, ValueQuery>; /// HRMP watermark that was set in a block. /// /// This will be cleared in `on_initialize` of each new block. #[pallet::storage] - pub(super) type HrmpWatermark = - StorageValue<_, relay_chain::BlockNumber, ValueQuery>; + pub type HrmpWatermark = StorageValue<_, relay_chain::BlockNumber, ValueQuery>; /// HRMP messages that were sent in a block. /// /// This will be cleared in `on_initialize` of each new block. #[pallet::storage] - pub(super) type HrmpOutboundMessages = + pub type HrmpOutboundMessages = StorageValue<_, Vec, ValueQuery>; /// Upward messages that were sent in a block. /// /// This will be cleared in `on_initialize` of each new block. #[pallet::storage] - pub(super) type UpwardMessages = StorageValue<_, Vec, ValueQuery>; + pub type UpwardMessages = StorageValue<_, Vec, ValueQuery>; /// Upward messages that are still pending and not yet send to the relay chain. #[pallet::storage] - pub(super) type PendingUpwardMessages = - StorageValue<_, Vec, ValueQuery>; + pub type PendingUpwardMessages = StorageValue<_, Vec, ValueQuery>; /// Initialization value for the delivery fee factor for UMP. #[pallet::type_value] @@ -890,29 +887,29 @@ pub mod pallet { /// The factor to multiply the base delivery fee by for UMP. #[pallet::storage] - pub(super) type UpwardDeliveryFeeFactor = + pub type UpwardDeliveryFeeFactor = StorageValue<_, FixedU128, ValueQuery, UpwardInitialDeliveryFeeFactor>; /// The number of HRMP messages we observed in `on_initialize` and thus used that number for /// announcing the weight of `on_initialize` and `on_finalize`. #[pallet::storage] - pub(super) type AnnouncedHrmpMessagesPerCandidate = StorageValue<_, u32, ValueQuery>; + pub type AnnouncedHrmpMessagesPerCandidate = StorageValue<_, u32, ValueQuery>; /// The weight we reserve at the beginning of the block for processing XCMP messages. This /// overrides the amount set in the Config trait. 
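With the `pub(super)` to `pub` change, sibling pallets and the runtime itself can read these items directly. A hedged sketch of a consumer (this only compiles inside a runtime crate that depends on the pallet; the helper name is illustrative, while the storage item and its type are taken from the hunks above):

```rust
use cumulus_pallet_parachain_system as parachain_system;
use cumulus_primitives_core::relay_chain;

/// Relay-chain block number that the last parachain block was anchored to.
pub fn last_relay_block_number<T: parachain_system::Config>() -> relay_chain::BlockNumber {
	// `LastRelayChainBlockNumber` is one of the items made `pub` by this
	// commit; before the change it was only reachable from within the pallet.
	parachain_system::LastRelayChainBlockNumber::<T>::get()
}
```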
#[pallet::storage] - pub(super) type ReservedXcmpWeightOverride = StorageValue<_, Weight>; + pub type ReservedXcmpWeightOverride = StorageValue<_, Weight>; /// The weight we reserve at the beginning of the block for processing DMP messages. This /// overrides the amount set in the Config trait. #[pallet::storage] - pub(super) type ReservedDmpWeightOverride = StorageValue<_, Weight>; + pub type ReservedDmpWeightOverride = StorageValue<_, Weight>; /// A custom head data that should be returned as result of `validate_block`. /// /// See `Pallet::set_custom_validation_head_data` for more information. #[pallet::storage] - pub(super) type CustomValidationHeadData = StorageValue<_, Vec, OptionQuery>; + pub type CustomValidationHeadData = StorageValue<_, Vec, OptionQuery>; #[pallet::inherent] impl ProvideInherent for Pallet { diff --git a/prdoc/pr_4645.prdoc b/prdoc/pr_4645.prdoc new file mode 100644 index 00000000000..1bc65f02ea5 --- /dev/null +++ b/prdoc/pr_4645.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: make all storage items in parachain-system public + +doc: + - audience: Runtime Dev + description: | + All storage items in cumulus-pallet-parachain-systemare now public. This allows + the usage of these storage items from within other runtime-pallets + or the runtime itself. For instance, it should allow to read the latests + relay state proof to read a certain well-known-key. + +crates: + - name: cumulus-pallet-parachain-system + bump: minor \ No newline at end of file -- GitLab From 63f0cbf79a7fa9485287b646bb0a7264024cfab9 Mon Sep 17 00:00:00 2001 From: tugy <33746108+tugytur@users.noreply.github.com> Date: Mon, 3 Jun 2024 10:15:26 +0200 Subject: [PATCH 101/106] Update Amforc bootnodes for Kusama and Polkadot (#4668) Tested each bootnode with `--reserved-only` `--reserved-nodes` Kusama ``` polkadot --chain kusama --base-path /tmp/node --reserved-only --reserved-nodes "/dns/kusama.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWKvYf6qKaAF8UUDw3KsTwjHLnvkED23yxHbH3npMe8w4G" --no-hardware-benchmarks polkadot --chain kusama --base-path /tmp/node --reserved-only --reserved-nodes "/dns/kusama.bootnode.amforc.com/tcp/30001/p2p/12D3KooWKvYf6qKaAF8UUDw3KsTwjHLnvkED23yxHbH3npMe8w4G" --no-hardware-benchmarks ``` Asset Hub Kusama ``` ./polkadot-parachain --chain asset-hub-kusama --base-path /tmp/node --reserved-only --reserved-nodes "/dns/asset-hub-kusama.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWHy1CPndZYphwdVqMb295KPC6LRt17Ae3zNSr7evzeF5a" --no-hardware-benchmarks ./polkadot-parachain --chain asset-hub-kusama --base-path /tmp/node --reserved-only --reserved-nodes "/dns/asset-hub-kusama.bootnode.amforc.com/tcp/30007/p2p/12D3KooWHy1CPndZYphwdVqMb295KPC6LRt17Ae3zNSr7evzeF5a" --no-hardware-benchmarks ``` Bridge Hub Kusama ``` ./polkadot-parachain --chain bridge-hub-kusama --base-path /tmp/node --reserved-only --reserved-nodes "/dns/bridge-hub-kusama.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWNyTBwRvCz1Ey2SgC1f3MvymhiAyLEa3cL8kU5gFH3V7Z" --no-hardware-benchmarks ./polkadot-parachain --chain bridge-hub-kusama --base-path /tmp/node --reserved-only --reserved-nodes "/dns/bridge-hub-kusama.bootnode.amforc.com/tcp/30010/p2p/12D3KooWNyTBwRvCz1Ey2SgC1f3MvymhiAyLEa3cL8kU5gFH3V7Z" --no-hardware-benchmarks ``` Coretime Kusama ``` ./polkadot-parachain --chain coretime-kusama --base-path /tmp/node --reserved-only --reserved-nodes 
"/dns/coretime-kusama.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWPrgxrrumrANp6Bp2SMEwMQHPHDbPzA1HbcrakZrbFi5P" --no-hardware-benchmarks ./polkadot-parachain --chain coretime-kusama --base-path /tmp/node --reserved-only --reserved-nodes "/dns/coretime-kusama.bootnode.amforc.com/tcp/30013/p2p/12D3KooWPrgxrrumrANp6Bp2SMEwMQHPHDbPzA1HbcrakZrbFi5P" --no-hardware-benchmarks ``` People Kusama ``` ./polkadot-parachain --chain people-kusama --base-path /tmp/node --reserved-only --reserved-nodes "/dns/people-kusama.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWPjzgKZe5jdG6TY4gwcFq8QxyyhqsYbQo6N29pwGePWLA" --no-hardware-benchmarks ./polkadot-parachain --chain people-kusama --base-path /tmp/node --reserved-only --reserved-nodes "/dns/people-kusama.bootnode.amforc.com/tcp/30004/p2p/12D3KooWPjzgKZe5jdG6TY4gwcFq8QxyyhqsYbQo6N29pwGePWLA" --no-hardware-benchmarks ``` People Westend ``` ./polkadot-parachain --chain people-westend --base-path /tmp/node --reserved-only --reserved-nodes "/dns/people-westend.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWE1btdwDhNpApg8BEe2QwJxdVDtz6a6BRhgTeUh9HMhWs" --no-hardware-benchmarks ./polkadot-parachain --chain people-westend --base-path /tmp/node --reserved-only --reserved-nodes "/dns/people-westend.bootnode.amforc.com/tcp/30016/p2p/12D3KooWE1btdwDhNpApg8BEe2QwJxdVDtz6a6BRhgTeUh9HMhWs" --no-hardware-benchmarks ``` Polkadot ``` polkadot --chain polkadot --base-path /tmp/node --reserved-only --reserved-nodes "/dns/polkadot.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWT2HyZx5C6BBeLbCKhYG2SqJYuiu7sLMxGzUcQBko3BMr" --no-hardware-benchmarks polkadot --chain polkadot --base-path /tmp/node --reserved-only --reserved-nodes "/dns/polkadot.bootnode.amforc.com/tcp/30001/p2p/12D3KooWT2HyZx5C6BBeLbCKhYG2SqJYuiu7sLMxGzUcQBko3BMr" --no-hardware-benchmarks ``` Asset Hub Polkadot ``` ./polkadot-parachain --chain asset-hub-polkadot --base-path /tmp/node --reserved-only --reserved-nodes "/dns/asset-hub-polkadot.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWDLxPXYnSHjNwq9ibqgxuzRni5VViuGNSjNe3ueqVgqE3" --no-hardware-benchmarks ./polkadot-parachain --chain asset-hub-polkadot --base-path /tmp/node --reserved-only --reserved-nodes "/dns/asset-hub-polkadot.bootnode.amforc.com/tcp/30007/p2p/12D3KooWDLxPXYnSHjNwq9ibqgxuzRni5VViuGNSjNe3ueqVgqE3" --no-hardware-benchmarks ``` Bridge Hub Polkadot ``` ./polkadot-parachain --chain bridge-hub-polkadot --base-path /tmp/node --reserved-only --reserved-nodes "/dns/bridge-hub-polkadot.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWGT5E56rAHfT5dY1pMLTrpAgV72yfDtD1Y5tPCHaTsifp" --no-hardware-benchmarks ./polkadot-parachain --chain bridge-hub-polkadot --base-path /tmp/node --reserved-only --reserved-nodes "/dns/bridge-hub-polkadot.bootnode.amforc.com/tcp/30010/p2p/12D3KooWGT5E56rAHfT5dY1pMLTrpAgV72yfDtD1Y5tPCHaTsifp" --no-hardware-benchmarks ``` Collectives Polkadot ``` ./polkadot-parachain --chain collectives-polkadot --base-path /tmp/node --reserved-only --reserved-nodes "/dns/collectives-polkadot.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWL6v6FHMtCP5VsiDbMHLRFiW6YBtv37BarpW3hLqnDski" --no-hardware-benchmarks ./polkadot-parachain --chain collectives-polkadot --base-path /tmp/node --reserved-only --reserved-nodes "/dns/collectives-polkadot.bootnode.amforc.com/tcp/30013/p2p/12D3KooWL6v6FHMtCP5VsiDbMHLRFiW6YBtv37BarpW3hLqnDski" --no-hardware-benchmarks ``` --- cumulus/parachains/chain-specs/asset-hub-kusama.json | 4 ++-- cumulus/parachains/chain-specs/asset-hub-polkadot.json | 4 ++-- cumulus/parachains/chain-specs/bridge-hub-kusama.json | 4 ++-- 
cumulus/parachains/chain-specs/bridge-hub-polkadot.json | 4 +++- cumulus/parachains/chain-specs/collectives-polkadot.json | 4 ++-- cumulus/parachains/chain-specs/coretime-kusama.json | 4 +++- cumulus/parachains/chain-specs/people-kusama.json | 4 +++- cumulus/parachains/chain-specs/people-westend.json | 4 ++-- polkadot/node/service/chain-specs/kusama.json | 4 ++-- polkadot/node/service/chain-specs/polkadot.json | 4 ++-- 10 files changed, 23 insertions(+), 17 deletions(-) diff --git a/cumulus/parachains/chain-specs/asset-hub-kusama.json b/cumulus/parachains/chain-specs/asset-hub-kusama.json index 00e342381ee..36cccd9b0b0 100644 --- a/cumulus/parachains/chain-specs/asset-hub-kusama.json +++ b/cumulus/parachains/chain-specs/asset-hub-kusama.json @@ -17,8 +17,8 @@ "/dns/statemine-bootnode.turboflakes.io/tcp/30420/wss/p2p/12D3KooWN2Qqvp5wWgjbBMpbqhKgvSibSHfomP5VWVD9VCn3VrV4", "/dns/boot-node.helikon.io/tcp/10210/p2p/12D3KooWFXRQce3aMgZMn5SxvHtYH4PsR63TZLf8LrnBsEVTyzdr", "/dns/boot-node.helikon.io/tcp/10212/wss/p2p/12D3KooWFXRQce3aMgZMn5SxvHtYH4PsR63TZLf8LrnBsEVTyzdr", - "/dns/statemine.bootnode.amforc.com/tcp/30336/p2p/12D3KooWHmSyrBWsc6fdpq8HtCFWasmLVLYGKWA2a78m4xAHKyBq", - "/dns/statemine.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWHmSyrBWsc6fdpq8HtCFWasmLVLYGKWA2a78m4xAHKyBq", + "/dns/asset-hub-kusama.bootnode.amforc.com/tcp/30007/p2p/12D3KooWHy1CPndZYphwdVqMb295KPC6LRt17Ae3zNSr7evzeF5a", + "/dns/asset-hub-kusama.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWHy1CPndZYphwdVqMb295KPC6LRt17Ae3zNSr7evzeF5a", "/dns/statemine-boot-ng.dwellir.com/tcp/30343/p2p/12D3KooWQNJKBaNfW6Nn7HZDi5pSSEFmHL2Qz7chr9RksQUDR1Wk", "/dns/statemine-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWQNJKBaNfW6Nn7HZDi5pSSEFmHL2Qz7chr9RksQUDR1Wk", "/dns/statemine-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWCKUrE5uaXQ288ko3Ex3zCyozyJLG47KEYTopinnXNtYL", diff --git a/cumulus/parachains/chain-specs/asset-hub-polkadot.json b/cumulus/parachains/chain-specs/asset-hub-polkadot.json index 22b11757b66..f7f53f8d724 100644 --- a/cumulus/parachains/chain-specs/asset-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/asset-hub-polkadot.json @@ -17,8 +17,8 @@ "/dns/statemint-bootnode.turboflakes.io/tcp/30415/wss/p2p/12D3KooWL8CyLww3m3pRySQGGYGNJhWDMqko3j5xi67ckP7hDUvo", "/dns/boot-node.helikon.io/tcp/10220/p2p/12D3KooW9uybhguhDjVJc3U3kgZC3i8rWmAnSpbnJkmuR7C6ZsRW", "/dns/boot-node.helikon.io/tcp/10222/wss/p2p/12D3KooW9uybhguhDjVJc3U3kgZC3i8rWmAnSpbnJkmuR7C6ZsRW", - "/dns/statemint.bootnode.amforc.com/tcp/30341/p2p/12D3KooWByohP9FXn7ao8syS167qJsbFdpa7fY2Y24xbKtt3r7Ls", - "/dns/statemint.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWByohP9FXn7ao8syS167qJsbFdpa7fY2Y24xbKtt3r7Ls", + "/dns/asset-hub-polkadot.bootnode.amforc.com/tcp/30007/p2p/12D3KooWDLxPXYnSHjNwq9ibqgxuzRni5VViuGNSjNe3ueqVgqE3", + "/dns/asset-hub-polkadot.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWDLxPXYnSHjNwq9ibqgxuzRni5VViuGNSjNe3ueqVgqE3", "/dns/statemint-boot-ng.dwellir.com/tcp/30344/p2p/12D3KooWEFrNuNk8fPdQS2hf34Gmqi6dGSvrETshGJUrqrvfRDZr", "/dns/statemint-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWEFrNuNk8fPdQS2hf34Gmqi6dGSvrETshGJUrqrvfRDZr", "/dns/statemint-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWLKxHom7f3XawRJqrF8RwiKK5Sj3qZqz5c7hF6eJeXhTx", diff --git a/cumulus/parachains/chain-specs/bridge-hub-kusama.json b/cumulus/parachains/chain-specs/bridge-hub-kusama.json index 46b33ed44c1..2c63b52d783 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-kusama.json +++ b/cumulus/parachains/chain-specs/bridge-hub-kusama.json @@ -17,8 +17,8 @@ 
"/dns/bridge-hub-kusama-bootnode.turboflakes.io/tcp/30715/wss/p2p/12D3KooWE3dJXbwA5SQqbDNxHfj7BXJRcy2KiXWjJY4VUMKoa7S2", "/dns/boot-node.helikon.io/tcp/10250/p2p/12D3KooWDJLkhqQdXcVKWX7CqJHnpAY6PzrPc4ZG2CUWnARbmguy", "/dns/boot-node.helikon.io/tcp/10252/wss/p2p/12D3KooWDJLkhqQdXcVKWX7CqJHnpAY6PzrPc4ZG2CUWnARbmguy", - "/dns/bridge-hub-kusama.bootnode.amforc.com/tcp/30337/p2p/12D3KooWGNeQJ5rXnEJkVUuQqwHd8aV5GkTAheaRoCaK8ZwW94id", - "/dns/bridge-hub-kusama.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWGNeQJ5rXnEJkVUuQqwHd8aV5GkTAheaRoCaK8ZwW94id", + "/dns/bridge-hub-kusama.bootnode.amforc.com/tcp/30010/p2p/12D3KooWNyTBwRvCz1Ey2SgC1f3MvymhiAyLEa3cL8kU5gFH3V7Z", + "/dns/bridge-hub-kusama.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWNyTBwRvCz1Ey2SgC1f3MvymhiAyLEa3cL8kU5gFH3V7Z", "/dns/kusama-bridge-hub-boot-ng.dwellir.com/tcp/30337/p2p/12D3KooWBFskNCQDVjuUeBh6vrszWrUvYMBBhtZRLnoTZDdLYbW5", "/dns/kusama-bridge-hub-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWBFskNCQDVjuUeBh6vrszWrUvYMBBhtZRLnoTZDdLYbW5", "/dns/bridgehub-kusama-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWQMWofXj8v3RroDNnrhv1iURqm8vnaG98AdGnCn2YoDcW", diff --git a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json index 0a642caddb7..7d3ba835703 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json @@ -26,7 +26,9 @@ "/dns/pbr13.rotko.net/tcp/35543/wss/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30517/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB", "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30519/wss/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB", - "/dns/boot-polkadot-bridgehub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWKf3mBXHjLbwtPqv1BdbQuwbFNcQQYxASS7iQ25264AXH" + "/dns/boot-polkadot-bridgehub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWKf3mBXHjLbwtPqv1BdbQuwbFNcQQYxASS7iQ25264AXH", + "/dns/bridge-hub-polkadot.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWGT5E56rAHfT5dY1pMLTrpAgV72yfDtD1Y5tPCHaTsifp", + "/dns/bridge-hub-polkadot.bootnode.amforc.com/tcp/30010/p2p/12D3KooWGT5E56rAHfT5dY1pMLTrpAgV72yfDtD1Y5tPCHaTsifp" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-polkadot.json b/cumulus/parachains/chain-specs/collectives-polkadot.json index b2f3ff812d0..a0d5ddff6eb 100644 --- a/cumulus/parachains/chain-specs/collectives-polkadot.json +++ b/cumulus/parachains/chain-specs/collectives-polkadot.json @@ -17,8 +17,8 @@ "/dns/collectives-polkadot-bootnode.turboflakes.io/tcp/30705/wss/p2p/12D3KooWPyzM7eX64J4aG8uRfSARakDVtiEtthEM8FUjrLWAg2sC", "/dns/boot-node.helikon.io/tcp/10230/p2p/12D3KooWS8CBz4P5CBny9aBy2EQUvAExFo9PUVT57X8r3zWMFkXT", "/dns/boot-node.helikon.io/tcp/10232/wss/p2p/12D3KooWS8CBz4P5CBny9aBy2EQUvAExFo9PUVT57X8r3zWMFkXT", - "/dns/collectives-polkadot.bootnode.amforc.com/tcp/30335/p2p/12D3KooWQeAjDnGkrPe5vtpfnB6ydZfWyMxyrXLkBFmA6o4k9aiU", - "/dns/collectives-polkadot.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWQeAjDnGkrPe5vtpfnB6ydZfWyMxyrXLkBFmA6o4k9aiU", + "/dns/collectives-polkadot.bootnode.amforc.com/tcp/30013/p2p/12D3KooWL6v6FHMtCP5VsiDbMHLRFiW6YBtv37BarpW3hLqnDski", + "/dns/collectives-polkadot.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWL6v6FHMtCP5VsiDbMHLRFiW6YBtv37BarpW3hLqnDski", "/dns/polkadot-collectives-boot-ng.dwellir.com/tcp/30341/p2p/12D3KooWDMFYCNRAQcSRNV7xu2xv8319goSEbSHW4TnXRz6EpPKc", 
"/dns/polkadot-collectives-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWDMFYCNRAQcSRNV7xu2xv8319goSEbSHW4TnXRz6EpPKc", "/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c", diff --git a/cumulus/parachains/chain-specs/coretime-kusama.json b/cumulus/parachains/chain-specs/coretime-kusama.json index 4ebaab82e75..f9310d6c7cc 100644 --- a/cumulus/parachains/chain-specs/coretime-kusama.json +++ b/cumulus/parachains/chain-specs/coretime-kusama.json @@ -24,7 +24,9 @@ "/dns/boot-node.helikon.io/tcp/7420/p2p/12D3KooWK4eKFpYftyuLdBdXrkdJXHKt7KZcNLb92Ufkvo17B9T2", "/dns/boot-node.helikon.io/tcp/7422/wss/p2p/12D3KooWK4eKFpYftyuLdBdXrkdJXHKt7KZcNLb92Ufkvo17B9T2", "/dns/coretime-kusama-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWFzW9AgxNfkVNCepVByS7URDCRDAA5p3XzBLVptqZvWoL", - "/dns/coretime-kusama-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWFzW9AgxNfkVNCepVByS7URDCRDAA5p3XzBLVptqZvWoL" + "/dns/coretime-kusama-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWFzW9AgxNfkVNCepVByS7URDCRDAA5p3XzBLVptqZvWoL", + "/dns/coretime-kusama.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWPrgxrrumrANp6Bp2SMEwMQHPHDbPzA1HbcrakZrbFi5P", + "/dns/coretime-kusama.bootnode.amforc.com/tcp/30013/p2p/12D3KooWPrgxrrumrANp6Bp2SMEwMQHPHDbPzA1HbcrakZrbFi5P" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/people-kusama.json b/cumulus/parachains/chain-specs/people-kusama.json index 518a7be7515..00a38b675de 100644 --- a/cumulus/parachains/chain-specs/people-kusama.json +++ b/cumulus/parachains/chain-specs/people-kusama.json @@ -6,7 +6,9 @@ "/dns/kusama-people-connect-0.polkadot.io/tcp/30334/p2p/12D3KooWQaqG5TNmDfRWrtH7tMsN7YeqwVkSfoZT4GkemSzezNi1", "/dns/kusama-people-connect-1.polkadot.io/tcp/30334/p2p/12D3KooWKhYoQH9LdSyvY3SVZY9gFf6ZV1bFh6317TRehUP3r5fm", "/dns/kusama-people-connect-0.polkadot.io/tcp/443/wss/p2p/12D3KooWQaqG5TNmDfRWrtH7tMsN7YeqwVkSfoZT4GkemSzezNi1", - "/dns/kusama-people-connect-1.polkadot.io/tcp/443/wss/p2p/12D3KooWKhYoQH9LdSyvY3SVZY9gFf6ZV1bFh6317TRehUP3r5fm" + "/dns/kusama-people-connect-1.polkadot.io/tcp/443/wss/p2p/12D3KooWKhYoQH9LdSyvY3SVZY9gFf6ZV1bFh6317TRehUP3r5fm", + "/dns/people-kusama.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWPjzgKZe5jdG6TY4gwcFq8QxyyhqsYbQo6N29pwGePWLA", + "/dns/people-kusama.bootnode.amforc.com/tcp/30004/p2p/12D3KooWPjzgKZe5jdG6TY4gwcFq8QxyyhqsYbQo6N29pwGePWLA" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/people-westend.json b/cumulus/parachains/chain-specs/people-westend.json index 26e165b4839..8bfbb332641 100644 --- a/cumulus/parachains/chain-specs/people-westend.json +++ b/cumulus/parachains/chain-specs/people-westend.json @@ -21,8 +21,8 @@ "/dns/boot.stake.plus/tcp/46334/wss/p2p/12D3KooWLNWUF4H5WE3dy2rPB56gVcR48XY2rHwEaZ6pGTK6HYFi", "/dns/boot.gatotech.network/tcp/33340/p2p/12D3KooWHwURYtEHpexfrZa8k8hVgVi5FTFr4N8HBnn9kPDsWfgA", "/dns/boot.gatotech.network/tcp/35340/wss/p2p/12D3KooWHwURYtEHpexfrZa8k8hVgVi5FTFr4N8HBnn9kPDsWfgA", - "/dns/people-westend.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWQrMQFAXxJJJCtVr8nViBR6EDsuT1RyqU3eoCMebRQxTf", - "/dns/people-westend.bootnode.amforc.com/tcp/30346/p2p/12D3KooWQrMQFAXxJJJCtVr8nViBR6EDsuT1RyqU3eoCMebRQxTf", + "/dns/people-westend.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWE1btdwDhNpApg8BEe2QwJxdVDtz6a6BRhgTeUh9HMhWs", + "/dns/people-westend.bootnode.amforc.com/tcp/30016/p2p/12D3KooWE1btdwDhNpApg8BEe2QwJxdVDtz6a6BRhgTeUh9HMhWs", 
"/dns/people-westend-bootnode.turboflakes.io/tcp/30650/p2p/12D3KooWQEhmZg3uMkuxVUx3jbsD84zEX4dUKtvHfmCoBWMhybKW", "/dns/people-westend-bootnode.turboflakes.io/tcp/30750/wss/p2p/12D3KooWQEhmZg3uMkuxVUx3jbsD84zEX4dUKtvHfmCoBWMhybKW", "/dns/wppl16.rotko.net/tcp/33766/p2p/12D3KooWHwUXBUo2WRMUBwPLC2ttVbnEk1KvDyESYAeKcNoCn7WS", diff --git a/polkadot/node/service/chain-specs/kusama.json b/polkadot/node/service/chain-specs/kusama.json index 899b302155f..dfe79fd9c5e 100644 --- a/polkadot/node/service/chain-specs/kusama.json +++ b/polkadot/node/service/chain-specs/kusama.json @@ -14,8 +14,8 @@ "/dns/boot.stake.plus/tcp/31334/wss/p2p/12D3KooWLa1UyG5xLPds2GbiRBCTJjpsVwRWHWN7Dff14yiNJRpR", "/dns/boot-node.helikon.io/tcp/7060/p2p/12D3KooWL4KPqfAsPE2aY1g5Zo1CxsDwcdJ7mmAghK7cg6M2fdbD", "/dns/boot-node.helikon.io/tcp/7062/wss/p2p/12D3KooWL4KPqfAsPE2aY1g5Zo1CxsDwcdJ7mmAghK7cg6M2fdbD", - "/dns/kusama.bootnode.amforc.com/tcp/30333/p2p/12D3KooWLx6nsj6Fpd8biP1VDyuCUjazvRiGWyBam8PsqRJkbUb9", - "/dns/kusama.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWLx6nsj6Fpd8biP1VDyuCUjazvRiGWyBam8PsqRJkbUb9", + "/dns/kusama.bootnode.amforc.com/tcp/30001/p2p/12D3KooWKvYf6qKaAF8UUDw3KsTwjHLnvkED23yxHbH3npMe8w4G", + "/dns/kusama.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWKvYf6qKaAF8UUDw3KsTwjHLnvkED23yxHbH3npMe8w4G", "/dns/kusama.bootnodes.polkadotters.com/tcp/30311/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", "/dns/kusama.bootnodes.polkadotters.com/tcp/30313/wss/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", "/dns/boot.gatotech.network/tcp/33200/p2p/12D3KooWRNZXf99BfzQDE1C8YhuBbuy7Sj18UEf7FNpD8egbURYD", diff --git a/polkadot/node/service/chain-specs/polkadot.json b/polkadot/node/service/chain-specs/polkadot.json index 04def54f794..f79b6db90fc 100644 --- a/polkadot/node/service/chain-specs/polkadot.json +++ b/polkadot/node/service/chain-specs/polkadot.json @@ -15,8 +15,8 @@ "/dns/boot.stake.plus/tcp/30334/wss/p2p/12D3KooWKT4ZHNxXH4icMjdrv7EwWBkfbz5duxE5sdJKKeWFYi5n", "/dns/boot-node.helikon.io/tcp/7070/p2p/12D3KooWS9ZcvRxyzrSf6p63QfTCWs12nLoNKhGux865crgxVA4H", "/dns/boot-node.helikon.io/tcp/7072/wss/p2p/12D3KooWS9ZcvRxyzrSf6p63QfTCWs12nLoNKhGux865crgxVA4H", - "/dns/polkadot.bootnode.amforc.com/tcp/30333/p2p/12D3KooWAsuCEVCzUVUrtib8W82Yne3jgVGhQZN3hizko5FTnDg3", - "/dns/polkadot.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWAsuCEVCzUVUrtib8W82Yne3jgVGhQZN3hizko5FTnDg3", + "/dns/polkadot.bootnode.amforc.com/tcp/30001/p2p/12D3KooWT2HyZx5C6BBeLbCKhYG2SqJYuiu7sLMxGzUcQBko3BMr", + "/dns/polkadot.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWT2HyZx5C6BBeLbCKhYG2SqJYuiu7sLMxGzUcQBko3BMr", "/dns/polkadot.bootnodes.polkadotters.com/tcp/30314/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", "/dns/polkadot.bootnodes.polkadotters.com/tcp/30316/wss/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", "/dns/boot.gatotech.network/tcp/33100/p2p/12D3KooWK4E16jKk9nRhvC4RfrDVgcZzExg8Q3Q2G7ABUUitks1w", -- GitLab From f66e693a6befef0956a3129254fbe568247c9c57 Mon Sep 17 00:00:00 2001 From: Egor_P Date: Mon, 3 Jun 2024 10:30:36 +0200 Subject: [PATCH 102/106] Add chain-spec-builder docker image (#4655) This PR adds possibility to publish container images for the `chain-spec-builder` binary on the regular basis. 
Related to: https://github.com/paritytech/release-engineering/issues/190 --- .../workflows/release-50_publish-docker.yml | 62 +++++++++++-------- docker/dockerfiles/binary_injected.Dockerfile | 2 +- .../chain-spec-builder/build-injected.sh | 14 +++++ .../scripts/chain-spec-builder/test-build.sh | 19 ++++++ 4 files changed, 70 insertions(+), 27 deletions(-) create mode 100755 docker/scripts/chain-spec-builder/build-injected.sh create mode 100755 docker/scripts/chain-spec-builder/test-build.sh diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index 67e93ee9657..4679f58578f 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -27,6 +27,7 @@ on: options: - polkadot - polkadot-parachain + - chain-spec-builder release_id: description: | @@ -74,7 +75,7 @@ env: jobs: fetch-artifacts: # this job will be triggered for the polkadot-parachain rc and release or polkadot rc image build - if: ${{ inputs.binary == 'polkadot-parachain' || inputs.image_type == 'rc' }} + if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'chain-spec-builder' || inputs.image_type == 'rc' }} runs-on: ubuntu-latest steps: @@ -97,7 +98,7 @@ jobs: - name: Fetch rc artifacts or release artifacts from s3 based on version #this step runs only if the workflow is triggered manually - if: ${{ env.EVENT_NAME == 'workflow_dispatch' }} + if: ${{ env.EVENT_NAME == 'workflow_dispatch' && inputs.binary != 'chain-spec-builder'}} run: | . ./.github/scripts/common/lib.sh @@ -106,15 +107,22 @@ jobs: fetch_release_artifacts_from_s3 - - name: Cache the artifacts - uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 + - name: Fetch chain-spec-builder rc artifacts or release artifacts based on release id + #this step runs only if the workflow is triggered manually and only for chain-spec-builder + if: ${{ env.EVENT_NAME == 'workflow_dispatch' && inputs.binary == 'chain-spec-builder' }} + run: | + . ./.github/scripts/common/lib.sh + RELEASE_ID=$(check_release_id "${{ inputs.release_id }}") + fetch_release_artifacts + + - name: Upload artifacts + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: - key: artifacts-${{ env.BINARY }}-${{ github.sha }} - path: | - ./release-artifacts/${{ env.BINARY }}/**/* + name: release-artifacts + path: release-artifacts/${{ env.BINARY }}/**/* build-container: # this job will be triggered for the polkadot-parachain rc and release or polkadot rc image build - if: ${{ inputs.binary == 'polkadot-parachain' || inputs.image_type == 'rc' }} + if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'chain-spec-builder' || inputs.image_type == 'rc' }} runs-on: ubuntu-latest needs: fetch-artifacts environment: release @@ -123,26 +131,23 @@ jobs: - name: Checkout sources uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: Get artifacts from cache - uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 - with: - key: artifacts-${{ env.BINARY }}-${{ github.sha }} - fail-on-cache-miss: true - path: | - ./release-artifacts/${{ env.BINARY }}/**/* + - name: Download artifacts + uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4 - name: Check sha256 ${{ env.BINARY }} - working-directory: ./release-artifacts/${{ env.BINARY }} + if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} + working-directory: release-artifacts run: | - . 
../../.github/scripts/common/lib.sh + . ../.github/scripts/common/lib.sh echo "Checking binary $BINARY" check_sha256 $BINARY && echo "OK" || echo "ERR" - name: Check GPG ${{ env.BINARY }} - working-directory: ./release-artifacts/${{ env.BINARY }} + if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }} + working-directory: release-artifacts run: | - . ../../.github/scripts/common/lib.sh + . ../.github/scripts/common/lib.sh import_gpg_keys check_gpg $BINARY @@ -164,20 +169,21 @@ jobs: echo "No tag, doing without" - name: Fetch release tags - working-directory: ./release-artifacts/${{ env.BINARY }} + working-directory: release-artifacts if: ${{ env.IMAGE_TYPE == 'release'}} id: fetch_release_refs run: | chmod a+rx $BINARY - VERSION=$(./$BINARY --version | awk '{ print $2 }' ) + [[ $BINARY != 'chain-spec-builder' ]] && VERSION=$(./$BINARY --version | awk '{ print $2 }' ) + release=$( echo $VERSION | cut -f1 -d- ) echo "tag=latest" >> $GITHUB_OUTPUT echo "release=${release}" >> $GITHUB_OUTPUT - - name: Build Injected Container image for polkadot rc - if: ${{ env.BINARY == 'polkadot' }} + - name: Build Injected Container image for polkadot rc or chain-spec-builder + if: ${{ env.BINARY == 'polkadot' || env.BINARY == 'chain-spec-builder' }} env: - ARTIFACTS_FOLDER: ./release-artifacts + ARTIFACTS_FOLDER: release-artifacts IMAGE_NAME: ${{ env.BINARY }} OWNER: ${{ env.DOCKER_OWNER }} TAGS: ${{ join(steps.fetch_rc_refs.outputs.*, ',') || join(steps.fetch_release_refs.outputs.*, ',') }} @@ -189,7 +195,7 @@ jobs: - name: Build Injected Container image for polkadot-parachain if: ${{ env.BINARY == 'polkadot-parachain' }} env: - ARTIFACTS_FOLDER: ./release-artifacts + ARTIFACTS_FOLDER: release-artifacts IMAGE_NAME: ${{ env.BINARY }} OWNER: ${{ env.DOCKER_OWNER }} DOCKERFILE: docker/dockerfiles/polkadot-parachain/polkadot-parachain_injected.Dockerfile @@ -219,7 +225,11 @@ jobs: RELEASE_TAG: ${{ steps.fetch_rc_refs.outputs.release || steps.fetch_release_refs.outputs.release }} run: | echo "Checking tag ${RELEASE_TAG} for image ${REGISTRY}/${DOCKER_OWNER}/${BINARY}" - $ENGINE run -i ${REGISTRY}/${DOCKER_OWNER}/${BINARY}:${RELEASE_TAG} --version + if [[ ${BINARY} == 'chain-spec-builder' ]]; then + $ENGINE run -i ${REGISTRY}/${DOCKER_OWNER}/${BINARY}:${RELEASE_TAG} + else + $ENGINE run -i ${REGISTRY}/${DOCKER_OWNER}/${BINARY}:${RELEASE_TAG} --version + fi fetch-latest-debian-package-version: # this job will be triggered for polkadot release build if: ${{ inputs.binary == 'polkadot' && inputs.image_type == 'release' }} diff --git a/docker/dockerfiles/binary_injected.Dockerfile b/docker/dockerfiles/binary_injected.Dockerfile index c8930bd83f0..26c0ef7ae64 100644 --- a/docker/dockerfiles/binary_injected.Dockerfile +++ b/docker/dockerfiles/binary_injected.Dockerfile @@ -32,7 +32,7 @@ LABEL io.parity.image.authors=${AUTHORS} \ USER root WORKDIR /app -# add polkadot binary to docker image +# add binary to docker image # sample for polkadot: COPY ./polkadot ./polkadot-*-worker /usr/local/bin/ COPY entrypoint.sh . 
COPY "bin/*" "/usr/local/bin/" diff --git a/docker/scripts/chain-spec-builder/build-injected.sh b/docker/scripts/chain-spec-builder/build-injected.sh new file mode 100755 index 00000000000..ede6cee3851 --- /dev/null +++ b/docker/scripts/chain-spec-builder/build-injected.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +# Sample call: +# $0 /path/to/folder_with_binary +# This script replace the former dedicated Dockerfile +# and shows how to use the generic binary_injected.dockerfile + +PROJECT_ROOT=`git rev-parse --show-toplevel` + +export BINARY=chain-spec-builder +export ARTIFACTS_FOLDER=$1 +# export TAGS=... + +$PROJECT_ROOT/docker/scripts/build-injected.sh diff --git a/docker/scripts/chain-spec-builder/test-build.sh b/docker/scripts/chain-spec-builder/test-build.sh new file mode 100755 index 00000000000..a42cab97703 --- /dev/null +++ b/docker/scripts/chain-spec-builder/test-build.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +TMP=$(mktemp -d) +ENGINE=${ENGINE:-podman} + +export TAGS=latest,beta,7777,1.0.2-rc23 + +# Fetch some binaries +$ENGINE run --user root --rm -i \ + --pull always \ + -v "$TMP:/export" \ + --entrypoint /bin/bash \ + parity/chain-spec-builder -c \ + 'cp "$(which chain-spec-builder)" /export' + +echo "Checking binaries we got:" +ls -al $TMP + +./build-injected.sh $TMP -- GitLab From dfb01aaa046ddfb4d7606c8e0b7112c9f719288a Mon Sep 17 00:00:00 2001 From: Przemek Rzad Date: Mon, 3 Jun 2024 11:51:51 +0200 Subject: [PATCH 103/106] Revamp the Readme of the minimal template (#4649) - Addresses [this](https://github.com/paritytech/polkadot-sdk/issues/3155#issuecomment-2126934939). - Technical content got adopted from the existing [solochain readme](https://github.com/paritytech/polkadot-sdk/tree/master/templates/solochain). - Updated some broken links there. - The docker instructions will work after https://github.com/paritytech/polkadot-sdk/pull/4637. - See the [rendered version](https://github.com/paritytech/polkadot-sdk/blob/rzadp/minimal-template-readme/templates/minimal/README.md). --------- Co-authored-by: gupnik --- templates/minimal/README.md | 96 ++++++++++++++++++++++++++--- templates/minimal/node/README.md | 14 +++++ templates/minimal/pallets/README.md | 9 +++ templates/minimal/runtime/README.md | 8 +++ templates/solochain/README.md | 7 +-- 5 files changed, 123 insertions(+), 11 deletions(-) create mode 100644 templates/minimal/node/README.md create mode 100644 templates/minimal/pallets/README.md create mode 100644 templates/minimal/runtime/README.md diff --git a/templates/minimal/README.md b/templates/minimal/README.md index 0541e393db9..3488bc43cc9 100644 --- a/templates/minimal/README.md +++ b/templates/minimal/README.md @@ -1,13 +1,95 @@ -# Minimal Template +
-This is a minimal template for creating a blockchain using the Polkadot SDK. +# Polkadot SDK's Minimal Template -# Docs +Polkadot SDK Logo +Polkadot SDK Logo -You can generate and view the [Rust -Docs](https://doc.rust-lang.org/cargo/commands/cargo-doc.html) for this template -with this command: +> This is a minimal template for creating a blockchain based on Polkadot SDK. +> +> This template is automatically updated after releases in the main [Polkadot SDK monorepo](https://github.com/paritytech/polkadot-sdk). + +
+
+🤏 This template is a minimal (in terms of complexity and the number of components) template for building a blockchain node.
+
+🔧 Its runtime is configured with a single custom pallet as a starting point, and a handful of ready-made pallets such as a [Balances pallet](https://paritytech.github.io/polkadot-sdk/master/pallet_balances/index.html).
+
+👤 The template has no consensus configured - it is best for experimenting with a single node network.
+
+## Template Structure
+
+A Polkadot SDK based project such as this one consists of:
+
+- 💿 a [Node](./node/README.md) - the binary application.
+- 🧮 the [Runtime](./runtime/README.md) - the core logic of the blockchain.
+- 🎨 the [Pallets](./pallets/README.md) - from which the runtime is constructed.
+
+## Getting Started
+
+🦀 The template uses the Rust language.
+
+👉 Check the
+[Rust installation instructions](https://www.rust-lang.org/tools/install) for your system.
+
+🛠️ Depending on your operating system and Rust version, there might be additional
+packages required to compile this template - please take note of the Rust compiler output.
+
+### Build
+
+🔨 Use the following command to build the node without launching it:
+
+```sh
+cargo build --release
+```
+
+🐳 Alternatively, build the docker image:
+
+```sh
+docker build . -t polkadot-sdk-minimal-template
+```
+
+### Single-Node Development Chain
+
+👤 The following command starts a single-node development chain:
+
+```sh
+./target/release/minimal-template-node --dev
+
+# docker version:
+docker run --rm polkadot-sdk-minimal-template --dev
+```
+
+Development chains:
+
+- 🧹 Do not persist the state.
+- 💰 Are preconfigured with a genesis state that includes several prefunded development accounts.
+- 🧑‍⚖️ Development accounts are used as `sudo` accounts.
+
+### Connect with the Polkadot-JS Apps Front-End
+
+🌐 You can interact with your local node using the
+hosted version of the [Polkadot/Substrate
+Portal](https://polkadot.js.org/apps/#/explorer?rpc=ws://localhost:9944).
+
+🍪 A hosted version is also
+available on [IPFS](https://dotapps.io/).
+
+🧑‍🔧 You can also find the source code and instructions for hosting your own instance in the
+[`polkadot-js/apps`](https://github.com/polkadot-js/apps) repository.
+
+## Contributing
+
+🔄 This template is automatically updated after releases in the main [Polkadot SDK monorepo](https://github.com/paritytech/polkadot-sdk).
+
+➡️ Any pull requests should be directed to this [source](https://github.com/paritytech/polkadot-sdk/tree/master/templates/minimal).
+
+😇 Please refer to the monorepo's [contribution guidelines](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md) and [Code of Conduct](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CODE_OF_CONDUCT.md).
+
+## Getting Help
+
+🧑‍🏫 To learn about Polkadot in general, the [Polkadot.network](https://polkadot.network/) website is a good starting point.
+
+🧑‍🔧 For a technical introduction, [here](https://github.com/paritytech/polkadot-sdk#-documentation) are the Polkadot SDK documentation resources.
+
+👥 Additionally, there are [GitHub issues](https://github.com/paritytech/polkadot-sdk/issues) and [Substrate StackExchange](https://substrate.stackexchange.com/).
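As a quick sanity check to complement the instructions in the readme above, a running dev node can also be probed over raw JSON-RPC; this is a sketch that assumes the node is listening on the default RPC port 9944:

```sh
# Query the standard system_health RPC method on a local dev node
# (assumes the default RPC port 9944 is exposed):
curl -H "Content-Type: application/json" \
     -d '{"id": 1, "jsonrpc": "2.0", "method": "system_health", "params": []}' \
     http://localhost:9944
```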
diff --git a/templates/minimal/node/README.md b/templates/minimal/node/README.md
new file mode 100644
index 00000000000..04a916f5053
--- /dev/null
+++ b/templates/minimal/node/README.md
@@ -0,0 +1,14 @@
+# Node
+
+ℹ️ A node - in Polkadot - is a binary executable whose primary purpose is to execute the [runtime](../runtime/README.md).
+
+🔗 It communicates with other nodes in the network, and aims for [consensus](https://wiki.polkadot.network/docs/learn-consensus) among them.
+
+⚙️ It acts as a remote procedure call (RPC) server, allowing interaction with the blockchain.
+
+👉 Learn more about the architecture, and the difference between a node and a runtime, [here](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/wasm_meta_protocol/index.html).
+
+👇 Here are the most important files in this node template:
+
+- [`chain_spec.rs`](./src/chain_spec.rs): A chain specification is a source code file that defines the chain's initial (genesis) state.
+- [`service.rs`](./src/service.rs): This file defines the node implementation. It's a place to configure consensus-related topics. In favor of minimalism, this template has no consensus configured.
diff --git a/templates/minimal/pallets/README.md b/templates/minimal/pallets/README.md
new file mode 100644
index 00000000000..26003638e9a
--- /dev/null
+++ b/templates/minimal/pallets/README.md
@@ -0,0 +1,9 @@
+# Pallets
+
+ℹ️ A pallet is a unit of encapsulated logic, with a clearly defined responsibility. A pallet is analogous to a module in the runtime.
+
+💍 In this template, there is a simple custom pallet based on the FRAME framework.
+
+👉 Learn more about FRAME [here](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/frame_runtime/index.html).
+
+🧑‍🏫 Please refer to [this guide](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/guides/your_first_pallet/index.html) to learn how to write a basic pallet.
diff --git a/templates/minimal/runtime/README.md b/templates/minimal/runtime/README.md
new file mode 100644
index 00000000000..2fdfef8bc35
--- /dev/null
+++ b/templates/minimal/runtime/README.md
@@ -0,0 +1,8 @@
+# Runtime
+
+ℹ️ The runtime (in other words, a state transition function) refers to the core logic of the blockchain that is responsible for
+validating blocks and executing the state changes they define.
+
+💍 The runtime in this template is constructed using ready-made FRAME pallets that ship with [Polkadot SDK](https://github.com/paritytech/polkadot-sdk), and a [template for a custom pallet](../pallets/README.md).
+
+👉 Learn more about FRAME [here](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/frame_runtime/index.html).
diff --git a/templates/solochain/README.md b/templates/solochain/README.md
index 37c65797dcb..2e3b1146a8f 100644
--- a/templates/solochain/README.md
+++ b/templates/solochain/README.md
@@ -103,9 +103,8 @@
 After you start the node template locally, you can interact with it using the
 hosted version of the [Polkadot/Substrate
 Portal](https://polkadot.js.org/apps/#/explorer?rpc=ws://localhost:9944)
 front-end by connecting to the local node endpoint. A hosted version is also
-available on [IPFS (redirect) here](https://dotapps.io/) or [IPNS (direct)
-here](ipns://dotapps.io/?rpc=ws%3A%2F%2F127.0.0.1%3A9944#/explorer). You can
-also find the source code and instructions for hosting your own instance on the
+available on [IPFS](https://dotapps.io/).
You can +also find the source code and instructions for hosting your own instance in the [`polkadot-js/apps`](https://github.com/polkadot-js/apps) repository. ### Multi-Node Local Testnet @@ -131,7 +130,7 @@ capabilities: the network. Substrate makes it possible to supply custom consensus engines and also ships with several consensus mechanisms that have been built on top of [Web3 Foundation - research](https://research.web3.foundation/en/latest/polkadot/NPoS/index.html). + research](https://research.web3.foundation/Polkadot/protocols/NPoS). - RPC Server: A remote procedure call (RPC) server is used to interact with Substrate nodes. -- GitLab From 6ede7a05bd978f829b66a774ae5fb041c3ee064c Mon Sep 17 00:00:00 2001 From: Przemek Rzad Date: Mon, 3 Jun 2024 12:44:01 +0200 Subject: [PATCH 104/106] Add Dockerfiles to the templates (#4637) As requested [here](https://github.com/paritytech/polkadot-sdk/issues/3155#issuecomment-2126934939). The Dockerfiles are inspired by [this one](https://github.com/paritytech/polkadot-sdk/blob/aa32faaebf64426becb2feeede347740eb7a3908/docker/dockerfiles/polkadot/polkadot_builder.Dockerfile). --- templates/minimal/.dockerignore | 3 +++ templates/minimal/Dockerfile | 28 ++++++++++++++++++++++++++++ templates/parachain/.dockerignore | 3 +++ templates/parachain/Dockerfile | 28 ++++++++++++++++++++++++++++ templates/solochain/.dockerignore | 3 +++ templates/solochain/Dockerfile | 28 ++++++++++++++++++++++++++++ 6 files changed, 93 insertions(+) create mode 100644 templates/minimal/.dockerignore create mode 100644 templates/minimal/Dockerfile create mode 100644 templates/parachain/.dockerignore create mode 100644 templates/parachain/Dockerfile create mode 100644 templates/solochain/.dockerignore create mode 100644 templates/solochain/Dockerfile diff --git a/templates/minimal/.dockerignore b/templates/minimal/.dockerignore new file mode 100644 index 00000000000..da6a8f2620d --- /dev/null +++ b/templates/minimal/.dockerignore @@ -0,0 +1,3 @@ +target/ +Dockerfile +.dockerignore diff --git a/templates/minimal/Dockerfile b/templates/minimal/Dockerfile new file mode 100644 index 00000000000..0c59192208f --- /dev/null +++ b/templates/minimal/Dockerfile @@ -0,0 +1,28 @@ +FROM docker.io/paritytech/ci-unified:latest as builder + +WORKDIR /polkadot +COPY . /polkadot + +RUN cargo fetch +RUN cargo build --locked --release + +FROM docker.io/parity/base-bin:latest + +COPY --from=builder /polkadot/target/release/minimal-template-node /usr/local/bin + +USER root +RUN useradd -m -u 1001 -U -s /bin/sh -d /polkadot polkadot && \ + mkdir -p /data /polkadot/.local/share && \ + chown -R polkadot:polkadot /data && \ + ln -s /data /polkadot/.local/share/polkadot && \ +# unclutter and minimize the attack surface + rm -rf /usr/bin /usr/sbin && \ +# check if executable works in this container + /usr/local/bin/minimal-template-node --version + +USER polkadot + +EXPOSE 30333 9933 9944 9615 +VOLUME ["/data"] + +ENTRYPOINT ["/usr/local/bin/minimal-template-node"] diff --git a/templates/parachain/.dockerignore b/templates/parachain/.dockerignore new file mode 100644 index 00000000000..da6a8f2620d --- /dev/null +++ b/templates/parachain/.dockerignore @@ -0,0 +1,3 @@ +target/ +Dockerfile +.dockerignore diff --git a/templates/parachain/Dockerfile b/templates/parachain/Dockerfile new file mode 100644 index 00000000000..72a8f19fe79 --- /dev/null +++ b/templates/parachain/Dockerfile @@ -0,0 +1,28 @@ +FROM docker.io/paritytech/ci-unified:latest as builder + +WORKDIR /polkadot +COPY . 
/polkadot + +RUN cargo fetch +RUN cargo build --locked --release + +FROM docker.io/parity/base-bin:latest + +COPY --from=builder /polkadot/target/release/parachain-template-node /usr/local/bin + +USER root +RUN useradd -m -u 1001 -U -s /bin/sh -d /polkadot polkadot && \ + mkdir -p /data /polkadot/.local/share && \ + chown -R polkadot:polkadot /data && \ + ln -s /data /polkadot/.local/share/polkadot && \ +# unclutter and minimize the attack surface + rm -rf /usr/bin /usr/sbin && \ +# check if executable works in this container + /usr/local/bin/parachain-template-node --version + +USER polkadot + +EXPOSE 30333 9933 9944 9615 +VOLUME ["/data"] + +ENTRYPOINT ["/usr/local/bin/parachain-template-node"] diff --git a/templates/solochain/.dockerignore b/templates/solochain/.dockerignore new file mode 100644 index 00000000000..da6a8f2620d --- /dev/null +++ b/templates/solochain/.dockerignore @@ -0,0 +1,3 @@ +target/ +Dockerfile +.dockerignore diff --git a/templates/solochain/Dockerfile b/templates/solochain/Dockerfile new file mode 100644 index 00000000000..97e6dd29107 --- /dev/null +++ b/templates/solochain/Dockerfile @@ -0,0 +1,28 @@ +FROM docker.io/paritytech/ci-unified:latest as builder + +WORKDIR /polkadot +COPY . /polkadot + +RUN cargo fetch +RUN cargo build --locked --release + +FROM docker.io/parity/base-bin:latest + +COPY --from=builder /polkadot/target/release/solochain-template-node /usr/local/bin + +USER root +RUN useradd -m -u 1001 -U -s /bin/sh -d /polkadot polkadot && \ + mkdir -p /data /polkadot/.local/share && \ + chown -R polkadot:polkadot /data && \ + ln -s /data /polkadot/.local/share/polkadot && \ +# unclutter and minimize the attack surface + rm -rf /usr/bin /usr/sbin && \ +# check if executable works in this container + /usr/local/bin/solochain-template-node --version + +USER polkadot + +EXPOSE 30333 9933 9944 9615 +VOLUME ["/data"] + +ENTRYPOINT ["/usr/local/bin/solochain-template-node"] -- GitLab From 73ac7375a5421bbc142bef232ab23d221ead64c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 3 Jun 2024 13:04:29 +0200 Subject: [PATCH 105/106] Fix umbrella CI check and fix the C&P message (#4670) --- .github/workflows/checks-quick.yml | 7 +++---- Cargo.lock | 1 + umbrella/Cargo.toml | 7 ++++++- umbrella/src/lib.rs | 5 +++++ 4 files changed, 15 insertions(+), 5 deletions(-) diff --git a/.github/workflows/checks-quick.yml b/.github/workflows/checks-quick.yml index cd9baf0d1bc..c4382d1b9b4 100644 --- a/.github/workflows/checks-quick.yml +++ b/.github/workflows/checks-quick.yml @@ -134,13 +134,12 @@ jobs: run: | python3 scripts/generate-umbrella.py --sdk . 
--version 0.1.0 cargo +nightly fmt --all + if [ -n "$(git status --porcelain)" ]; then cat < Date: Mon, 3 Jun 2024 14:22:06 +0200 Subject: [PATCH 106/106] [ci] Delete unused flow (#4676) --- .../release-build-and-attach-runtimes.yml | 65 ------------------- 1 file changed, 65 deletions(-) delete mode 100644 .github/workflows/release-build-and-attach-runtimes.yml diff --git a/.github/workflows/release-build-and-attach-runtimes.yml b/.github/workflows/release-build-and-attach-runtimes.yml deleted file mode 100644 index 680a9ecffd3..00000000000 --- a/.github/workflows/release-build-and-attach-runtimes.yml +++ /dev/null @@ -1,65 +0,0 @@ -name: Build and Attach Runtimes to Releases/RC - -on: - release: - types: - - published - -env: - PROFILE: production - -jobs: - build_and_upload: - strategy: - matrix: - runtime: - - { name: westend, package: westend-runtime, path: polkadot/runtime/westend } - - { name: rococo, package: rococo-runtime, path: polkadot/runtime/rococo } - - { name: asset-hub-rococo, package: asset-hub-rococo-runtime, path: cumulus/parachains/runtimes/assets/asset-hub-rococo } - - { name: asset-hub-westend, package: asset-hub-westend-runtime, path: cumulus/parachains/runtimes/assets/asset-hub-westend } - - { name: bridge-hub-rococo, package: bridge-hub-rococo-runtime, path: cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo } - - { name: contracts-rococo, package: contracts-rococo-runtime, path: cumulus/parachains/runtimes/contracts/contracts-rococo } - - { name: collectives-westend, package: collectives-westend-runtime, path: cumulus/parachains/runtimes/collectives/collectives-westend } - - { name: glutton-westend, package: glutton-westend-runtime, path: cumulus/parachains/runtimes/glutton/glutton-westend } - build_config: - # Release build has logging disabled and no dev features - - { type: on-chain-release, opts: --features on-chain-release-build } - # Debug build has logging enabled and developer features - - { type: dev-debug-build, opts: --features try-runtime } - - runs-on: ubuntu-22.04 - - steps: - - name: Checkout code - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Build ${{ matrix.runtime.name }} ${{ matrix.build_config.type }} - id: srtool_build - uses: chevdor/srtool-actions@v0.9.2 - env: - BUILD_OPTS: ${{ matrix.build_config.opts }} - with: - chain: ${{ matrix.runtime.name }} - package: ${{ matrix.runtime.package }} - runtime_dir: ${{ matrix.runtime.path }} - profile: ${{ env.PROFILE }} - - - name: Set up paths and runtime names - id: setup - run: | - RUNTIME_BLOB_NAME=$(echo ${{ matrix.runtime.package }} | sed 's/-/_/g').compact.compressed.wasm - PREFIX=${{ matrix.build_config.type == 'dev-debug-build' && 'DEV_DEBUG_BUILD__' || '' }} - - echo "RUNTIME_BLOB_NAME=$RUNTIME_BLOB_NAME" >> $GITHUB_ENV - echo "ASSET_PATH=./${{ matrix.runtime.path }}/target/srtool/${{ env.PROFILE }}/wbuild/${{ matrix.runtime.package }}/$RUNTIME_BLOB_NAME" >> $GITHUB_ENV - echo "ASSET_NAME=$PREFIX$RUNTIME_BLOB_NAME" >> $GITHUB_ENV - - - name: Upload Runtime to Release - uses: actions/upload-release-asset@v1 - with: - upload_url: ${{ github.event.release.upload_url }} - asset_path: ${{ env.ASSET_PATH }} - asset_name: ${{ env.ASSET_NAME }} - asset_content_type: application/octet-stream - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} -- GitLab
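Closing the series, the umbrella check fixed in PATCH 105 can in principle be reproduced locally before pushing; the following sketch simply mirrors the CI steps shown in that patch, run from the repository root:

```sh
# Regenerate the umbrella crate and re-format it, mirroring the CI check:
python3 scripts/generate-umbrella.py --sdk . --version 0.1.0
cargo +nightly fmt --all
# Any output from the next command means the umbrella crate is out of sync:
git status --porcelain
```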