From 6f73b746d3caee00030e55369f9e5e0ddac15d83 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Mon, 15 Apr 2024 15:06:22 +0200
Subject: [PATCH 001/107] pallet_broker: Support renewing leases expired in a
 previous period (#4089)

Part of: https://github.com/paritytech/polkadot-sdk/issues/4107

---
 Cargo.lock                                |   1 +
 prdoc/pr_4089.prdoc                       |  11 ++
 substrate/frame/broker/Cargo.toml         |   1 +
 substrate/frame/broker/src/mock.rs        |  11 +-
 substrate/frame/broker/src/tests.rs       | 139 +++++++++++++++++++++++
 substrate/frame/broker/src/tick_impls.rs  |  13 ++-
 6 files changed, 169 insertions(+), 7 deletions(-)
 create mode 100644 prdoc/pr_4089.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index 27cb7af04d6..2339aff34bc 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -9937,6 +9937,7 @@ dependencies = [
  "frame-support",
  "frame-system",
  "parity-scale-codec",
+ "pretty_assertions",
  "scale-info",
  "sp-api",
  "sp-arithmetic",
diff --git a/prdoc/pr_4089.prdoc b/prdoc/pr_4089.prdoc
new file mode 100644
index 00000000000..29ac736ccd0
--- /dev/null
+++ b/prdoc/pr_4089.prdoc
@@ -0,0 +1,11 @@
+title: "pallet_broker: Support renewing leases expired in a previous period"
+
+doc:
+  - audience: Runtime User
+    description: |
+      Allow renewals of leases that ended before `start_sales` or in the first period after calling `start_sales`.
+      This ensures that everyone has a smooth experience when migrating to coretime and that the exact timing of
+      `start_sales` isn't critical.
+
+crates:
+  - name: pallet-broker
diff --git a/substrate/frame/broker/Cargo.toml b/substrate/frame/broker/Cargo.toml
index 969f13e269d..f36b94c3cec 100644
--- a/substrate/frame/broker/Cargo.toml
+++ b/substrate/frame/broker/Cargo.toml
@@ -29,6 +29,7 @@ frame-system = { path = "../system", default-features = false }
 
 [dev-dependencies]
 sp-io = { path = "../../primitives/io" }
+pretty_assertions = "1.3.0"
 
 [features]
 default = ["std"]
diff --git a/substrate/frame/broker/src/mock.rs b/substrate/frame/broker/src/mock.rs
index c7205058c97..6219b4eff1b 100644
--- a/substrate/frame/broker/src/mock.rs
+++ b/substrate/frame/broker/src/mock.rs
@@ -29,7 +29,7 @@ use frame_support::{
 };
 use frame_system::{EnsureRoot, EnsureSignedBy};
 use sp_arithmetic::Perbill;
-use sp_core::{ConstU32, ConstU64};
+use sp_core::{ConstU32, ConstU64, Get};
 use sp_runtime::{
 	traits::{BlockNumberProvider, Identity},
 	BuildStorage, Saturating,
@@ -210,6 +210,15 @@ pub fn advance_to(b: u64) {
 	}
 }
 
+pub fn advance_sale_period() {
+	let sale = SaleInfo::<Test>::get().unwrap();
+
+	let target_block_number =
+		sale.region_begin as u64 * <<Test as crate::Config>::TimeslicePeriod as Get<u64>>::get();
+
+	advance_to(target_block_number)
+}
+
 pub fn pot() -> u64 {
 	balance(Broker::account_id())
 }
diff --git a/substrate/frame/broker/src/tests.rs b/substrate/frame/broker/src/tests.rs
index 0aacd7ef369..7fc7e31e9bb 100644
--- a/substrate/frame/broker/src/tests.rs
+++ b/substrate/frame/broker/src/tests.rs
@@ -24,6 +24,7 @@ use frame_support::{
 	BoundedVec,
 };
 use frame_system::RawOrigin::Root;
+use pretty_assertions::assert_eq;
 use sp_runtime::{traits::Get, TokenError};
 use CoreAssignment::*;
 use CoretimeTraceItem::*;
@@ -1092,3 +1093,141 @@ fn config_works() {
 		assert_noop!(Broker::configure(Root.into(), cfg), Error::<Test>::InvalidConfig);
 	});
 }
+
+/// Ensure that a lease that ended before `start_sales` was called can be renewed.
+#[test]
+fn renewal_works_leases_ended_before_start_sales() {
+	TestExt::new().endow(1, 1000).execute_with(|| {
+		let config = Configuration::<Test>::get().unwrap();
+
+		// This lease ended before `start_sales` was called.
+		assert_ok!(Broker::do_set_lease(1, 1));
+
+		// Go to some block to ensure that the lease of task 1 already ended.
+		advance_to(5);
+
+		// This lease will end three sale periods in.
+		assert_ok!(Broker::do_set_lease(
+			2,
+			Broker::latest_timeslice_ready_to_commit(&config) + config.region_length * 3
+		));
+
+		// This initializes the first sale and period 0.
+		assert_ok!(Broker::do_start_sales(100, 2));
+		assert_noop!(Broker::do_renew(1, 1), Error::<Test>::Unavailable);
+		assert_noop!(Broker::do_renew(1, 0), Error::<Test>::Unavailable);
+
+		// The lease for task 1 should have been dropped; only task 2's lease remains.
+		assert!(Leases::<Test>::get().iter().any(|l| l.task == 2));
+
+		// This initializes the second sale and period 1.
+		advance_sale_period();
+
+		// Now we can finally renew core 0 for task 1.
+		let new_core = Broker::do_renew(1, 0).unwrap();
+		// Renewing the active lease doesn't work.
+		assert_noop!(Broker::do_renew(1, 1), Error::<Test>::SoldOut);
+		assert_eq!(balance(1), 900);
+
+		// This initializes the third sale and period 2.
+		advance_sale_period();
+		let new_core = Broker::do_renew(1, new_core).unwrap();
+
+		// Renewing the active lease doesn't work.
+		assert_noop!(Broker::do_renew(1, 0), Error::<Test>::SoldOut);
+		assert_eq!(balance(1), 800);
+
+		// All leases should have ended.
+		assert!(Leases::<Test>::get().is_empty());
+
+		// This initializes the fourth sale and period 3.
+		advance_sale_period();
+
+		// Renew again.
+		assert_eq!(0, Broker::do_renew(1, new_core).unwrap());
+		// Renew task 2.
+		assert_eq!(1, Broker::do_renew(1, 0).unwrap());
+		assert_eq!(balance(1), 600);
+
+		// This initializes the fifth sale and period 4.
+		advance_sale_period();
+
+		assert_eq!(
+			CoretimeTrace::get(),
+			vec![
+				(
+					10,
+					AssignCore {
+						core: 0,
+						begin: 12,
+						assignment: vec![(Task(1), 57600)],
+						end_hint: None
+					}
+				),
+				(
+					10,
+					AssignCore {
+						core: 1,
+						begin: 12,
+						assignment: vec![(Task(2), 57600)],
+						end_hint: None
+					}
+				),
+				(
+					16,
+					AssignCore {
+						core: 0,
+						begin: 18,
+						assignment: vec![(Task(2), 57600)],
+						end_hint: None
+					}
+				),
+				(
+					16,
+					AssignCore {
+						core: 1,
+						begin: 18,
+						assignment: vec![(Task(1), 57600)],
+						end_hint: None
+					}
+				),
+				(
+					22,
+					AssignCore {
+						core: 0,
+						begin: 24,
+						assignment: vec![(Task(2), 57600)],
+						end_hint: None,
+					},
+				),
+				(
+					22,
+					AssignCore {
+						core: 1,
+						begin: 24,
+						assignment: vec![(Task(1), 57600)],
+						end_hint: None,
+					},
+				),
+				(
+					28,
+					AssignCore {
+						core: 0,
+						begin: 30,
+						assignment: vec![(Task(1), 57600)],
+						end_hint: None,
+					},
+				),
+				(
+					28,
+					AssignCore {
+						core: 1,
+						begin: 30,
+						assignment: vec![(Task(2), 57600)],
+						end_hint: None,
+					},
+				),
+			]
+		);
+	});
+}
diff --git a/substrate/frame/broker/src/tick_impls.rs b/substrate/frame/broker/src/tick_impls.rs
index 388370bce4d..04e9a65bf8f 100644
--- a/substrate/frame/broker/src/tick_impls.rs
+++ b/substrate/frame/broker/src/tick_impls.rs
@@ -216,11 +216,10 @@ impl<T: Config> Pallet<T> {
 			let assignment = CoreAssignment::Task(task);
 			let schedule = BoundedVec::truncate_from(vec![ScheduleItem { mask, assignment }]);
 			Workplan::<T>::insert((region_begin, first_core), &schedule);
-			// Separate these to avoid missed expired leases hanging around forever.
-			let expired = until < region_end;
-			let expiring = until >= region_begin && expired;
-			if expiring {
-				// last time for this one - make it renewable.
+			// Will the lease expire at the end of the period?
+			let expire = until < region_end;
+			if expire {
+				// last time for this one - make it renewable in the next sale.
 				let renewal_id = AllowedRenewalId { core: first_core, when: region_end };
 				let record = AllowedRenewalRecord { price, completion: Complete(schedule) };
 				AllowedRenewals::<T>::insert(renewal_id, &record);
 				Self::deposit_event(Event::Renewable {
 					core: first_core,
 					price,
 					begin: region_end,
 					workload: record.completion.drain_complete().unwrap_or_default(),
 				});
 				Self::deposit_event(Event::LeaseEnding { when: region_end, task });
 			}
+
 			first_core.saturating_inc();
-			!expired
+
+			!expire
 		});
 
 		Leases::<T>::put(&leases);
-- 
GitLab
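
The two sides of this patch fit together as follows: the `tick_impls.rs` hunk above makes `rotate_sale` write an `AllowedRenewalRecord` whenever a lease runs out at the end of a period, and `do_renew` can pick that record up one sale rotation later, even if the lease had already expired before `start_sales` was called. As a minimal sketch of the resulting flow, condensed from the new test above (an illustration, not additional code from the patch; it assumes the same `mock.rs` helpers and broker internals shown in this commit):

```rust
#[test]
fn expired_lease_renewal_sketch() {
	TestExt::new().endow(1, 1000).execute_with(|| {
		// A lease for task 1 that is already over before sales start.
		assert_ok!(Broker::do_set_lease(1, 1));
		advance_to(5);

		// `do_start_sales` initializes the first sale (period 0); the expired
		// lease cannot be renewed yet...
		assert_ok!(Broker::do_start_sales(100, 2));
		assert_noop!(Broker::do_renew(1, 0), Error::<Test>::Unavailable);

		// ...but after one sale rotation (period 1), the renewal record written
		// by `rotate_sale` makes it renewable at the base price of 100.
		advance_sale_period();
		assert_ok!(Broker::do_renew(1, 0));
		assert_eq!(balance(1), 900);
	});
}
```
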
From 8b4cfda7589325d1a34f70b3770ab494a9d4052c Mon Sep 17 00:00:00 2001
From: Javier Bullrich
Date: Mon, 15 Apr 2024 15:46:14 +0200
Subject: [PATCH 002/107] added script to require a review post push (#3431)

Closes https://github.com/paritytech/opstooling/issues/174

Added a new step to the action that triggers review-bot, stopping approvals
from simply carrying over to new pushes.

This step works in the following way:
- If the **author of the PR**, who **is not** a member of the org, pushed a
  new commit, then:
  - Review-Trigger requests new reviews from the reviewers and fails. It
    *does not dismiss reviews*; it simply requests them again, so the
    existing approvals remain available. This way, if the author changed
    something in the code, they will still need to have this latest change
    approved, stopping them from uploading malicious code.

Find the requested issue linked to this PR (it is from a private repo so I
can't link it here).
---
 .github/workflows/review-trigger.yml | 32 ++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/.github/workflows/review-trigger.yml b/.github/workflows/review-trigger.yml
index 8b23dd30bb2..061cf4ab09e 100644
--- a/.github/workflows/review-trigger.yml
+++ b/.github/workflows/review-trigger.yml
@@ -21,6 +21,38 @@ jobs:
       - name: Skip merge queue
         if: ${{ contains(github.ref, 'gh-readonly-queue') }}
         run: exit 0
+      - name: Get comments
+        id: comments
+        run: echo "bodies=$(gh pr view ${{ github.event.number }} --repo ${{ github.repository }} --json comments --jq '[.comments[].body]')" >> "$GITHUB_OUTPUT"
+        env:
+          GH_TOKEN: ${{ github.token }}
+      - name: Fail when author pushes new code
+        # Require new reviews when the author pushes and they are not a member
+        if: |
+          github.event_name == 'pull_request_target' &&
+          github.event.action == 'synchronize' &&
+          github.event.sender.login == github.event.pull_request.user.login &&
+          github.event.pull_request.author_association != 'MEMBER'
+        run: |
+          # We get the list of reviewers who approved the PR
+          REVIEWERS=$(gh api repos/${{ github.repository }}/pulls/${{ github.event.number }}/reviews \
+            --jq '{reviewers: [.[] | select(.state == "APPROVED") | .user.login]}')

+
+          # We request them to review again
+          echo $REVIEWERS | gh api --method POST repos/${{ github.repository }}/pulls/${{ github.event.number }}/requested_reviewers --input -
+
+          echo "::error::Project needs to be reviewed again"
+          exit 1
+        env:
+          GH_TOKEN: ${{ github.token }}
+      - name: Comment requirements
+        # If the previous step failed and github-actions hasn't commented yet, we comment instructions
+        if: failure() && !contains(fromJson(steps.comments.outputs.bodies), 'Review required! Latest push from author must always be reviewed')
+        run: |
+          gh pr comment ${{ github.event.number }} --repo ${{ github.repository }} --body "Review required!
+          Latest push from author must always be reviewed"
+        env:
+          GH_TOKEN: ${{ github.token }}
+          COMMENTS: ${{ steps.comments.outputs.users }}
       - name: Get PR number
         env:
           PR_NUMBER: ${{ github.event.pull_request.number }}
-- 
GitLab

From d1f9fe0a994febff13f21b0edd223e838754e2fc Mon Sep 17 00:00:00 2001
From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Date: Mon, 15 Apr 2024 17:33:55 +0300
Subject: [PATCH 003/107] logging(fix): Use the proper log target for logging
 (#4124)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This PR ensures the proper logging target (i.e. `libp2p_tcp` or `beefy`) is
displayed.

The issue was introduced in https://github.com/paritytech/polkadot-sdk/pull/4059,
which removed the normalized metadata of logs.

From the [documentation](https://docs.rs/tracing-log/latest/tracing_log/trait.NormalizeEvent.html#tymethod.normalized_metadata):

> In tracing-log, an Event produced by a log (through
> [AsTrace](https://docs.rs/tracing-log/latest/tracing_log/trait.AsTrace.html))
> has a hard coded “log” target

> [normalized_metadata](https://docs.rs/tracing-log/latest/tracing_log/trait.NormalizeEvent.html#tymethod.normalized_metadata):
> If this Event comes from a log, this method provides a new normalized
> Metadata which has all available attributes from the original log,
> including file, line, module_path and target

The implications are low if a version containing the mentioned pull request
was deployed: we would only lose the ability to distinguish between log
targets.

### Before this PR

```
2024-04-15 12:45:40.327  INFO main log: Parity Polkadot
2024-04-15 12:45:40.328  INFO main log: ✌️ version 1.10.0-d1b0ef76a8b
2024-04-15 12:45:40.328  INFO main log: ❤️ by Parity Technologies , 2017-2024
2024-04-15 12:45:40.328  INFO main log: 📋 Chain specification: Development
2024-04-15 12:45:40.328  INFO main log: 🏷 Node name: yellow-eyes-2963
2024-04-15 12:45:40.328  INFO main log: 👤 Role: AUTHORITY
2024-04-15 12:45:40.328  INFO main log: 💾 Database: RocksDb at /tmp/substrated39i9J/chains/rococo_dev/db/full
2024-04-15 12:45:44.508  WARN main log: Took active validators from set with wrong size
...
2024-04-15 12:45:45.805  INFO main log: 👶 Starting BABE Authorship worker
2024-04-15 12:45:45.806  INFO tokio-runtime-worker log: 🥩 BEEFY gadget waiting for BEEFY pallet to become available...
2024-04-15 12:45:45.806 DEBUG tokio-runtime-worker log: New listen address: /ip6/::1/tcp/30333
2024-04-15 12:45:45.806 DEBUG tokio-runtime-worker log: New listen address: /ip4/127.0.0.1/tcp/30333
```

### After this PR

```
2024-04-15 12:59:45.623  INFO main sc_cli::runner: Parity Polkadot
2024-04-15 12:59:45.623  INFO main sc_cli::runner: ✌️ version 1.10.0-d1b0ef76a8b
2024-04-15 12:59:45.623  INFO main sc_cli::runner: ❤️ by Parity Technologies , 2017-2024
2024-04-15 12:59:45.623  INFO main sc_cli::runner: 📋 Chain specification: Development
2024-04-15 12:59:45.623  INFO main sc_cli::runner: 🏷 Node name: helpless-lizards-0550
2024-04-15 12:59:45.623  INFO main sc_cli::runner: 👤 Role: AUTHORITY
...
2024-04-15 12:59:50.204  INFO tokio-runtime-worker beefy: 🥩 BEEFY gadget waiting for BEEFY pallet to become available...
2024-04-15 12:59:50.204 DEBUG tokio-runtime-worker libp2p_tcp: New listen address: /ip6/::1/tcp/30333
2024-04-15 12:59:50.204 DEBUG tokio-runtime-worker libp2p_tcp: New listen address: /ip4/127.0.0.1/tcp/30333
```

Signed-off-by: Alexandru Vasile
---
 substrate/client/tracing/src/logging/event_format.rs | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/substrate/client/tracing/src/logging/event_format.rs b/substrate/client/tracing/src/logging/event_format.rs
index 9589c1dfee2..8d875427841 100644
--- a/substrate/client/tracing/src/logging/event_format.rs
+++ b/substrate/client/tracing/src/logging/event_format.rs
@@ -21,6 +21,7 @@ use ansi_term::Colour;
 use regex::Regex;
 use std::fmt::{self, Write};
 use tracing::{Event, Level, Subscriber};
+use tracing_log::NormalizeEvent;
 use tracing_subscriber::{
 	fmt::{format, time::FormatTime, FmtContext, FormatEvent, FormatFields},
 	registry::LookupSpan,
@@ -60,10 +61,12 @@ where
 	N: for<'a> FormatFields<'a> + 'static,
 {
 	let mut writer = &mut ControlCodeSanitizer::new(!self.enable_color, writer);
+	let normalized_meta = event.normalized_metadata();
+	let meta = normalized_meta.as_ref().unwrap_or_else(|| event.metadata());
 
 	time::write(&self.timer, &mut format::Writer::new(&mut writer), self.enable_color)?;
 
 	if self.display_level {
-		let fmt_level = FmtLevel::new(event.metadata().level(), self.enable_color);
+		let fmt_level = FmtLevel::new(meta.level(), self.enable_color);
 		write!(writer, "{} ", fmt_level)?;
 	}
 
@@ -81,7 +84,7 @@ where
 	}
 
 	if self.display_target {
-		write!(writer, "{}: ", event.metadata().target())?;
+		write!(writer, "{}: ", meta.target())?;
 	}
 
 	// Custom code to display node name
-- 
GitLab
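
The heart of the fix is the two-line fallback in the hunk above. Extracted and annotated as a standalone pattern (an illustrative fragment rather than additional code from the patch; any `tracing-subscriber` formatter that also receives `log`-crate records bridged by `tracing-log` would use it the same way, inside its `FormatEvent::format_event` implementation):

```rust
use tracing_log::NormalizeEvent;

// Events bridged from the `log` crate carry a hard-coded "log" target, but
// `normalized_metadata()` returns `Some(metadata)` rebuilt from the original
// record (real target, file, line, module path). Native `tracing` events
// return `None`, so we fall back to `event.metadata()`.
let normalized_meta = event.normalized_metadata();
let meta = normalized_meta.as_ref().unwrap_or_else(|| event.metadata());

// `meta.target()` now yields e.g. "libp2p_tcp" instead of "log".
write!(writer, "{}: ", meta.target())?;
```
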
From 0c9ad5306ce8bbc815d862121a42778c1ea734be Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?D=C3=B3nal=20Murray?=
Date: Mon, 15 Apr 2024 17:28:33 +0100
Subject: [PATCH 004/107] [pallet-broker] add tests for renewing leases (#4099)

The first test proves that parachains that were migrated over on a legacy
lease can renew without downtime. The exception is if their lease expires in
period 0, i.e. within `region_length` timeslices after `start_sales` is
called.

The second test is designed such that it passes while the issue exists; the
issue should be fixed. This will require an intervention on Kusama to add
these renewals to storage, as the timeline is too tight to schedule a runtime
upgrade before the `start_sales` call. All leases will still have at least
two full regions of coretime.
---
 substrate/frame/broker/src/tests.rs | 155 ++++++++++++++++++++++++++++
 1 file changed, 155 insertions(+)

diff --git a/substrate/frame/broker/src/tests.rs b/substrate/frame/broker/src/tests.rs
index 7fc7e31e9bb..d738d344503 100644
--- a/substrate/frame/broker/src/tests.rs
+++ b/substrate/frame/broker/src/tests.rs
@@ -892,6 +892,161 @@ fn short_leases_are_cleaned() {
 	});
 }
 
+#[test]
+fn leases_can_be_renewed() {
+	TestExt::new().endow(1, 1000).execute_with(|| {
+		// Timeslice period is 2.
+		//
+		// Sale 1 starts at block 7, Sale 2 starts at 13.
+
+		// Set lease to expire in sale 1 and start sales.
+		assert_ok!(Broker::do_set_lease(2001, 9));
+		assert_eq!(Leases::<Test>::get().len(), 1);
+		// Start the sales with only one core for this lease.
+		assert_ok!(Broker::do_start_sales(100, 1));
+
+		// Advance to sale period 1; we should get an AllowedRenewal for task 2001 for
+		// the next sale.
+		advance_sale_period();
+		assert_eq!(
+			AllowedRenewals::<Test>::get(AllowedRenewalId { core: 0, when: 10 }),
+			Some(AllowedRenewalRecord {
+				price: 100,
+				completion: CompletionStatus::Complete(
+					vec![ScheduleItem { mask: CoreMask::complete(), assignment: Task(2001) }]
+						.try_into()
+						.unwrap()
+				)
+			})
+		);
+		// And the lease has been removed from storage.
+		assert_eq!(Leases::<Test>::get().len(), 0);
+
+		// Advance to sale period 2, where we can renew.
+		advance_sale_period();
+		assert_ok!(Broker::do_renew(1, 0));
+		// We renew for the base price of the previous sale period.
+		assert_eq!(balance(1), 900);
+
+		// We just renewed for this period.
+		advance_sale_period();
+		// Now we are off core and the core is pooled.
+		advance_sale_period();
+		// Check that the trace agrees.
+		assert_eq!(
+			CoretimeTrace::get(),
+			vec![
+				// Period 0 gets no assign core, but leases are on-core.
+				// Period 1:
+				(
+					6,
+					AssignCore {
+						core: 0,
+						begin: 8,
+						assignment: vec![(CoreAssignment::Task(2001), 57600)],
+						end_hint: None,
+					},
+				),
+				// Period 2 - expiring at the end of this period, so we called renew.
+				(
+					12,
+					AssignCore {
+						core: 0,
+						begin: 14,
+						assignment: vec![(CoreAssignment::Task(2001), 57600)],
+						end_hint: None,
+					},
+				),
+				// Period 3 - we get assigned a core because we called renew in period 2.
+				(
+					18,
+					AssignCore {
+						core: 0,
+						begin: 20,
+						assignment: vec![(CoreAssignment::Task(2001), 57600)],
+						end_hint: None,
+					},
+				),
+				// Period 4 - we don't get a core as we didn't call renew again.
+				// This core is recycled into the pool.
+				(
+					24,
+					AssignCore {
+						core: 0,
+						begin: 26,
+						assignment: vec![(CoreAssignment::Pool, 57600)],
+						end_hint: None,
+					},
+				),
+			]
+		);
+	});
+}
+
+// We understand that this does not work as intended for leases that expire within `region_length`
+// timeslices after calling `start_sales`.
+#[test]
+fn short_leases_cannot_be_renewed() {
+	TestExt::new().endow(1, 1000).execute_with(|| {
+		// Timeslice period is 2.
+		//
+		// Sale 1 starts at block 7, Sale 2 starts at 13.
+
+		// Set lease to expire in sale period 0 and start sales.
+		assert_ok!(Broker::do_set_lease(2001, 3));
+		assert_eq!(Leases::<Test>::get().len(), 1);
+		// Start the sales with one core for this lease.
+		assert_ok!(Broker::do_start_sales(100, 1));
+
+		// The lease is removed.
+		assert_eq!(Leases::<Test>::get().len(), 0);
+
+		// We should have got an entry in AllowedRenewals, but we don't because rotate_sale
+		// schedules leases a period in advance. This renewal should be in the period after next
+		// because, while bootstrapping our way into the sale periods, we give everything a lease
+		// for period 1, so they can renew for period 2. So we have a core until the end of period
+		// 1, but we are not marked as able to renew because we expired before sale period 1
+		// starts.
+		//
+		// This should be fixed.
+		assert_eq!(AllowedRenewals::<Test>::get(AllowedRenewalId { core: 0, when: 10 }), None);
+		// And the lease has been removed from storage.
+		assert_eq!(Leases::<Test>::get().len(), 0);
+
+		// Advance to sale period 2, where we now cannot renew.
+		advance_to(13);
+		assert_noop!(Broker::do_renew(1, 0), Error::<Test>::NotAllowed);
+
+		// Check the trace.
+		assert_eq!(
+			CoretimeTrace::get(),
+			vec![
+				// Period 0 gets no assign core, but leases are on-core.
+				// Period 1 we get assigned a core due to the way the sales are bootstrapped.
+				(
+					6,
+					AssignCore {
+						core: 0,
+						begin: 8,
+						assignment: vec![(CoreAssignment::Task(2001), 57600)],
+						end_hint: None,
+					},
+				),
+				// Period 2 - we don't get a core as we couldn't renew.
+				// This core is recycled into the pool.
+				(
+					12,
+					AssignCore {
+						core: 0,
+						begin: 14,
+						assignment: vec![(CoreAssignment::Pool, 57600)],
+						end_hint: None,
+					},
+				),
+			]
+		);
+	});
+}
+
 #[test]
 fn leases_are_limited() {
 	TestExt::new().execute_with(|| {
-- 
GitLab
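
For readers cross-checking the block numbers in these traces, here is a small worked example of the timing both tests rely on (a sketch, not code from the patch; the timeslice period of 2 comes from the test comments above, while the region length of 3 timeslices is inferred from the 6-block spacing of the `begin` values):

```rust
fn main() {
	// A timeslice is 2 blocks in the broker mock, and a region is (inferred)
	// 3 timeslices, so consecutive sale regions begin 6 blocks apart.
	let timeslice_period: u64 = 2;
	let region_length: u64 = 3;

	// "Sale 1 starts at block 7": its region begins at timeslice 4, i.e. block
	// 8, matching `begin: 8` in the first trace entries. This is also the
	// block that `advance_sale_period` in `mock.rs` jumps to, via
	// `region_begin * timeslice_period`.
	let region_begin: u64 = 4;
	assert_eq!(region_begin * timeslice_period, 8);

	// Each later sale's region begins one region length further on, giving
	// the `begin: 14`, `begin: 20` and `begin: 26` seen in the traces.
	for (i, expected) in [(1, 14), (2, 20), (3, 26)] {
		assert_eq!((region_begin + i * region_length) * timeslice_period, expected);
	}
}
```
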
From a8f4f4f00f8fc0da512a09e1450bf4cda954d70d Mon Sep 17 00:00:00 2001
From: gui
Date: Tue, 16 Apr 2024 03:45:44 +0900
Subject: [PATCH 005/107] pallet assets: Fix errors (#4118)

`LiveAsset` is an error to be returned when an asset is not supposed to be
live, and `AssetNotLive` is an error to be returned when an asset is supposed
to be live; I don't think frozen qualifies as live.
---
 Cargo.lock                              |  2 +-
 prdoc/pr_4118.prdoc                     | 13 +++++++++++++
 substrate/frame/assets/Cargo.toml       |  2 +-
 substrate/frame/assets/src/functions.rs |  2 +-
 substrate/frame/assets/src/lib.rs       |  8 ++++----
 5 files changed, 20 insertions(+), 7 deletions(-)
 create mode 100644 prdoc/pr_4118.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index 2339aff34bc..69395bf281e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -9571,7 +9571,7 @@ dependencies = [
 
 [[package]]
 name = "pallet-assets"
-version = "29.0.0"
+version = "29.1.0"
 dependencies = [
  "frame-benchmarking",
  "frame-support",
diff --git a/prdoc/pr_4118.prdoc b/prdoc/pr_4118.prdoc
new file mode 100644
index 00000000000..20f36c1b0a3
--- /dev/null
+++ b/prdoc/pr_4118.prdoc
@@ -0,0 +1,13 @@
+title: "pallet assets: minor improvement on errors returned for some calls"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Some calls in pallet assets have better errors. No new error is introduced; only more sensible choices are made.
+  - audience: Runtime User
+    description: |
+      Some calls in pallet assets have better errors. No new error is introduced; only more sensible choices are made.
+
+crates:
+  - name: pallet-assets
+    bump: minor
diff --git a/substrate/frame/assets/Cargo.toml b/substrate/frame/assets/Cargo.toml
index 3b95750c14c..ed6df77e152 100644
--- a/substrate/frame/assets/Cargo.toml
+++ b/substrate/frame/assets/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "pallet-assets"
-version = "29.0.0"
+version = "29.1.0"
 authors.workspace = true
 edition.workspace = true
 license = "Apache-2.0"
diff --git a/substrate/frame/assets/src/functions.rs b/substrate/frame/assets/src/functions.rs
index 8791aaa736b..4a5fb06ee2c 100644
--- a/substrate/frame/assets/src/functions.rs
+++ b/substrate/frame/assets/src/functions.rs
@@ -491,7 +491,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 		let d = Asset::<T, I>::get(&id).ok_or(Error::<T, I>::Unknown)?;
 		ensure!(
 			d.status == AssetStatus::Live || d.status == AssetStatus::Frozen,
-			Error::<T, I>::AssetNotLive
+			Error::<T, I>::IncorrectStatus
 		);
 
 		let actual = Self::decrease_balance(id.clone(), target, amount, f, |actual, details| {
diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs
index 6891f04dfb5..c6b379e1d06 100644
--- a/substrate/frame/assets/src/lib.rs
+++ b/substrate/frame/assets/src/lib.rs
@@ -987,7 +987,7 @@ pub mod pallet {
 			let d = Asset::<T, I>::get(&id).ok_or(Error::<T, I>::Unknown)?;
 			ensure!(
 				d.status == AssetStatus::Live || d.status == AssetStatus::Frozen,
-				Error::<T, I>::AssetNotLive
+				Error::<T, I>::IncorrectStatus
 			);
 			ensure!(origin == d.freezer, Error::<T, I>::NoPermission);
 			let who = T::Lookup::lookup(who)?;
@@ -1024,7 +1024,7 @@ pub mod pallet {
 			let details = Asset::<T, I>::get(&id).ok_or(Error::<T, I>::Unknown)?;
 			ensure!(
 				details.status == AssetStatus::Live || details.status == AssetStatus::Frozen,
-				Error::<T, I>::AssetNotLive
+				Error::<T, I>::IncorrectStatus
 			);
 			ensure!(origin == details.admin, Error::<T, I>::NoPermission);
 			let who = T::Lookup::lookup(who)?;
@@ -1113,7 +1113,7 @@ pub mod pallet {
 
 		Asset::<T, I>::try_mutate(id.clone(), |maybe_details| {
 			let details = maybe_details.as_mut().ok_or(Error::<T, I>::Unknown)?;
-			ensure!(details.status == AssetStatus::Live, Error::<T, I>::LiveAsset);
+			ensure!(details.status == AssetStatus::Live, Error::<T, I>::AssetNotLive);
 			ensure!(origin == details.owner, Error::<T, I>::NoPermission);
 			if details.owner == owner {
 				return Ok(())
@@ -1669,7 +1669,7 @@ pub mod pallet {
 			let d = Asset::<T, I>::get(&id).ok_or(Error::<T, I>::Unknown)?;
 			ensure!(
 				d.status == AssetStatus::Live || d.status == AssetStatus::Frozen,
-				Error::<T, I>::AssetNotLive
+				Error::<T, I>::IncorrectStatus
 			);
 			ensure!(origin == d.freezer, Error::<T, I>::NoPermission);
 			let who = T::Lookup::lookup(who)?;
-- 
GitLab

From 4b5c3fd0cbb47f3484671ffce284b7586311126a Mon Sep 17 00:00:00 2001
From: Alin Dima
Date: Tue, 16 Apr 2024 10:25:22 +0300
Subject: [PATCH 006/107] move fragment_tree module to its own folder (#4148)

Will make https://github.com/paritytech/polkadot-sdk/pull/4035 easier to
review (the mentioned PR already does this move so the diff will be clearer).

Also called out as part of:
https://github.com/paritytech/polkadot-sdk/pull/3233#discussion_r1490867383
---
 .../mod.rs}                                   | 1447 +---------------
 .../src/fragment_tree/tests.rs                | 1451 +++++++++++++++++
 2 files changed, 1454 insertions(+), 1444 deletions(-)
 rename polkadot/node/core/prospective-parachains/src/{fragment_tree.rs => fragment_tree/mod.rs} (50%)
 create mode 100644 polkadot/node/core/prospective-parachains/src/fragment_tree/tests.rs

diff --git a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs b/polkadot/node/core/prospective-parachains/src/fragment_tree/mod.rs
similarity index 50%
rename from polkadot/node/core/prospective-parachains/src/fragment_tree.rs
rename to polkadot/node/core/prospective-parachains/src/fragment_tree/mod.rs
index 8061dc82d83..86814b976d1 100644
--- a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs
+++ b/polkadot/node/core/prospective-parachains/src/fragment_tree/mod.rs
@@ -86,6 +86,9 @@
 //! will still perform fairly well under these conditions, despite being somewhat wasteful of
 //! memory.
+#[cfg(test)] +mod tests; + use std::{ borrow::Cow, collections::{ @@ -1145,1447 +1148,3 @@ impl FragmentNode { self.children.iter().find(|(_, c)| c == candidate_hash).map(|(p, _)| *p) } } - -#[cfg(test)] -mod tests { - use super::*; - use assert_matches::assert_matches; - use polkadot_node_subsystem_util::inclusion_emulator::InboundHrmpLimitations; - use polkadot_primitives::{BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData}; - use polkadot_primitives_test_helpers as test_helpers; - use rstest::rstest; - use std::iter; - - impl NodePointer { - fn unwrap_idx(self) -> usize { - match self { - NodePointer::Root => panic!("Unexpected root"), - NodePointer::Storage(index) => index, - } - } - } - - fn make_constraints( - min_relay_parent_number: BlockNumber, - valid_watermarks: Vec, - required_parent: HeadData, - ) -> Constraints { - Constraints { - min_relay_parent_number, - max_pov_size: 1_000_000, - max_code_size: 1_000_000, - ump_remaining: 10, - ump_remaining_bytes: 1_000, - max_ump_num_per_candidate: 10, - dmp_remaining_messages: [0; 10].into(), - hrmp_inbound: InboundHrmpLimitations { valid_watermarks }, - hrmp_channels_out: HashMap::new(), - max_hrmp_num_per_candidate: 0, - required_parent, - validation_code_hash: Hash::repeat_byte(42).into(), - upgrade_restriction: None, - future_validation_code: None, - } - } - - fn make_committed_candidate( - para_id: ParaId, - relay_parent: Hash, - relay_parent_number: BlockNumber, - parent_head: HeadData, - para_head: HeadData, - hrmp_watermark: BlockNumber, - ) -> (PersistedValidationData, CommittedCandidateReceipt) { - let persisted_validation_data = PersistedValidationData { - parent_head, - relay_parent_number, - relay_parent_storage_root: Hash::repeat_byte(69), - max_pov_size: 1_000_000, - }; - - let candidate = CommittedCandidateReceipt { - descriptor: CandidateDescriptor { - para_id, - relay_parent, - collator: test_helpers::dummy_collator(), - persisted_validation_data_hash: persisted_validation_data.hash(), - pov_hash: Hash::repeat_byte(1), - erasure_root: Hash::repeat_byte(1), - signature: test_helpers::dummy_collator_signature(), - para_head: para_head.hash(), - validation_code_hash: Hash::repeat_byte(42).into(), - }, - commitments: CandidateCommitments { - upward_messages: Default::default(), - horizontal_messages: Default::default(), - new_validation_code: None, - head_data: para_head, - processed_downward_messages: 1, - hrmp_watermark, - }, - }; - - (persisted_validation_data, candidate) - } - - #[test] - fn scope_rejects_ancestors_that_skip_blocks() { - let para_id = ParaId::from(5u32); - let relay_parent = RelayChainBlockInfo { - number: 10, - hash: Hash::repeat_byte(10), - storage_root: Hash::repeat_byte(69), - }; - - let ancestors = vec![RelayChainBlockInfo { - number: 8, - hash: Hash::repeat_byte(8), - storage_root: Hash::repeat_byte(69), - }]; - - let max_depth = 2; - let base_constraints = make_constraints(8, vec![8, 9], vec![1, 2, 3].into()); - let pending_availability = Vec::new(); - - assert_matches!( - Scope::with_ancestors( - para_id, - relay_parent, - base_constraints, - pending_availability, - max_depth, - ancestors - ), - Err(UnexpectedAncestor { number: 8, prev: 10 }) - ); - } - - #[test] - fn scope_rejects_ancestor_for_0_block() { - let para_id = ParaId::from(5u32); - let relay_parent = RelayChainBlockInfo { - number: 0, - hash: Hash::repeat_byte(0), - storage_root: Hash::repeat_byte(69), - }; - - let ancestors = vec![RelayChainBlockInfo { - number: 99999, - hash: Hash::repeat_byte(99), - storage_root: 
Hash::repeat_byte(69), - }]; - - let max_depth = 2; - let base_constraints = make_constraints(0, vec![], vec![1, 2, 3].into()); - let pending_availability = Vec::new(); - - assert_matches!( - Scope::with_ancestors( - para_id, - relay_parent, - base_constraints, - pending_availability, - max_depth, - ancestors, - ), - Err(UnexpectedAncestor { number: 99999, prev: 0 }) - ); - } - - #[test] - fn scope_only_takes_ancestors_up_to_min() { - let para_id = ParaId::from(5u32); - let relay_parent = RelayChainBlockInfo { - number: 5, - hash: Hash::repeat_byte(0), - storage_root: Hash::repeat_byte(69), - }; - - let ancestors = vec![ - RelayChainBlockInfo { - number: 4, - hash: Hash::repeat_byte(4), - storage_root: Hash::repeat_byte(69), - }, - RelayChainBlockInfo { - number: 3, - hash: Hash::repeat_byte(3), - storage_root: Hash::repeat_byte(69), - }, - RelayChainBlockInfo { - number: 2, - hash: Hash::repeat_byte(2), - storage_root: Hash::repeat_byte(69), - }, - ]; - - let max_depth = 2; - let base_constraints = make_constraints(3, vec![2], vec![1, 2, 3].into()); - let pending_availability = Vec::new(); - - let scope = Scope::with_ancestors( - para_id, - relay_parent, - base_constraints, - pending_availability, - max_depth, - ancestors, - ) - .unwrap(); - - assert_eq!(scope.ancestors.len(), 2); - assert_eq!(scope.ancestors_by_hash.len(), 2); - } - - #[test] - fn storage_add_candidate() { - let mut storage = CandidateStorage::new(); - let relay_parent = Hash::repeat_byte(69); - - let (pvd, candidate) = make_committed_candidate( - ParaId::from(5u32), - relay_parent, - 8, - vec![4, 5, 6].into(), - vec![1, 2, 3].into(), - 7, - ); - - let candidate_hash = candidate.hash(); - let parent_head_hash = pvd.parent_head.hash(); - - storage.add_candidate(candidate, pvd).unwrap(); - assert!(storage.contains(&candidate_hash)); - assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); - - assert_eq!(storage.relay_parent_by_candidate_hash(&candidate_hash), Some(relay_parent)); - } - - #[test] - fn storage_retain() { - let mut storage = CandidateStorage::new(); - - let (pvd, candidate) = make_committed_candidate( - ParaId::from(5u32), - Hash::repeat_byte(69), - 8, - vec![4, 5, 6].into(), - vec![1, 2, 3].into(), - 7, - ); - - let candidate_hash = candidate.hash(); - let output_head_hash = candidate.commitments.head_data.hash(); - let parent_head_hash = pvd.parent_head.hash(); - - storage.add_candidate(candidate, pvd).unwrap(); - storage.retain(|_| true); - assert!(storage.contains(&candidate_hash)); - assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); - assert!(storage.head_data_by_hash(&output_head_hash).is_some()); - - storage.retain(|_| false); - assert!(!storage.contains(&candidate_hash)); - assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 0); - assert!(storage.head_data_by_hash(&output_head_hash).is_none()); - } - - // [`FragmentTree::populate`] should pick up candidates that build on other candidates. 
- #[test] - fn populate_works_recursively() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - let relay_parent_b = Hash::repeat_byte(2); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - let candidate_a_hash = candidate_a.hash(); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_b, - 1, - vec![0x0b].into(), - vec![0x0c].into(), - 1, - ); - let candidate_b_hash = candidate_b.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let ancestors = vec![RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }]; - - let relay_parent_b_info = RelayChainBlockInfo { - number: pvd_b.relay_parent_number, - hash: relay_parent_b, - storage_root: pvd_b.relay_parent_storage_root, - }; - - storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_b_info, - base_constraints, - pending_availability, - 4, - ancestors, - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - assert!(candidates.contains(&candidate_a_hash)); - assert!(candidates.contains(&candidate_b_hash)); - - assert_eq!(tree.nodes.len(), 2); - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[0].depth, 0); - - assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); - assert_eq!(tree.nodes[1].candidate_hash, candidate_b_hash); - assert_eq!(tree.nodes[1].depth, 1); - } - - #[test] - fn children_of_root_are_contiguous() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - let relay_parent_b = Hash::repeat_byte(2); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_b, - 1, - vec![0x0b].into(), - vec![0x0c].into(), - 1, - ); - - let (pvd_a2, candidate_a2) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b, 1].into(), - 0, - ); - let candidate_a2_hash = candidate_a2.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let ancestors = vec![RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }]; - - let relay_parent_b_info = RelayChainBlockInfo { - number: pvd_b.relay_parent_number, - hash: relay_parent_b, - storage_root: pvd_b.relay_parent_storage_root, - }; - - storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_b_info, - base_constraints, - pending_availability, - 4, - ancestors, - ) - .unwrap(); - let mut tree = FragmentTree::populate(scope, &storage); - - storage.add_candidate(candidate_a2, pvd_a2).unwrap(); - tree.add_and_populate(candidate_a2_hash, &storage); - let candidates: Vec<_> = tree.candidates().collect(); - 
assert_eq!(candidates.len(), 3); - - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[1].parent, NodePointer::Root); - assert_eq!(tree.nodes[2].parent, NodePointer::Storage(0)); - } - - #[test] - fn add_candidate_child_of_root() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0c].into(), - 0, - ); - let candidate_b_hash = candidate_b.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - storage.add_candidate(candidate_a, pvd_a).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - 4, - vec![], - ) - .unwrap(); - let mut tree = FragmentTree::populate(scope, &storage); - - storage.add_candidate(candidate_b, pvd_b).unwrap(); - tree.add_and_populate(candidate_b_hash, &storage); - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[1].parent, NodePointer::Root); - } - - #[test] - fn add_candidate_child_of_non_root() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0b].into(), - vec![0x0c].into(), - 0, - ); - let candidate_b_hash = candidate_b.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - storage.add_candidate(candidate_a, pvd_a).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - 4, - vec![], - ) - .unwrap(); - let mut tree = FragmentTree::populate(scope, &storage); - - storage.add_candidate(candidate_b, pvd_b).unwrap(); - tree.add_and_populate(candidate_b_hash, &storage); - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); - } - - #[test] - fn test_find_ancestor_path_and_find_backable_chain_empty_tree() { - let para_id = ParaId::from(5u32); - let relay_parent = Hash::repeat_byte(1); - let required_parent: HeadData = vec![0xff].into(); - let max_depth = 10; - - // Empty tree - let storage = CandidateStorage::new(); - let base_constraints = make_constraints(0, vec![0], required_parent.clone()); - - let relay_parent_info = - RelayChainBlockInfo { number: 0, hash: relay_parent, storage_root: Hash::zero() }; - - let scope = Scope::with_ancestors( - para_id, - relay_parent_info, - base_constraints, - vec![], - 
max_depth, - vec![], - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - assert_eq!(tree.candidates().collect::>().len(), 0); - assert_eq!(tree.nodes.len(), 0); - - assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root); - assert_eq!(tree.find_backable_chain(Ancestors::new(), 2, |_| true), vec![]); - // Invalid candidate. - let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); - assert_eq!(tree.find_ancestor_path(ancestors.clone()), Some(NodePointer::Root)); - assert_eq!(tree.find_backable_chain(ancestors, 2, |_| true), vec![]); - } - - #[rstest] - #[case(true, 13)] - #[case(false, 8)] - // The tree with no cycles looks like: - // Make a tree that looks like this (note that there's no cycle): - // +-(root)-+ - // | | - // +----0---+ 7 - // | | - // 1----+ 5 - // | | - // | | - // 2 6 - // | - // 3 - // | - // 4 - // - // The tree with cycles is the same as the first but has a cycle from 4 back to the state - // produced by 0 (It's bounded by the max_depth + 1). - // +-(root)-+ - // | | - // +----0---+ 7 - // | | - // 1----+ 5 - // | | - // | | - // 2 6 - // | - // 3 - // | - // 4---+ - // | | - // 1 5 - // | - // 2 - // | - // 3 - fn test_find_ancestor_path_and_find_backable_chain( - #[case] has_cycle: bool, - #[case] expected_node_count: usize, - ) { - let para_id = ParaId::from(5u32); - let relay_parent = Hash::repeat_byte(1); - let required_parent: HeadData = vec![0xff].into(); - let max_depth = 7; - let relay_parent_number = 0; - let relay_parent_storage_root = Hash::repeat_byte(69); - - let mut candidates = vec![]; - - // Candidate 0 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - required_parent.clone(), - vec![0].into(), - 0, - )); - // Candidate 1 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![0].into(), - vec![1].into(), - 0, - )); - // Candidate 2 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![1].into(), - vec![2].into(), - 0, - )); - // Candidate 3 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![2].into(), - vec![3].into(), - 0, - )); - // Candidate 4 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![3].into(), - vec![4].into(), - 0, - )); - // Candidate 5 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![0].into(), - vec![5].into(), - 0, - )); - // Candidate 6 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![1].into(), - vec![6].into(), - 0, - )); - // Candidate 7 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - required_parent.clone(), - vec![7].into(), - 0, - )); - - if has_cycle { - candidates[4] = make_committed_candidate( - para_id, - relay_parent, - 0, - vec![3].into(), - vec![0].into(), // put the cycle here back to the output state of 0. 
- 0, - ); - } - - let base_constraints = make_constraints(0, vec![0], required_parent.clone()); - let mut storage = CandidateStorage::new(); - - let relay_parent_info = RelayChainBlockInfo { - number: relay_parent_number, - hash: relay_parent, - storage_root: relay_parent_storage_root, - }; - - for (pvd, candidate) in candidates.iter() { - storage.add_candidate(candidate.clone(), pvd.clone()).unwrap(); - } - let candidates = - candidates.into_iter().map(|(_pvd, candidate)| candidate).collect::>(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_info, - base_constraints, - vec![], - max_depth, - vec![], - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - assert_eq!(tree.candidates().collect::>().len(), candidates.len()); - assert_eq!(tree.nodes.len(), expected_node_count); - - // Do some common tests on both trees. - { - // No ancestors supplied. - assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root); - assert_eq!( - tree.find_backable_chain(Ancestors::new(), 4, |_| true), - [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - // Ancestor which is not part of the tree. Will be ignored. - let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); - assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root); - assert_eq!( - tree.find_backable_chain(ancestors, 4, |_| true), - [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - // A chain fork. - let ancestors: Ancestors = - [(candidates[0].hash()), (candidates[7].hash())].into_iter().collect(); - assert_eq!(tree.find_ancestor_path(ancestors.clone()), None); - assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); - - // Ancestors which are part of the tree but don't form a path. Will be ignored. - let ancestors: Ancestors = - [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); - assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root); - assert_eq!( - tree.find_backable_chain(ancestors, 4, |_| true), - [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - // Valid ancestors. 
- let ancestors: Ancestors = [candidates[7].hash()].into_iter().collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[7].hash()); - assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); - - let ancestors: Ancestors = - [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] - .into_iter() - .collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[2].hash()); - assert_eq!( - tree.find_backable_chain(ancestors.clone(), 2, |_| true), - [3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - // Valid ancestors with candidates which have been omitted due to timeouts - let ancestors: Ancestors = - [candidates[0].hash(), candidates[2].hash()].into_iter().collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[0].hash()); - assert_eq!( - tree.find_backable_chain(ancestors, 3, |_| true), - [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - let ancestors: Ancestors = - [candidates[0].hash(), candidates[1].hash(), candidates[3].hash()] - .into_iter() - .collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[1].hash()); - if has_cycle { - assert_eq!( - tree.find_backable_chain(ancestors, 2, |_| true), - [2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - } else { - assert_eq!( - tree.find_backable_chain(ancestors, 4, |_| true), - [2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - } - - let ancestors: Ancestors = - [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - assert_eq!(res, NodePointer::Root); - assert_eq!( - tree.find_backable_chain(ancestors, 4, |_| true), - [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - // Requested count is 0. - assert_eq!(tree.find_backable_chain(Ancestors::new(), 0, |_| true), vec![]); - - let ancestors: Ancestors = - [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] - .into_iter() - .collect(); - assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]); - - let ancestors: Ancestors = - [candidates[2].hash(), candidates[0].hash()].into_iter().collect(); - assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]); - } - - // Now do some tests only on the tree with cycles - if has_cycle { - // Exceeds the maximum tree depth. 0-1-2-3-4-1-2-3-4, when the tree stops at - // 0-1-2-3-4-1-2-3. - let ancestors: Ancestors = [ - candidates[0].hash(), - candidates[1].hash(), - candidates[2].hash(), - candidates[3].hash(), - candidates[4].hash(), - ] - .into_iter() - .collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[4].hash()); - assert_eq!( - tree.find_backable_chain(ancestors, 4, |_| true), - [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - // 0-1-2. 
- let ancestors: Ancestors = - [candidates[0].hash(), candidates[1].hash(), candidates[2].hash()] - .into_iter() - .collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[2].hash()); - assert_eq!( - tree.find_backable_chain(ancestors.clone(), 1, |_| true), - [3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - assert_eq!( - tree.find_backable_chain(ancestors, 5, |_| true), - [3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - // 0-1 - let ancestors: Ancestors = - [candidates[0].hash(), candidates[1].hash()].into_iter().collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[1].hash()); - assert_eq!( - tree.find_backable_chain(ancestors, 6, |_| true), - [2, 3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>(), - ); - - // For 0-1-2-3-4-5, there's more than 1 way of finding this path in - // the tree. `None` should be returned. The runtime should not have accepted this. - let ancestors: Ancestors = [ - candidates[0].hash(), - candidates[1].hash(), - candidates[2].hash(), - candidates[3].hash(), - candidates[4].hash(), - candidates[5].hash(), - ] - .into_iter() - .collect(); - let res = tree.find_ancestor_path(ancestors.clone()); - assert_eq!(res, None); - assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); - } - } - - #[test] - fn graceful_cycle_of_0() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0a].into(), // input same as output - 0, - ); - let candidate_a_hash = candidate_a.hash(); - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - let max_depth = 4; - storage.add_candidate(candidate_a, pvd_a).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - max_depth, - vec![], - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 1); - assert_eq!(tree.nodes.len(), max_depth + 1); - - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); - assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1)); - assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2)); - assert_eq!(tree.nodes[4].parent, NodePointer::Storage(3)); - - assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[1].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[3].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash); - - for count in 1..10 { - assert_eq!( - tree.find_backable_chain(Ancestors::new(), count, |_| true), - iter::repeat(candidate_a_hash) - .take(std::cmp::min(count as usize, max_depth + 1)) - .collect::>() - ); - assert_eq!( - tree.find_backable_chain( - [candidate_a_hash].into_iter().collect(), - count - 
1, - |_| true - ), - iter::repeat(candidate_a_hash) - .take(std::cmp::min(count as usize - 1, max_depth)) - .collect::>() - ); - } - } - - #[test] - fn graceful_cycle_of_1() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), // input same as output - 0, - ); - let candidate_a_hash = candidate_a.hash(); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0b].into(), - vec![0x0a].into(), // input same as output - 0, - ); - let candidate_b_hash = candidate_b.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - let max_depth = 4; - storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - max_depth, - vec![], - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - assert_eq!(tree.nodes.len(), max_depth + 1); - - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); - assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1)); - assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2)); - assert_eq!(tree.nodes[4].parent, NodePointer::Storage(3)); - - assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[1].candidate_hash, candidate_b_hash); - assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[3].candidate_hash, candidate_b_hash); - assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash); - - assert_eq!(tree.find_backable_chain(Ancestors::new(), 1, |_| true), vec![candidate_a_hash],); - assert_eq!( - tree.find_backable_chain(Ancestors::new(), 2, |_| true), - vec![candidate_a_hash, candidate_b_hash], - ); - assert_eq!( - tree.find_backable_chain(Ancestors::new(), 3, |_| true), - vec![candidate_a_hash, candidate_b_hash, candidate_a_hash], - ); - assert_eq!( - tree.find_backable_chain([candidate_a_hash].into_iter().collect(), 2, |_| true), - vec![candidate_b_hash, candidate_a_hash], - ); - - assert_eq!( - tree.find_backable_chain(Ancestors::new(), 6, |_| true), - vec![ - candidate_a_hash, - candidate_b_hash, - candidate_a_hash, - candidate_b_hash, - candidate_a_hash - ], - ); - - for count in 3..7 { - assert_eq!( - tree.find_backable_chain( - [candidate_a_hash, candidate_b_hash].into_iter().collect(), - count, - |_| true - ), - vec![candidate_a_hash, candidate_b_hash, candidate_a_hash], - ); - } - } - - #[test] - fn hypothetical_depths_known_and_unknown() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), // input same as output - 0, - ); - let candidate_a_hash = candidate_a.hash(); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0b].into(), - 
vec![0x0a].into(), // input same as output - 0, - ); - let candidate_b_hash = candidate_b.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - let max_depth = 4; - storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - max_depth, - vec![], - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - assert_eq!(tree.nodes.len(), max_depth + 1); - - assert_eq!( - tree.hypothetical_depths( - candidate_a_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - false, - ), - vec![0, 2, 4], - ); - - assert_eq!( - tree.hypothetical_depths( - candidate_b_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - false, - ), - vec![1, 3], - ); - - assert_eq!( - tree.hypothetical_depths( - CandidateHash(Hash::repeat_byte(21)), - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - false, - ), - vec![0, 2, 4], - ); - - assert_eq!( - tree.hypothetical_depths( - CandidateHash(Hash::repeat_byte(22)), - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - false, - ), - vec![1, 3] - ); - } - - #[test] - fn hypothetical_depths_stricter_on_complete() { - let storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 1000, // watermark is illegal - ); - - let candidate_a_hash = candidate_a.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - let max_depth = 4; - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - max_depth, - vec![], - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - assert_eq!( - tree.hypothetical_depths( - candidate_a_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - false, - ), - vec![0], - ); - - assert!(tree - .hypothetical_depths( - candidate_a_hash, - HypotheticalCandidate::Complete { - receipt: Cow::Owned(candidate_a), - persisted_validation_data: Cow::Owned(pvd_a), - }, - &storage, - false, - ) - .is_empty()); - } - - #[test] - fn hypothetical_depths_backed_in_path() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - 
vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - let candidate_a_hash = candidate_a.hash(); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0b].into(), - vec![0x0c].into(), - 0, - ); - let candidate_b_hash = candidate_b.hash(); - - let (pvd_c, candidate_c) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0b].into(), - vec![0x0d].into(), - 0, - ); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - let max_depth = 4; - storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); - storage.add_candidate(candidate_c, pvd_c).unwrap(); - - // `A` and `B` are backed, `C` is not. - storage.mark_backed(&candidate_a_hash); - storage.mark_backed(&candidate_b_hash); - - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - max_depth, - vec![], - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 3); - assert_eq!(tree.nodes.len(), 3); - - let candidate_d_hash = CandidateHash(Hash::repeat_byte(0xAA)); - - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - true, - ), - vec![0], - ); - - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - true, - ), - vec![2], - ); - - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0d]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - true, - ), - Vec::::new(), - ); - - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0d]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - false, - ), - vec![2], // non-empty if `false`. - ); - } - - #[test] - fn pending_availability_in_scope() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - let relay_parent_b = Hash::repeat_byte(2); - let relay_parent_c = Hash::repeat_byte(3); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - let candidate_a_hash = candidate_a.hash(); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_b, - 1, - vec![0x0b].into(), - vec![0x0c].into(), - 1, - ); - - // Note that relay parent `a` is not allowed. 
- let base_constraints = make_constraints(1, vec![], vec![0x0a].into()); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - let pending_availability = vec![PendingAvailability { - candidate_hash: candidate_a_hash, - relay_parent: relay_parent_a_info, - }]; - - let relay_parent_b_info = RelayChainBlockInfo { - number: pvd_b.relay_parent_number, - hash: relay_parent_b, - storage_root: pvd_b.relay_parent_storage_root, - }; - let relay_parent_c_info = RelayChainBlockInfo { - number: pvd_b.relay_parent_number + 1, - hash: relay_parent_c, - storage_root: Hash::zero(), - }; - - let max_depth = 4; - storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); - storage.mark_backed(&candidate_a_hash); - - let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info, - base_constraints, - pending_availability, - max_depth, - vec![relay_parent_b_info], - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - assert_eq!(tree.nodes.len(), 2); - - let candidate_d_hash = CandidateHash(Hash::repeat_byte(0xAA)); - - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), - relay_parent: relay_parent_c, - }, - &storage, - false, - ), - vec![1], - ); - - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), - relay_parent: relay_parent_b, - }, - &storage, - false, - ), - vec![2], - ); - } -} diff --git a/polkadot/node/core/prospective-parachains/src/fragment_tree/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_tree/tests.rs new file mode 100644 index 00000000000..fd41be55f7f --- /dev/null +++ b/polkadot/node/core/prospective-parachains/src/fragment_tree/tests.rs @@ -0,0 +1,1451 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +use super::*; +use assert_matches::assert_matches; +use polkadot_node_subsystem_util::inclusion_emulator::InboundHrmpLimitations; +use polkadot_primitives::{BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData}; +use polkadot_primitives_test_helpers as test_helpers; +use rstest::rstest; +use std::iter; + +impl NodePointer { + fn unwrap_idx(self) -> usize { + match self { + NodePointer::Root => panic!("Unexpected root"), + NodePointer::Storage(index) => index, + } + } +} + +fn make_constraints( + min_relay_parent_number: BlockNumber, + valid_watermarks: Vec, + required_parent: HeadData, +) -> Constraints { + Constraints { + min_relay_parent_number, + max_pov_size: 1_000_000, + max_code_size: 1_000_000, + ump_remaining: 10, + ump_remaining_bytes: 1_000, + max_ump_num_per_candidate: 10, + dmp_remaining_messages: [0; 10].into(), + hrmp_inbound: InboundHrmpLimitations { valid_watermarks }, + hrmp_channels_out: HashMap::new(), + max_hrmp_num_per_candidate: 0, + required_parent, + validation_code_hash: Hash::repeat_byte(42).into(), + upgrade_restriction: None, + future_validation_code: None, + } +} + +fn make_committed_candidate( + para_id: ParaId, + relay_parent: Hash, + relay_parent_number: BlockNumber, + parent_head: HeadData, + para_head: HeadData, + hrmp_watermark: BlockNumber, +) -> (PersistedValidationData, CommittedCandidateReceipt) { + let persisted_validation_data = PersistedValidationData { + parent_head, + relay_parent_number, + relay_parent_storage_root: Hash::repeat_byte(69), + max_pov_size: 1_000_000, + }; + + let candidate = CommittedCandidateReceipt { + descriptor: CandidateDescriptor { + para_id, + relay_parent, + collator: test_helpers::dummy_collator(), + persisted_validation_data_hash: persisted_validation_data.hash(), + pov_hash: Hash::repeat_byte(1), + erasure_root: Hash::repeat_byte(1), + signature: test_helpers::dummy_collator_signature(), + para_head: para_head.hash(), + validation_code_hash: Hash::repeat_byte(42).into(), + }, + commitments: CandidateCommitments { + upward_messages: Default::default(), + horizontal_messages: Default::default(), + new_validation_code: None, + head_data: para_head, + processed_downward_messages: 1, + hrmp_watermark, + }, + }; + + (persisted_validation_data, candidate) +} + +#[test] +fn scope_rejects_ancestors_that_skip_blocks() { + let para_id = ParaId::from(5u32); + let relay_parent = RelayChainBlockInfo { + number: 10, + hash: Hash::repeat_byte(10), + storage_root: Hash::repeat_byte(69), + }; + + let ancestors = vec![RelayChainBlockInfo { + number: 8, + hash: Hash::repeat_byte(8), + storage_root: Hash::repeat_byte(69), + }]; + + let max_depth = 2; + let base_constraints = make_constraints(8, vec![8, 9], vec![1, 2, 3].into()); + let pending_availability = Vec::new(); + + assert_matches!( + Scope::with_ancestors( + para_id, + relay_parent, + base_constraints, + pending_availability, + max_depth, + ancestors + ), + Err(UnexpectedAncestor { number: 8, prev: 10 }) + ); +} + +#[test] +fn scope_rejects_ancestor_for_0_block() { + let para_id = ParaId::from(5u32); + let relay_parent = RelayChainBlockInfo { + number: 0, + hash: Hash::repeat_byte(0), + storage_root: Hash::repeat_byte(69), + }; + + let ancestors = vec![RelayChainBlockInfo { + number: 99999, + hash: Hash::repeat_byte(99), + storage_root: Hash::repeat_byte(69), + }]; + + let max_depth = 2; + let base_constraints = make_constraints(0, vec![], vec![1, 2, 3].into()); + let pending_availability = Vec::new(); + + assert_matches!( + Scope::with_ancestors( + para_id, + 
relay_parent, + base_constraints, + pending_availability, + max_depth, + ancestors, + ), + Err(UnexpectedAncestor { number: 99999, prev: 0 }) + ); +} + +#[test] +fn scope_only_takes_ancestors_up_to_min() { + let para_id = ParaId::from(5u32); + let relay_parent = RelayChainBlockInfo { + number: 5, + hash: Hash::repeat_byte(0), + storage_root: Hash::repeat_byte(69), + }; + + let ancestors = vec![ + RelayChainBlockInfo { + number: 4, + hash: Hash::repeat_byte(4), + storage_root: Hash::repeat_byte(69), + }, + RelayChainBlockInfo { + number: 3, + hash: Hash::repeat_byte(3), + storage_root: Hash::repeat_byte(69), + }, + RelayChainBlockInfo { + number: 2, + hash: Hash::repeat_byte(2), + storage_root: Hash::repeat_byte(69), + }, + ]; + + let max_depth = 2; + let base_constraints = make_constraints(3, vec![2], vec![1, 2, 3].into()); + let pending_availability = Vec::new(); + + let scope = Scope::with_ancestors( + para_id, + relay_parent, + base_constraints, + pending_availability, + max_depth, + ancestors, + ) + .unwrap(); + + assert_eq!(scope.ancestors.len(), 2); + assert_eq!(scope.ancestors_by_hash.len(), 2); +} + +#[test] +fn storage_add_candidate() { + let mut storage = CandidateStorage::new(); + let relay_parent = Hash::repeat_byte(69); + + let (pvd, candidate) = make_committed_candidate( + ParaId::from(5u32), + relay_parent, + 8, + vec![4, 5, 6].into(), + vec![1, 2, 3].into(), + 7, + ); + + let candidate_hash = candidate.hash(); + let parent_head_hash = pvd.parent_head.hash(); + + storage.add_candidate(candidate, pvd).unwrap(); + assert!(storage.contains(&candidate_hash)); + assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); + + assert_eq!(storage.relay_parent_by_candidate_hash(&candidate_hash), Some(relay_parent)); +} + +#[test] +fn storage_retain() { + let mut storage = CandidateStorage::new(); + + let (pvd, candidate) = make_committed_candidate( + ParaId::from(5u32), + Hash::repeat_byte(69), + 8, + vec![4, 5, 6].into(), + vec![1, 2, 3].into(), + 7, + ); + + let candidate_hash = candidate.hash(); + let output_head_hash = candidate.commitments.head_data.hash(); + let parent_head_hash = pvd.parent_head.hash(); + + storage.add_candidate(candidate, pvd).unwrap(); + storage.retain(|_| true); + assert!(storage.contains(&candidate_hash)); + assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); + assert!(storage.head_data_by_hash(&output_head_hash).is_some()); + + storage.retain(|_| false); + assert!(!storage.contains(&candidate_hash)); + assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 0); + assert!(storage.head_data_by_hash(&output_head_hash).is_none()); +} + +// [`FragmentTree::populate`] should pick up candidates that build on other candidates. 
+#[test] +fn populate_works_recursively() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + let relay_parent_b = Hash::repeat_byte(2); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + let candidate_a_hash = candidate_a.hash(); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_b, + 1, + vec![0x0b].into(), + vec![0x0c].into(), + 1, + ); + let candidate_b_hash = candidate_b.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let ancestors = vec![RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }]; + + let relay_parent_b_info = RelayChainBlockInfo { + number: pvd_b.relay_parent_number, + hash: relay_parent_b, + storage_root: pvd_b.relay_parent_storage_root, + }; + + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_b_info, + base_constraints, + pending_availability, + 4, + ancestors, + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + assert!(candidates.contains(&candidate_a_hash)); + assert!(candidates.contains(&candidate_b_hash)); + + assert_eq!(tree.nodes.len(), 2); + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[0].depth, 0); + + assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); + assert_eq!(tree.nodes[1].candidate_hash, candidate_b_hash); + assert_eq!(tree.nodes[1].depth, 1); +} + +#[test] +fn children_of_root_are_contiguous() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + let relay_parent_b = Hash::repeat_byte(2); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_b, + 1, + vec![0x0b].into(), + vec![0x0c].into(), + 1, + ); + + let (pvd_a2, candidate_a2) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b, 1].into(), + 0, + ); + let candidate_a2_hash = candidate_a2.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let ancestors = vec![RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }]; + + let relay_parent_b_info = RelayChainBlockInfo { + number: pvd_b.relay_parent_number, + hash: relay_parent_b, + storage_root: pvd_b.relay_parent_storage_root, + }; + + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_b_info, + base_constraints, + pending_availability, + 4, + ancestors, + ) + .unwrap(); + let mut tree = FragmentTree::populate(scope, &storage); + + storage.add_candidate(candidate_a2, pvd_a2).unwrap(); + tree.add_and_populate(candidate_a2_hash, &storage); + let candidates: Vec<_> = tree.candidates().collect(); + 
assert_eq!(candidates.len(), 3); + + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[1].parent, NodePointer::Root); + assert_eq!(tree.nodes[2].parent, NodePointer::Storage(0)); +} + +#[test] +fn add_candidate_child_of_root() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0c].into(), + 0, + ); + let candidate_b_hash = candidate_b.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + storage.add_candidate(candidate_a, pvd_a).unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + pending_availability, + 4, + vec![], + ) + .unwrap(); + let mut tree = FragmentTree::populate(scope, &storage); + + storage.add_candidate(candidate_b, pvd_b).unwrap(); + tree.add_and_populate(candidate_b_hash, &storage); + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[1].parent, NodePointer::Root); +} + +#[test] +fn add_candidate_child_of_non_root() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0b].into(), + vec![0x0c].into(), + 0, + ); + let candidate_b_hash = candidate_b.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + storage.add_candidate(candidate_a, pvd_a).unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + pending_availability, + 4, + vec![], + ) + .unwrap(); + let mut tree = FragmentTree::populate(scope, &storage); + + storage.add_candidate(candidate_b, pvd_b).unwrap(); + tree.add_and_populate(candidate_b_hash, &storage); + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); +} + +#[test] +fn test_find_ancestor_path_and_find_backable_chain_empty_tree() { + let para_id = ParaId::from(5u32); + let relay_parent = Hash::repeat_byte(1); + let required_parent: HeadData = vec![0xff].into(); + let max_depth = 10; + + // Empty tree + let storage = CandidateStorage::new(); + let base_constraints = make_constraints(0, vec![0], required_parent.clone()); + + let relay_parent_info = + RelayChainBlockInfo { number: 0, hash: relay_parent, storage_root: Hash::zero() }; + + let scope = Scope::with_ancestors( + para_id, + relay_parent_info, + base_constraints, + vec![], + max_depth, + 
vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + assert_eq!(tree.candidates().collect::>().len(), 0); + assert_eq!(tree.nodes.len(), 0); + + assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root); + assert_eq!(tree.find_backable_chain(Ancestors::new(), 2, |_| true), vec![]); + // Invalid candidate. + let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()), Some(NodePointer::Root)); + assert_eq!(tree.find_backable_chain(ancestors, 2, |_| true), vec![]); +} + +#[rstest] +#[case(true, 13)] +#[case(false, 8)] +// The tree with no cycles looks like: +// Make a tree that looks like this (note that there's no cycle): +// +-(root)-+ +// | | +// +----0---+ 7 +// | | +// 1----+ 5 +// | | +// | | +// 2 6 +// | +// 3 +// | +// 4 +// +// The tree with cycles is the same as the first but has a cycle from 4 back to the state +// produced by 0 (It's bounded by the max_depth + 1). +// +-(root)-+ +// | | +// +----0---+ 7 +// | | +// 1----+ 5 +// | | +// | | +// 2 6 +// | +// 3 +// | +// 4---+ +// | | +// 1 5 +// | +// 2 +// | +// 3 +fn test_find_ancestor_path_and_find_backable_chain( + #[case] has_cycle: bool, + #[case] expected_node_count: usize, +) { + let para_id = ParaId::from(5u32); + let relay_parent = Hash::repeat_byte(1); + let required_parent: HeadData = vec![0xff].into(); + let max_depth = 7; + let relay_parent_number = 0; + let relay_parent_storage_root = Hash::repeat_byte(69); + + let mut candidates = vec![]; + + // Candidate 0 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + required_parent.clone(), + vec![0].into(), + 0, + )); + // Candidate 1 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![0].into(), + vec![1].into(), + 0, + )); + // Candidate 2 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![1].into(), + vec![2].into(), + 0, + )); + // Candidate 3 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![2].into(), + vec![3].into(), + 0, + )); + // Candidate 4 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![3].into(), + vec![4].into(), + 0, + )); + // Candidate 5 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![0].into(), + vec![5].into(), + 0, + )); + // Candidate 6 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![1].into(), + vec![6].into(), + 0, + )); + // Candidate 7 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + required_parent.clone(), + vec![7].into(), + 0, + )); + + if has_cycle { + candidates[4] = make_committed_candidate( + para_id, + relay_parent, + 0, + vec![3].into(), + vec![0].into(), // put the cycle here back to the output state of 0. 
+ 0, + ); + } + + let base_constraints = make_constraints(0, vec![0], required_parent.clone()); + let mut storage = CandidateStorage::new(); + + let relay_parent_info = RelayChainBlockInfo { + number: relay_parent_number, + hash: relay_parent, + storage_root: relay_parent_storage_root, + }; + + for (pvd, candidate) in candidates.iter() { + storage.add_candidate(candidate.clone(), pvd.clone()).unwrap(); + } + let candidates = candidates.into_iter().map(|(_pvd, candidate)| candidate).collect::>(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_info, + base_constraints, + vec![], + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + assert_eq!(tree.candidates().collect::>().len(), candidates.len()); + assert_eq!(tree.nodes.len(), expected_node_count); + + // Do some common tests on both trees. + { + // No ancestors supplied. + assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root); + assert_eq!( + tree.find_backable_chain(Ancestors::new(), 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + // Ancestor which is not part of the tree. Will be ignored. + let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + // A chain fork. + let ancestors: Ancestors = + [(candidates[0].hash()), (candidates[7].hash())].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()), None); + assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); + + // Ancestors which are part of the tree but don't form a path. Will be ignored. + let ancestors: Ancestors = + [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // Valid ancestors. 
+ let ancestors: Ancestors = [candidates[7].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[7].hash()); + assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); + + let ancestors: Ancestors = + [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[2].hash()); + assert_eq!( + tree.find_backable_chain(ancestors.clone(), 2, |_| true), + [3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // Valid ancestors with candidates which have been omitted due to timeouts + let ancestors: Ancestors = + [candidates[0].hash(), candidates[2].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[0].hash()); + assert_eq!( + tree.find_backable_chain(ancestors, 3, |_| true), + [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + let ancestors: Ancestors = + [candidates[0].hash(), candidates[1].hash(), candidates[3].hash()] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[1].hash()); + if has_cycle { + assert_eq!( + tree.find_backable_chain(ancestors, 2, |_| true), + [2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + } else { + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + } + + let ancestors: Ancestors = + [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + assert_eq!(res, NodePointer::Root); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // Requested count is 0. + assert_eq!(tree.find_backable_chain(Ancestors::new(), 0, |_| true), vec![]); + + let ancestors: Ancestors = + [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] + .into_iter() + .collect(); + assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]); + + let ancestors: Ancestors = + [candidates[2].hash(), candidates[0].hash()].into_iter().collect(); + assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]); + } + + // Now do some tests only on the tree with cycles + if has_cycle { + // Exceeds the maximum tree depth. 0-1-2-3-4-1-2-3-4, when the tree stops at + // 0-1-2-3-4-1-2-3. + let ancestors: Ancestors = [ + candidates[0].hash(), + candidates[1].hash(), + candidates[2].hash(), + candidates[3].hash(), + candidates[4].hash(), + ] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[4].hash()); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // 0-1-2. 
+ let ancestors: Ancestors = + [candidates[0].hash(), candidates[1].hash(), candidates[2].hash()] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[2].hash()); + assert_eq!( + tree.find_backable_chain(ancestors.clone(), 1, |_| true), + [3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + assert_eq!( + tree.find_backable_chain(ancestors, 5, |_| true), + [3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // 0-1 + let ancestors: Ancestors = + [candidates[0].hash(), candidates[1].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[1].hash()); + assert_eq!( + tree.find_backable_chain(ancestors, 6, |_| true), + [2, 3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>(), + ); + + // For 0-1-2-3-4-5, there's more than 1 way of finding this path in + // the tree. `None` should be returned. The runtime should not have accepted this. + let ancestors: Ancestors = [ + candidates[0].hash(), + candidates[1].hash(), + candidates[2].hash(), + candidates[3].hash(), + candidates[4].hash(), + candidates[5].hash(), + ] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()); + assert_eq!(res, None); + assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); + } +} + +#[test] +fn graceful_cycle_of_0() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0a].into(), // input same as output + 0, + ); + let candidate_a_hash = candidate_a.hash(); + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + let max_depth = 4; + storage.add_candidate(candidate_a, pvd_a).unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + pending_availability, + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 1); + assert_eq!(tree.nodes.len(), max_depth + 1); + + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); + assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1)); + assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2)); + assert_eq!(tree.nodes[4].parent, NodePointer::Storage(3)); + + assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[1].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[3].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash); + + for count in 1..10 { + assert_eq!( + tree.find_backable_chain(Ancestors::new(), count, |_| true), + iter::repeat(candidate_a_hash) + .take(std::cmp::min(count as usize, max_depth + 1)) + .collect::>() + ); + assert_eq!( + tree.find_backable_chain([candidate_a_hash].into_iter().collect(), count - 1, |_| 
true), + iter::repeat(candidate_a_hash) + .take(std::cmp::min(count as usize - 1, max_depth)) + .collect::>() + ); + } +} + +#[test] +fn graceful_cycle_of_1() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), // input same as output + 0, + ); + let candidate_a_hash = candidate_a.hash(); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0b].into(), + vec![0x0a].into(), // input same as output + 0, + ); + let candidate_b_hash = candidate_b.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + let max_depth = 4; + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + pending_availability, + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + assert_eq!(tree.nodes.len(), max_depth + 1); + + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); + assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1)); + assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2)); + assert_eq!(tree.nodes[4].parent, NodePointer::Storage(3)); + + assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[1].candidate_hash, candidate_b_hash); + assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[3].candidate_hash, candidate_b_hash); + assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash); + + assert_eq!(tree.find_backable_chain(Ancestors::new(), 1, |_| true), vec![candidate_a_hash],); + assert_eq!( + tree.find_backable_chain(Ancestors::new(), 2, |_| true), + vec![candidate_a_hash, candidate_b_hash], + ); + assert_eq!( + tree.find_backable_chain(Ancestors::new(), 3, |_| true), + vec![candidate_a_hash, candidate_b_hash, candidate_a_hash], + ); + assert_eq!( + tree.find_backable_chain([candidate_a_hash].into_iter().collect(), 2, |_| true), + vec![candidate_b_hash, candidate_a_hash], + ); + + assert_eq!( + tree.find_backable_chain(Ancestors::new(), 6, |_| true), + vec![ + candidate_a_hash, + candidate_b_hash, + candidate_a_hash, + candidate_b_hash, + candidate_a_hash + ], + ); + + for count in 3..7 { + assert_eq!( + tree.find_backable_chain( + [candidate_a_hash, candidate_b_hash].into_iter().collect(), + count, + |_| true + ), + vec![candidate_a_hash, candidate_b_hash, candidate_a_hash], + ); + } +} + +#[test] +fn hypothetical_depths_known_and_unknown() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), // input same as output + 0, + ); + let candidate_a_hash = candidate_a.hash(); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0b].into(), + vec![0x0a].into(), // input 
same as output + 0, + ); + let candidate_b_hash = candidate_b.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + let max_depth = 4; + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + pending_availability, + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + assert_eq!(tree.nodes.len(), max_depth + 1); + + assert_eq!( + tree.hypothetical_depths( + candidate_a_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + false, + ), + vec![0, 2, 4], + ); + + assert_eq!( + tree.hypothetical_depths( + candidate_b_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + false, + ), + vec![1, 3], + ); + + assert_eq!( + tree.hypothetical_depths( + CandidateHash(Hash::repeat_byte(21)), + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + false, + ), + vec![0, 2, 4], + ); + + assert_eq!( + tree.hypothetical_depths( + CandidateHash(Hash::repeat_byte(22)), + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + false, + ), + vec![1, 3] + ); +} + +#[test] +fn hypothetical_depths_stricter_on_complete() { + let storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 1000, // watermark is illegal + ); + + let candidate_a_hash = candidate_a.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + let max_depth = 4; + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + pending_availability, + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + assert_eq!( + tree.hypothetical_depths( + candidate_a_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + false, + ), + vec![0], + ); + + assert!(tree + .hypothetical_depths( + candidate_a_hash, + HypotheticalCandidate::Complete { + receipt: Cow::Owned(candidate_a), + persisted_validation_data: Cow::Owned(pvd_a), + }, + &storage, + false, + ) + .is_empty()); +} + +#[test] +fn hypothetical_depths_backed_in_path() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + 
vec![0x0b].into(), + 0, + ); + let candidate_a_hash = candidate_a.hash(); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0b].into(), + vec![0x0c].into(), + 0, + ); + let candidate_b_hash = candidate_b.hash(); + + let (pvd_c, candidate_c) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0b].into(), + vec![0x0d].into(), + 0, + ); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + let max_depth = 4; + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + storage.add_candidate(candidate_c, pvd_c).unwrap(); + + // `A` and `B` are backed, `C` is not. + storage.mark_backed(&candidate_a_hash); + storage.mark_backed(&candidate_b_hash); + + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + pending_availability, + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 3); + assert_eq!(tree.nodes.len(), 3); + + let candidate_d_hash = CandidateHash(Hash::repeat_byte(0xAA)); + + assert_eq!( + tree.hypothetical_depths( + candidate_d_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + true, + ), + vec![0], + ); + + assert_eq!( + tree.hypothetical_depths( + candidate_d_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + true, + ), + vec![2], + ); + + assert_eq!( + tree.hypothetical_depths( + candidate_d_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0d]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + true, + ), + Vec::::new(), + ); + + assert_eq!( + tree.hypothetical_depths( + candidate_d_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0d]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + false, + ), + vec![2], // non-empty if `false`. + ); +} + +#[test] +fn pending_availability_in_scope() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + let relay_parent_b = Hash::repeat_byte(2); + let relay_parent_c = Hash::repeat_byte(3); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + let candidate_a_hash = candidate_a.hash(); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_b, + 1, + vec![0x0b].into(), + vec![0x0c].into(), + 1, + ); + + // Note that relay parent `a` is not allowed. 
+	let base_constraints = make_constraints(1, vec![], vec![0x0a].into());
+
+	let relay_parent_a_info = RelayChainBlockInfo {
+		number: pvd_a.relay_parent_number,
+		hash: relay_parent_a,
+		storage_root: pvd_a.relay_parent_storage_root,
+	};
+	let pending_availability = vec![PendingAvailability {
+		candidate_hash: candidate_a_hash,
+		relay_parent: relay_parent_a_info,
+	}];
+
+	let relay_parent_b_info = RelayChainBlockInfo {
+		number: pvd_b.relay_parent_number,
+		hash: relay_parent_b,
+		storage_root: pvd_b.relay_parent_storage_root,
+	};
+	let relay_parent_c_info = RelayChainBlockInfo {
+		number: pvd_b.relay_parent_number + 1,
+		hash: relay_parent_c,
+		storage_root: Hash::zero(),
+	};
+
+	let max_depth = 4;
+	storage.add_candidate(candidate_a, pvd_a).unwrap();
+	storage.add_candidate(candidate_b, pvd_b).unwrap();
+	storage.mark_backed(&candidate_a_hash);
+
+	let scope = Scope::with_ancestors(
+		para_id,
+		relay_parent_c_info,
+		base_constraints,
+		pending_availability,
+		max_depth,
+		vec![relay_parent_b_info],
+	)
+	.unwrap();
+	let tree = FragmentTree::populate(scope, &storage);
+
+	let candidates: Vec<_> = tree.candidates().collect();
+	assert_eq!(candidates.len(), 2);
+	assert_eq!(tree.nodes.len(), 2);
+
+	let candidate_d_hash = CandidateHash(Hash::repeat_byte(0xAA));
+
+	assert_eq!(
+		tree.hypothetical_depths(
+			candidate_d_hash,
+			HypotheticalCandidate::Incomplete {
+				parent_head_data_hash: HeadData::from(vec![0x0b]).hash(),
+				relay_parent: relay_parent_c,
+			},
+			&storage,
+			false,
+		),
+		vec![1],
+	);
+
+	assert_eq!(
+		tree.hypothetical_depths(
+			candidate_d_hash,
+			HypotheticalCandidate::Incomplete {
+				parent_head_data_hash: HeadData::from(vec![0x0c]).hash(),
+				relay_parent: relay_parent_b,
+			},
+			&storage,
+			false,
+		),
+		vec![2],
+	);
+}
-- 
GitLab


From 8891b70fe284f2d0a294f13d9a5afbca26a20513 Mon Sep 17 00:00:00 2001
From: Oliver Tale-Yazdi
Date: Tue, 16 Apr 2024 13:08:26 +0300
Subject: [PATCH 007/107] [pallet-broker] Use saturating math in input
 validation (#4151)

Changes:
- Saturate in the input validation of the `drop_history` function of
pallet-broker.

---------

Signed-off-by: Oliver Tale-Yazdi
---
 prdoc/pr_4151.prdoc                              | 11 +++++++++++
 substrate/frame/broker/src/dispatchable_impls.rs |  5 ++++-
 substrate/frame/broker/src/tests.rs              |  1 +
 3 files changed, 16 insertions(+), 1 deletion(-)
 create mode 100644 prdoc/pr_4151.prdoc

diff --git a/prdoc/pr_4151.prdoc b/prdoc/pr_4151.prdoc
new file mode 100644
index 00000000000..70b9f5e60e1
--- /dev/null
+++ b/prdoc/pr_4151.prdoc
@@ -0,0 +1,11 @@
+title: "[pallet-broker] Use saturating math in input validation"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Use saturating math in the pallet-broker input validation of the `drop_history` extrinsic. This
+      fixes a safeguard ensuring that only expired historic instantaneous pool records get dropped.
+
+crates:
+  - name: pallet-broker
+    bump: patch
diff --git a/substrate/frame/broker/src/dispatchable_impls.rs b/substrate/frame/broker/src/dispatchable_impls.rs
index c2e731462ca..b43911b6bc1 100644
--- a/substrate/frame/broker/src/dispatchable_impls.rs
+++ b/substrate/frame/broker/src/dispatchable_impls.rs
@@ -419,7 +419,10 @@ impl<T: Config> Pallet<T> {
 	pub(crate) fn do_drop_history(when: Timeslice) -> DispatchResult {
 		let config = Configuration::<T>::get().ok_or(Error::<T>::Uninitialized)?;
 		let status = Status::<T>::get().ok_or(Error::<T>::Uninitialized)?;
-		ensure!(status.last_timeslice > when + config.contribution_timeout, Error::<T>::StillValid);
+		ensure!(
+			status.last_timeslice > when.saturating_add(config.contribution_timeout),
+			Error::<T>::StillValid
+		);
 		let record = InstaPoolHistory::<T>::take(when).ok_or(Error::<T>::NoHistory)?;
 		if let Some(payout) = record.maybe_payout {
 			let _ = Self::charge(&Self::account_id(), payout);
diff --git a/substrate/frame/broker/src/tests.rs b/substrate/frame/broker/src/tests.rs
index d738d344503..8ec0c6d158b 100644
--- a/substrate/frame/broker/src/tests.rs
+++ b/substrate/frame/broker/src/tests.rs
@@ -146,6 +146,7 @@ fn drop_history_works() {
 		advance_to(16);
 		assert_eq!(InstaPoolHistory::<Test>::iter().count(), 6);
 		advance_to(17);
+		assert_noop!(Broker::do_drop_history(u32::MAX), Error::<Test>::StillValid);
 		assert_noop!(Broker::do_drop_history(region.begin), Error::<Test>::StillValid);
 		advance_to(18);
 		assert_eq!(InstaPoolHistory::<Test>::iter().count(), 6);
-- 
GitLab


From dd5dbf390271976f3b61ed6a3af70795e7696e71 Mon Sep 17 00:00:00 2001
From: Javier Bullrich
Date: Tue, 16 Apr 2024 12:10:12 +0200
Subject: [PATCH 008/107] added check to ensure there are approvals (#4152)

Follow up to #3431

Added an API check to verify that there are pre-existing approvals in
the PR before dismissing reviews and posting a message.

---
 .github/workflows/review-trigger.yml | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/review-trigger.yml b/.github/workflows/review-trigger.yml
index 061cf4ab09e..007797d2f4b 100644
--- a/.github/workflows/review-trigger.yml
+++ b/.github/workflows/review-trigger.yml
@@ -21,25 +21,29 @@ jobs:
       - name: Skip merge queue
         if: ${{ contains(github.ref, 'gh-readonly-queue') }}
         run: exit 0
-      - name: Get comments
+      - name: Get PR data
         id: comments
-        run: echo "bodies=$(gh pr view ${{ github.event.number }} --repo ${{ github.repository }} --json comments --jq '[.comments[].body]')" >> "$GITHUB_OUTPUT"
+        run: |
+          echo "bodies=$(gh pr view ${{ github.event.pull_request.number }} --repo ${{ github.repository }} --json comments --jq '[.comments[].body]')" >> "$GITHUB_OUTPUT"
+          echo "reviews=$(gh api repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/reviews --jq '[.[].state]')" >> "$GITHUB_OUTPUT"
         env:
           GH_TOKEN: ${{ github.token }}
      - name: Fail when author pushes new code
        # Require new reviews when the author is pushing and he is not a member
        if: |
+          contains(fromJson(steps.comments.outputs.reviews), 'APPROVED') &&
          github.event_name == 'pull_request_target' &&
          github.event.action == 'synchronize' &&
          github.event.sender.login == github.event.pull_request.user.login &&
          github.event.pull_request.author_association != 'MEMBER'
        run: |
+          echo "User's association is ${{ github.event.pull_request.author_association }}"
          # We get the list of reviewers who approved the PR
-          REVIEWERS=$(gh api repos/${{ github.repository }}/pulls/${{ github.event.number }}/reviews \
+          REVIEWERS=$(gh api repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/reviews \
            --jq '{reviewers: [.[] | select(.state == "APPROVED") | .user.login]}')

          # We request them to review again
-          echo $REVIEWERS | gh api --method POST repos/${{ github.repository }}/pulls/${{ github.event.number }}/requested_reviewers --input -
+          echo $REVIEWERS | gh api --method POST repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/requested_reviewers --input -

          echo "::error::Project needs to be reviewed again"
          exit 1
@@ -49,7 +53,7 @@ jobs:
        # If the previous step failed and github-actions hasn't commented yet we comment instructions
        if: failure() && !contains(fromJson(steps.comments.outputs.bodies), 'Review required! Latest push from author must always be reviewed')
        run: |
-          gh pr comment ${{ github.event.number }} --repo ${{ github.repository }} --body "Review required! Latest push from author must always be reviewed"
+          gh pr comment ${{ github.event.pull_request.number }} --repo ${{ github.repository }} --body "Review required! Latest push from author must always be reviewed"
        env:
          GH_TOKEN: ${{ github.token }}
          COMMENTS: ${{ steps.comments.outputs.users }}
-- 
GitLab


From 61d45ed72b2f8afade997e1a973327f2ada02aa0 Mon Sep 17 00:00:00 2001
From: Maksym H <1177472+mordamax@users.noreply.github.com>
Date: Tue, 16 Apr 2024 11:11:22 +0100
Subject: [PATCH 009/107] Update review-trigger.yml (#4137)

Followup after https://github.com/paritytech/polkadot-sdk/pull/3431

Per
https://stackoverflow.com/questions/63188674/github-actions-detect-author-association
and https://michaelheap.com/github-actions-check-permission/,
it looks like checking only that the author is NOT a MEMBER is not
sufficient; a check that the author is also NOT a CONTRIBUTOR should be
included.

---
 .github/workflows/review-trigger.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/review-trigger.yml b/.github/workflows/review-trigger.yml
index 007797d2f4b..7f7d9d36278 100644
--- a/.github/workflows/review-trigger.yml
+++ b/.github/workflows/review-trigger.yml
@@ -35,6 +35,7 @@ jobs:
          github.event_name == 'pull_request_target' &&
          github.event.action == 'synchronize' &&
          github.event.sender.login == github.event.pull_request.user.login &&
+          github.event.pull_request.author_association != 'CONTRIBUTOR' &&
          github.event.pull_request.author_association != 'MEMBER'
        run: |
-- 
GitLab


From 753bf2d860e083b5da25fe4171c0e540ddad4888 Mon Sep 17 00:00:00 2001
From: Oliver Tale-Yazdi
Date: Tue, 16 Apr 2024 18:17:09 +0300
Subject: [PATCH 010/107] [prdoc] Update docs (#3998)

Updating the prdoc doc file to be a bit more useful for new contributors
and adding a SemVer section.

---------

Signed-off-by: Oliver Tale-Yazdi
---
 docs/contributor/prdoc.md | 100 +++++++++++++++++++++-----------------
 prdoc/schema_user.json    |   5 ++
 2 files changed, 60 insertions(+), 45 deletions(-)

diff --git a/docs/contributor/prdoc.md b/docs/contributor/prdoc.md
index af0ede5107a..0c8165af40f 100644
--- a/docs/contributor/prdoc.md
+++ b/docs/contributor/prdoc.md
@@ -1,55 +1,31 @@
 # PRDoc

-## Intro
-
-With the merge of [PR #1946](https://github.com/paritytech/polkadot-sdk/pull/1946), a new method for
-documenting changes has been introduced: `prdoc`. The [prdoc repository](https://github.com/paritytech/prdoc)
-contains more documentation and tooling.
-
-The current document describes how to quickly get started authoring `PRDoc` files.
+A [prdoc](https://github.com/paritytech/prdoc) is like a changelog but for a Pull Request. We use this approach to
+record changes on a crate level. This information is then processed by the release team to apply the correct crate
+version bumps and to generate the CHANGELOG of the next release.

 ## Requirements

-When creating a PR, the author needs to decide with the `R0` label whether the change (PR) should
-appear in the release notes or not.
-
-Labelling a PR with `R0` means that no `PRDoc` is required.
-
-A PR without the `R0` label **does** require a valid `PRDoc` file to be introduced in the PR.
-
-## PRDoc how-to
-
-A `.prdoc` file is a YAML file with a defined structure (ie JSON Schema).
-
-For significant changes, a `.prdoc` file is mandatory and the file must meet the following
-requirements:
-- file named `pr_NNNN.prdoc` where `NNNN` is the PR number.
-  For convenience, those files can also contain a short description: `pr_NNNN_foobar.prdoc`.
-- located under the [`prdoc` folder](https://github.com/paritytech/polkadot-sdk/tree/master/prdoc) of the repository
-- compliant with the [JSON schema](https://json-schema.org/) defined in `prdoc/schema_user.json`
-
-Those requirements can be fulfilled manually without any tooling but a text editor.
-
-## Tooling
-
-Users might find the following helpers convenient:
-- Setup VSCode to be aware of the prdoc schema: see [using VSCode](https://github.com/paritytech/prdoc#using-vscode)
-- Using the `prdoc` cli to:
-  - generate a `PRDoc` file from a [template defined in the Polkadot SDK
-    repo](https://github.com/paritytech/polkadot-sdk/blob/master/prdoc/.template.prdoc) simply providing a PR number
-  - check the validity of one or more `PRDoc` files
+When creating a PR, the author needs to decide with the `R0-silent` label whether the PR has to contain a prdoc. The
+`R0` label should only be placed for No-OP changes like correcting a typo in a comment or CI stuff. If unsure, ping
+the [CODEOWNERS](../../.github/CODEOWNERS) for advice.

-## `prdoc` cli usage
+## PRDoc How-To

-The `prdoc` cli documentation can be found at https://github.com/paritytech/prdoc#prdoc
+A `.prdoc` file is a YAML file with a defined structure (ie JSON Schema). Please follow these steps to generate one:

-tldr:
-- `prdoc generate <pr_number>`
-- `prdoc check -n <pr_number>`
+1. Install the [`prdoc` CLI](https://github.com/paritytech/prdoc) by running `cargo install prdoc`.
+1. Open a Pull Request and get the PR number.
+1. Generate the file with `prdoc generate <pr_number>`. The output filename will be printed.
+1. Optional: Install the `prdoc/schema_user.json` schema in your editor, for example
+[VsCode](https://github.com/paritytech/prdoc?tab=readme-ov-file#schemas).
+1. Edit your `.prdoc` file according to the [Audience](#pick-an-audience) and [SemVer](#record-semver-changes) sections.
+1. Check your prdoc with `prdoc check -n <pr_number>`. This is optional since the CI will also check it.

-where <pr_number> is the PR number.
+> **Tip:** GitHub CLI and jq can be used to provide the number of your PR to generate the correct file:
+> `prdoc generate $(gh pr view --json number | jq '.number') -o prdoc`

-## Pick an audience
+## Pick An Audience

 While describing a PR, the author needs to consider which audience(s) need to be addressed.
 The list of valid audiences is described and documented in the JSON schema as follows:
@@ -65,7 +41,7 @@ The list of valid audiences is described and documented in the JSON schema as fo
 - `Runtime User`: Anyone using the runtime. This can be a token holder or a dev writing a front end for a chain.
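+
+For instance, a sketch of a `doc` section that addresses two audiences could look like the following
+(`pallet-example` and both descriptions are purely hypothetical; the exact fields are governed by
+`prdoc/schema_user.json`):
+
+```yaml
+doc:
+  - audience: Runtime Dev
+    description: |
+      `pallet-example` now uses safe math in its input validation.
+  - audience: Runtime User
+    description: |
+      Extrinsics of `pallet-example` no longer fail on inputs close to the integer limits.
+```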
-## Tips
+If you have a change that affects multiple audiences, you can either list them all, or write multiple sections and
+re-phrase the changes for each audience.
+
+## Record SemVer Changes
+
+All published crates that got modified need to have an entry in the `crates` section of your `PRDoc`. This entry tells
+the release team how to bump the crate version prior to the next release. It is very important that this information is
+correct, otherwise it could break the code of downstream teams.
+
+The bump can either be `major`, `minor`, `patch` or `none`. The three first options are defined by
+[rust-lang.org](https://doc.rust-lang.org/cargo/reference/semver.html), whereas `None` should be picked if no other
+applies. The `None` option is equivalent to the `R0-silent` label, but on a crate level. Experimental and private APIs
+are exempt from bumping and can be broken at any time. Please read the [Crate Section](../RELEASE.md) of the RELEASE doc
+about them.
+
+> **Note**: There is currently no CI in place to sanity check this information, but it should be added soon.
+
+### Example
+
+For example, when you have modified two crates and want to record the changes:
+
+```yaml
+crates:
+- name: frame-example
+  bump: major
+- name: frame-example-pallet
+  bump: minor
+```
+
+It means that downstream code using `frame-example-pallet` is still guaranteed to work as before, while code using
+`frame-example` might break.
+
+### Dependencies
-
-The PRDoc schema is defined in each repo and usually is quite restrictive.
-You cannot simply add a new property to a `PRDoc` file unless the Schema allows it.
+
+A crate that depends on another crate will automatically inherit its `major` bumps. This means that you do not need to
+bump a crate that had a SemVer breaking change only from re-exporting another crate with a breaking change.
+`minor` and `patch` bumps do not need to be inherited, since `cargo` will automatically update them to the latest
+compatible version.
diff --git a/prdoc/schema_user.json b/prdoc/schema_user.json
index 6b44b1b28df..1fa4b8d1202 100644
--- a/prdoc/schema_user.json
+++ b/prdoc/schema_user.json
@@ -200,6 +200,11 @@
         "const": "patch",
         "title": "Patch",
         "description": "A bump to the third leftmost non-zero digit of the version number."
+      },
+      {
+        "const": "none",
+        "title": "None",
+        "description": "This change requires no SemVer bump (e.g. change was a test)."
       }
     ]
   },
-- 
GitLab


From 6f3d890ed35bfdee3e3f7d59018345635a62d1cd Mon Sep 17 00:00:00 2001
From: Muharem
Date: Tue, 16 Apr 2024 18:11:14 +0200
Subject: [PATCH 011/107] FRAME: Unity Balance Conversion for Different IDs of
 Native Asset (#3659)

Introduce types to define 1:1 balance conversion for different relative
asset ids/locations of the native asset.

Examples:
the native asset on Asset Hub presented as the `VersionedLocatableAsset`
type in the context of the Relay Chain is
```
{
	`location`: (0, Parachain(1000)),
	`asset_id`: (1, Here),
}
```
and its balance should be converted 1:1 by implementations of the
`ConversionToAssetBalance` trait.
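
As an illustrative sketch only (it assumes `frame_support`'s `ConversionToAssetBalance`
trait; the type name `UnityBalanceConversion` is hypothetical and not necessarily what
this PR introduces), such a unity conversion can look roughly like this:

```rust
use frame_support::traits::tokens::ConversionToAssetBalance;

/// Treats every relative id/location of the native asset as the native asset
/// itself: the balance passes through unchanged.
pub struct UnityBalanceConversion;

impl<AssetId, Balance> ConversionToAssetBalance<Balance, AssetId, Balance>
	for UnityBalanceConversion
{
	type Error = ();

	fn to_asset_balance(balance: Balance, _asset_id: AssetId) -> Result<Balance, Self::Error> {
		// 1:1: the native asset keeps its value regardless of how its id is expressed.
		Ok(balance)
	}
}
```

Because the conversion is a pure pass-through, the same implementation can serve any
relative id/location under which the native asset appears.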
--------- Co-authored-by: Branislav Kontur --- Cargo.lock | 35 +++ Cargo.toml | 1 + .../emulated/chains/relays/rococo/src/lib.rs | 2 + .../tests/assets/asset-hub-rococo/Cargo.toml | 5 + .../assets/asset-hub-rococo/src/tests/mod.rs | 1 + .../asset-hub-rococo/src/tests/treasury.rs | 270 ++++++++++++++++++ .../collectives-westend/Cargo.toml | 43 +++ .../collectives-westend/src/lib.rs | 30 ++ .../src/tests/fellowship_treasury.rs | 236 +++++++++++++++ .../collectives-westend/src/tests/mod.rs | 16 ++ .../collectives-westend/src/fellowship/mod.rs | 24 +- polkadot/runtime/common/src/impls.rs | 22 +- polkadot/runtime/rococo/src/lib.rs | 28 +- polkadot/runtime/westend/src/lib.rs | 21 +- polkadot/xcm/xcm-builder/src/barriers.rs | 23 ++ polkadot/xcm/xcm-builder/src/lib.rs | 4 +- prdoc/pr_3659.prdoc | 12 + substrate/frame/support/src/traits.rs | 2 +- substrate/frame/support/src/traits/members.rs | 9 + substrate/frame/support/src/traits/tokens.rs | 4 +- .../frame/support/src/traits/tokens/misc.rs | 28 ++ 21 files changed, 790 insertions(+), 26 deletions(-) create mode 100644 cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs create mode 100644 cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml create mode 100644 cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/lib.rs create mode 100644 cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs create mode 100644 cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/mod.rs create mode 100644 prdoc/pr_3659.prdoc diff --git a/Cargo.lock b/Cargo.lock index 69395bf281e..4ee0dfdccff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -820,17 +820,22 @@ dependencies = [ "assert_matches", "asset-hub-rococo-runtime", "asset-test-utils", + "cumulus-pallet-parachain-system", "emulated-integration-tests-common", "frame-support", "pallet-asset-conversion", "pallet-assets", "pallet-balances", "pallet-message-queue", + "pallet-treasury", + "pallet-utility", "pallet-xcm", "parachains-common", "parity-scale-codec", "penpal-runtime", + "polkadot-runtime-common", "rococo-runtime", + "rococo-runtime-constants", "rococo-system-emulated-network", "sp-runtime", "staging-xcm", @@ -2830,6 +2835,36 @@ dependencies = [ "testnet-parachains-constants", ] +[[package]] +name = "collectives-westend-integration-tests" +version = "1.0.0" +dependencies = [ + "assert_matches", + "asset-hub-westend-runtime", + "collectives-westend-runtime", + "cumulus-pallet-parachain-system", + "cumulus-pallet-xcmp-queue", + "emulated-integration-tests-common", + "frame-support", + "pallet-asset-rate", + "pallet-assets", + "pallet-balances", + "pallet-message-queue", + "pallet-treasury", + "pallet-utility", + "pallet-xcm", + "parachains-common", + "parity-scale-codec", + "polkadot-runtime-common", + "sp-runtime", + "staging-xcm", + "staging-xcm-executor", + "testnet-parachains-constants", + "westend-runtime", + "westend-runtime-constants", + "westend-system-emulated-network", +] + [[package]] name = "collectives-westend-runtime" version = "3.0.0" diff --git a/Cargo.toml b/Cargo.toml index 460c49f7f37..b3ae264bc3a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -103,6 +103,7 @@ members = [ "cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend", "cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo", 
"cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend", + "cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend", "cumulus/parachains/integration-tests/emulated/tests/people/people-rococo", "cumulus/parachains/integration-tests/emulated/tests/people/people-westend", "cumulus/parachains/pallets/collective-content", diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs index 379a29d697b..7a3a936ec97 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs @@ -39,6 +39,8 @@ decl_test_relay_chains! { Hrmp: rococo_runtime::Hrmp, Identity: rococo_runtime::Identity, IdentityMigrator: rococo_runtime::IdentityMigrator, + Treasury: rococo_runtime::Treasury, + AssetRate: rococo_runtime::AssetRate, } }, } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml index c5a672234a0..ddd6d2d0498 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml @@ -21,15 +21,20 @@ pallet-balances = { path = "../../../../../../../substrate/frame/balances", defa pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false } pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } +pallet-treasury = { path = "../../../../../../../substrate/frame/treasury", default-features = false } +pallet-utility = { path = "../../../../../../../substrate/frame/utility", default-features = false } # Polkadot xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } +polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common" } +rococo-runtime-constants = { path = "../../../../../../../polkadot/runtime/rococo/constants" } # Cumulus asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } +cumulus-pallet-parachain-system = { path = "../../../../../../pallets/parachain-system", default-features = false } parachains-common = { path = "../../../../../common" } asset-hub-rococo-runtime = { path = "../../../../../runtimes/assets/asset-hub-rococo" } penpal-runtime = { path = "../../../../../runtimes/testing/penpal" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs index 2402989225a..346af308238 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs @@ -19,3 +19,4 @@ mod send; mod set_xcm_versions; mod 
swap; mod teleport; +mod treasury; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs new file mode 100644 index 00000000000..01bf40ae8fd --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs @@ -0,0 +1,270 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::imports::*; +use emulated_integration_tests_common::accounts::{ALICE, BOB}; +use frame_support::{ + dispatch::RawOrigin, + sp_runtime::traits::Dispatchable, + traits::{ + fungible::Inspect, + fungibles::{Create, Inspect as FungiblesInspect, Mutate}, + }, +}; +use parachains_common::AccountId; +use polkadot_runtime_common::impls::VersionedLocatableAsset; +use rococo_runtime::OriginCaller; +use rococo_runtime_constants::currency::GRAND; +use xcm_executor::traits::ConvertLocation; + +// Fund Treasury account on Asset Hub from Treasury account on Relay Chain with ROCs. +#[test] +fn spend_roc_on_asset_hub() { + // initial treasury balance on Asset Hub in ROCs. + let treasury_balance = 9_000 * GRAND; + // the balance to be spent on Asset Hub. + let treasury_spend_balance = 1_000 * GRAND; + + let init_alice_balance = AssetHubRococo::execute_with(|| { + <::Balances as Inspect<_>>::balance( + &AssetHubRococo::account_id_of(ALICE), + ) + }); + + Rococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeCall = ::RuntimeCall; + type Runtime = ::Runtime; + type Balances = ::Balances; + type Treasury = ::Treasury; + + // Fund Treasury account on Asset Hub with ROCs. + + let root = ::RuntimeOrigin::root(); + let treasury_account = Treasury::account_id(); + + // Mint assets to Treasury account on Relay Chain. + assert_ok!(Balances::force_set_balance( + root.clone(), + treasury_account.clone().into(), + treasury_balance * 2, + )); + + let native_asset = Location::here(); + let asset_hub_location: Location = [Parachain(1000)].into(); + let treasury_location: Location = (Parent, PalletInstance(18)).into(); + + let teleport_call = RuntimeCall::Utility(pallet_utility::Call::::dispatch_as { + as_origin: bx!(OriginCaller::system(RawOrigin::Signed(treasury_account))), + call: bx!(RuntimeCall::XcmPallet(pallet_xcm::Call::::teleport_assets { + dest: bx!(VersionedLocation::V4(asset_hub_location.clone())), + beneficiary: bx!(VersionedLocation::V4(treasury_location)), + assets: bx!(VersionedAssets::V4( + Asset { id: native_asset.clone().into(), fun: treasury_balance.into() }.into() + )), + fee_asset_item: 0, + })), + }); + + // Dispatched from Root to `dispatch_as` `Signed(treasury_account)`. + assert_ok!(teleport_call.dispatch(root)); + + assert_expected_events!( + Rococo, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { ..
}) => {}, + ] + ); + }); + + Rococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeCall = ::RuntimeCall; + type RuntimeOrigin = ::RuntimeOrigin; + type Runtime = ::Runtime; + type Treasury = ::Treasury; + + // Fund Alice account from Rococo Treasury account on Asset Hub. + + let treasury_origin: RuntimeOrigin = + rococo_runtime::governance::pallet_custom_origins::Origin::Treasurer.into(); + + let alice_location: Location = + [Junction::AccountId32 { network: None, id: Rococo::account_id_of(ALICE).into() }] + .into(); + let asset_hub_location: Location = [Parachain(1000)].into(); + let native_asset = Location::parent(); + + let treasury_spend_call = RuntimeCall::Treasury(pallet_treasury::Call::::spend { + asset_kind: bx!(VersionedLocatableAsset::V4 { + location: asset_hub_location.clone(), + asset_id: native_asset.into(), + }), + amount: treasury_spend_balance, + beneficiary: bx!(VersionedLocation::V4(alice_location)), + valid_from: None, + }); + + assert_ok!(treasury_spend_call.dispatch(treasury_origin)); + + // Claim the spend. + + let bob_signed = RuntimeOrigin::signed(Rococo::account_id_of(BOB)); + assert_ok!(Treasury::payout(bob_signed.clone(), 0)); + + assert_expected_events!( + Rococo, + vec![ + RuntimeEvent::Treasury(pallet_treasury::Event::AssetSpendApproved { .. }) => {}, + RuntimeEvent::Treasury(pallet_treasury::Event::Paid { .. }) => {}, + ] + ); + }); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type Balances = ::Balances; + + // Ensure that the funds were deposited into Alice's account. + + let alice_account = AssetHubRococo::account_id_of(ALICE); + assert_eq!( + >::balance(&alice_account), + treasury_spend_balance + init_alice_balance + ); + + // Assert events triggered by xcm pay program: + // 1. treasury asset transferred to spend beneficiary; + // 2. response to Relay Chain Treasury pallet instance sent back; + // 3. XCM program completed; + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Transfer { .. }) => {}, + RuntimeEvent::ParachainSystem(cumulus_pallet_parachain_system::Event::UpwardMessageSent { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true ,.. }) => {}, + ] + ); + }); +} + +#[test] +fn create_and_claim_treasury_spend_in_usdt() { + const ASSET_ID: u32 = 1984; + const SPEND_AMOUNT: u128 = 1_000_000; + // treasury location from a sibling parachain. + let treasury_location: Location = Location::new(1, PalletInstance(18)); + // treasury account on a sibling parachain. + let treasury_account = + asset_hub_rococo_runtime::xcm_config::LocationToAccountId::convert_location( + &treasury_location, + ) + .unwrap(); + let asset_hub_location = + v3::Location::new(0, v3::Junction::Parachain(AssetHubRococo::para_id().into())); + let root = ::RuntimeOrigin::root(); + // asset kind to be spent from the treasury. + let asset_kind = VersionedLocatableAsset::V3 { + location: asset_hub_location, + asset_id: v3::AssetId::Concrete( + (v3::Junction::PalletInstance(50), v3::Junction::GeneralIndex(ASSET_ID.into())).into(), + ), + }; + // treasury spend beneficiary. + let alice: AccountId = Rococo::account_id_of(ALICE); + let bob: AccountId = Rococo::account_id_of(BOB); + let bob_signed = ::RuntimeOrigin::signed(bob.clone()); + + AssetHubRococo::execute_with(|| { + type Assets = ::Assets; + + // create an asset class and mint some assets to the treasury account.
+ assert_ok!(>::create( + ASSET_ID, + treasury_account.clone(), + true, + SPEND_AMOUNT / 2 + )); + assert_ok!(>::mint_into(ASSET_ID, &treasury_account, SPEND_AMOUNT * 4)); + // beneficiary has zero balance. + assert_eq!(>::balance(ASSET_ID, &alice,), 0u128,); + }); + + Rococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type Treasury = ::Treasury; + type AssetRate = ::AssetRate; + + // create a conversion rate from `asset_kind` to the native currency. + assert_ok!(AssetRate::create(root.clone(), Box::new(asset_kind.clone()), 2.into())); + + // create and approve a treasury spend. + assert_ok!(Treasury::spend( + root, + Box::new(asset_kind), + SPEND_AMOUNT, + Box::new(Location::new(0, Into::<[u8; 32]>::into(alice.clone())).into()), + None, + )); + // claim the spend. + assert_ok!(Treasury::payout(bob_signed.clone(), 0)); + + assert_expected_events!( + Rococo, + vec![ + RuntimeEvent::Treasury(pallet_treasury::Event::Paid { .. }) => {}, + ] + ); + }); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type Assets = ::Assets; + + // assert events triggered by xcm pay program + // 1. treasury asset transferred to spend beneficiary + // 2. response to Relay Chain treasury pallet instance sent back + // 3. XCM program completed + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::Assets(pallet_assets::Event::Transferred { asset_id: id, from, to, amount }) => { + id: id == &ASSET_ID, + from: from == &treasury_account, + to: to == &alice, + amount: amount == &SPEND_AMOUNT, + }, + RuntimeEvent::ParachainSystem(cumulus_pallet_parachain_system::Event::UpwardMessageSent { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true ,.. }) => {}, + ] + ); + // beneficiary received the assets from the treasury. + assert_eq!(>::balance(ASSET_ID, &alice,), SPEND_AMOUNT,); + }); + + Rococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type Treasury = ::Treasury; + + // check the payment status to ensure the response from the AssetHub was received. + assert_ok!(Treasury::check_status(bob_signed, 0)); + assert_expected_events!( + Rococo, + vec![ + RuntimeEvent::Treasury(pallet_treasury::Event::SpendProcessed { .. 
}) => {}, + ] + ); + }); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml new file mode 100644 index 00000000000..d1dbef9fc41 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "collectives-westend-integration-tests" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Collectives Westend runtime integration tests with xcm-emulator" +publish = false + +[lints] +workspace = true + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } +assert_matches = "1.5.0" + +# Substrate +sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } +pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } +pallet-asset-rate = { path = "../../../../../../../substrate/frame/asset-rate", default-features = false } +pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } +pallet-treasury = { path = "../../../../../../../substrate/frame/treasury", default-features = false } +pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } +pallet-utility = { path = "../../../../../../../substrate/frame/utility", default-features = false } + +# Polkadot +polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common" } +xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } +pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } +westend-runtime = { path = "../../../../../../../polkadot/runtime/westend" } +westend-runtime-constants = { path = "../../../../../../../polkadot/runtime/westend/constants" } + +# Cumulus +parachains-common = { path = "../../../../../../parachains/common" } +testnet-parachains-constants = { path = "../../../../../runtimes/constants", features = ["westend"] } +asset-hub-westend-runtime = { path = "../../../../../runtimes/assets/asset-hub-westend" } +collectives-westend-runtime = { path = "../../../../../runtimes/collectives/collectives-westend" } +cumulus-pallet-xcmp-queue = { default-features = false, path = "../../../../../../pallets/xcmp-queue" } +cumulus-pallet-parachain-system = { default-features = false, path = "../../../../../../pallets/parachain-system" } +emulated-integration-tests-common = { path = "../../../common", default-features = false } +westend-system-emulated-network = { path = "../../../networks/westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/lib.rs new file mode 100644 index 00000000000..97239330216 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/lib.rs @@ -0,0 +1,30 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub use xcm::{prelude::*, v3}; + +pub use emulated_integration_tests_common::xcm_emulator::{ + assert_expected_events, bx, Chain, RelayChain as Relay, TestExt, +}; +pub use westend_system_emulated_network::{ + asset_hub_westend_emulated_chain::AssetHubWestendParaPallet as AssetHubWestendPallet, + collectives_westend_emulated_chain::CollectivesWestendParaPallet as CollectivesWestendPallet, + westend_emulated_chain::WestendRelayPallet as WestendPallet, + AssetHubWestendPara as AssetHubWestend, CollectivesWestendPara as CollectivesWestend, + WestendRelay as Westend, +}; + +#[cfg(test)] +mod tests; diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs new file mode 100644 index 00000000000..bde1220e249 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs @@ -0,0 +1,236 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use asset_hub_westend_runtime::xcm_config::LocationToAccountId as AssetHubLocationToAccountId; +use emulated_integration_tests_common::accounts::ALICE; +use frame_support::{ + assert_ok, dispatch::RawOrigin, instances::Instance1, sp_runtime::traits::Dispatchable, + traits::fungible::Inspect, +}; +use polkadot_runtime_common::impls::VersionedLocatableAsset; +use westend_runtime::OriginCaller; +use westend_runtime_constants::currency::UNITS; +use xcm_executor::traits::ConvertLocation; + +// Fund Fellowship Treasury from Westend Treasury and spend from Fellowship Treasury. +#[test] +fn fellowship_treasury_spend() { + // initial treasury balance on Asset Hub in WNDs. + let treasury_balance = 20_000_000 * UNITS; + // target fellowship balance on Asset Hub in WNDs. + let fellowship_treasury_balance = 1_000_000 * UNITS; + // fellowship first spend balance in WNDs. 
+ let fellowship_spend_balance = 10_000 * UNITS; + + let init_alice_balance = AssetHubWestend::execute_with(|| { + <::Balances as Inspect<_>>::balance( + &AssetHubWestend::account_id_of(ALICE), + ) + }); + + Westend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeCall = ::RuntimeCall; + type Runtime = ::Runtime; + type Balances = ::Balances; + type Treasury = ::Treasury; + + // Fund Treasury account on Asset Hub with WNDs. + + let root = ::RuntimeOrigin::root(); + let treasury_account = Treasury::account_id(); + + // Mint assets to Treasury account on Relay Chain. + assert_ok!(Balances::force_set_balance( + root.clone(), + treasury_account.clone().into(), + treasury_balance * 2, + )); + + let native_asset = Location::here(); + let asset_hub_location: Location = [Parachain(1000)].into(); + let treasury_location: Location = (Parent, PalletInstance(37)).into(); + + let teleport_call = RuntimeCall::Utility(pallet_utility::Call::::dispatch_as { + as_origin: bx!(OriginCaller::system(RawOrigin::Signed(treasury_account))), + call: bx!(RuntimeCall::XcmPallet(pallet_xcm::Call::::teleport_assets { + dest: bx!(VersionedLocation::V4(asset_hub_location.clone())), + beneficiary: bx!(VersionedLocation::V4(treasury_location)), + assets: bx!(VersionedAssets::V4( + Asset { id: native_asset.clone().into(), fun: treasury_balance.into() }.into() + )), + fee_asset_item: 0, + })), + }); + + // Dispatched from Root to `dispatch_as` `Signed(treasury_account)`. + assert_ok!(teleport_call.dispatch(root)); + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + + Westend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeCall = ::RuntimeCall; + type RuntimeOrigin = ::RuntimeOrigin; + type Runtime = ::Runtime; + type Treasury = ::Treasury; + + // Fund Fellowship Treasury from Westend Treasury. + + let treasury_origin: RuntimeOrigin = + westend_runtime::governance::pallet_custom_origins::Origin::Treasurer.into(); + let fellowship_treasury_location: Location = + Location::new(1, [Parachain(1001), PalletInstance(65)]); + let asset_hub_location: Location = [Parachain(1000)].into(); + let native_asset = Location::parent(); + + let treasury_spend_call = RuntimeCall::Treasury(pallet_treasury::Call::::spend { + asset_kind: bx!(VersionedLocatableAsset::V4 { + location: asset_hub_location.clone(), + asset_id: native_asset.into(), + }), + amount: fellowship_treasury_balance, + beneficiary: bx!(VersionedLocation::V4(fellowship_treasury_location)), + valid_from: None, + }); + + assert_ok!(treasury_spend_call.dispatch(treasury_origin)); + + // Claim the spend. + + let alice_signed = RuntimeOrigin::signed(Westend::account_id_of(ALICE)); + assert_ok!(Treasury::payout(alice_signed.clone(), 0)); + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::Treasury(pallet_treasury::Event::AssetSpendApproved { .. }) => {}, + RuntimeEvent::Treasury(pallet_treasury::Event::Paid { .. }) => {}, + ] + ); + }); + + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type Balances = ::Balances; + + // Ensure that the funds were deposited into the Fellowship Treasury account.
+ + let fellowship_treasury_location: Location = + Location::new(1, [Parachain(1001), PalletInstance(65)]); + let fellowship_treasury_account = + AssetHubLocationToAccountId::convert_location(&fellowship_treasury_location).unwrap(); + + assert_eq!( + >::balance(&fellowship_treasury_account), + fellowship_treasury_balance + ); + + // Assert events triggered by xcm pay program: + // 1. treasury asset transferred to spend beneficiary; + // 2. response to Relay Chain Treasury pallet instance sent back; + // 3. XCM program completed; + assert_expected_events!( + AssetHubWestend, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Transfer { .. }) => {}, + RuntimeEvent::ParachainSystem(cumulus_pallet_parachain_system::Event::UpwardMessageSent { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true ,.. }) => {}, + ] + ); + }); + + CollectivesWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeCall = ::RuntimeCall; + type RuntimeOrigin = ::RuntimeOrigin; + type Runtime = ::Runtime; + type FellowshipTreasury = + ::FellowshipTreasury; + + // Fund Alice account from Fellowship Treasury. + + let fellows_origin: RuntimeOrigin = + collectives_westend_runtime::fellowship::pallet_fellowship_origins::Origin::Fellows + .into(); + let asset_hub_location: Location = (Parent, Parachain(1000)).into(); + let native_asset = Location::parent(); + + let alice_location: Location = [Junction::AccountId32 { + network: None, + id: CollectivesWestend::account_id_of(ALICE).into(), + }] + .into(); + + let fellowship_treasury_spend_call = + RuntimeCall::FellowshipTreasury(pallet_treasury::Call::::spend { + asset_kind: bx!(VersionedLocatableAsset::V4 { + location: asset_hub_location, + asset_id: native_asset.into(), + }), + amount: fellowship_spend_balance, + beneficiary: bx!(VersionedLocation::V4(alice_location)), + valid_from: None, + }); + + assert_ok!(fellowship_treasury_spend_call.dispatch(fellows_origin)); + + // Claim the spend. + + let alice_signed = RuntimeOrigin::signed(CollectivesWestend::account_id_of(ALICE)); + assert_ok!(FellowshipTreasury::payout(alice_signed.clone(), 0)); + + assert_expected_events!( + CollectivesWestend, + vec![ + RuntimeEvent::FellowshipTreasury(pallet_treasury::Event::AssetSpendApproved { .. }) => {}, + RuntimeEvent::FellowshipTreasury(pallet_treasury::Event::Paid { .. }) => {}, + ] + ); + }); + + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type Balances = ::Balances; + + // Ensure that the funds were deposited into Alice's account. + + let alice_account = AssetHubWestend::account_id_of(ALICE); + assert_eq!( + >::balance(&alice_account), + fellowship_spend_balance + init_alice_balance + ); + + // Assert events triggered by xcm pay program: + // 1. treasury asset transferred to spend beneficiary; + // 2. response to Relay Chain Treasury pallet instance sent back; + // 3. XCM program completed; + assert_expected_events!( + AssetHubWestend, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Transfer { .. }) => {}, + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true ,..
}) => {}, + ] + ); + }); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/mod.rs new file mode 100644 index 00000000000..a9f65df34b6 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/mod.rs @@ -0,0 +1,16 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod fellowship_treasury; diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs index 3816d2ed848..94765287637 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs @@ -21,13 +21,16 @@ mod tracks; use crate::{ weights, xcm_config::{FellowshipAdminBodyId, LocationToAccountId, TreasurerBodyId, UsdtAssetHub}, - AccountId, AssetRate, Balance, Balances, FellowshipReferenda, GovernanceLocation, Preimage, - Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, Scheduler, WestendTreasuryAccount, DAYS, + AccountId, AssetRate, Balance, Balances, FellowshipReferenda, GovernanceLocation, + ParachainInfo, Preimage, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, Scheduler, + WestendTreasuryAccount, DAYS, }; +use cumulus_primitives_core::ParaId; use frame_support::{ parameter_types, traits::{ - EitherOf, EitherOfDiverse, MapSuccess, NeverEnsureOrigin, OriginTrait, TryWithMorphedArg, + tokens::UnityOrOuterConversion, EitherOf, EitherOfDiverse, FromContains, MapSuccess, + NeverEnsureOrigin, OriginTrait, TryWithMorphedArg, }, PalletId, }; @@ -40,10 +43,10 @@ use pallet_ranked_collective::EnsureOfRank; use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; use parachains_common::impls::ToParentTreasury; use polkadot_runtime_common::impls::{ - LocatableAssetConverter, VersionedLocatableAsset, VersionedLocationConverter, + ContainsParts, LocatableAssetConverter, VersionedLocatableAsset, VersionedLocationConverter, }; use sp_arithmetic::Permill; -use sp_core::{ConstU128, ConstU32}; +use sp_core::{ConstU128, ConstU32, ConstU8}; use sp_runtime::traits::{ConstU16, ConvertToValue, IdentityLookup, Replace, TakeFirst}; use testnet_parachains_constants::westend::{account, currency::GRAND}; use westend_runtime_constants::time::HOURS; @@ -263,6 +266,7 @@ parameter_types! { // The asset's interior location for the paying account. This is the Fellowship Treasury // pallet instance (which sits at index 65). 
pub FellowshipTreasuryInteriorLocation: InteriorLocation = PalletInstance(65).into(); + pub SelfParaId: ParaId = ParachainInfo::parachain_id(); } #[cfg(feature = "runtime-benchmarks")] @@ -345,7 +349,15 @@ impl pallet_treasury::Config for Runtime { type Paymaster = FellowshipTreasuryPaymaster; #[cfg(feature = "runtime-benchmarks")] type Paymaster = PayWithEnsure>>; - type BalanceConverter = AssetRate; + type BalanceConverter = UnityOrOuterConversion< + ContainsParts< + FromContains< + xcm_builder::IsSiblingSystemParachain, + xcm_builder::IsParentsOnly>, + >, + >, + AssetRate, + >; type PayoutPeriod = ConstU32<{ 30 * DAYS }>; #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = polkadot_runtime_common::impls::benchmarks::TreasuryArguments< diff --git a/polkadot/runtime/common/src/impls.rs b/polkadot/runtime/common/src/impls.rs index cc1243790c2..85531e9c04f 100644 --- a/polkadot/runtime/common/src/impls.rs +++ b/polkadot/runtime/common/src/impls.rs @@ -19,7 +19,7 @@ use frame_support::traits::{ fungible::{Balanced, Credit}, tokens::imbalance::ResolveTo, - Imbalance, OnUnbalanced, + Contains, ContainsPair, Imbalance, OnUnbalanced, }; use pallet_treasury::TreasuryAccountId; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; @@ -156,6 +156,26 @@ impl TryConvert<&VersionedLocation, xcm::latest::Location> for VersionedLocation } } +/// Adapter for the [`Contains`] trait to match the [`VersionedLocatableAsset`] type converted to the latest +/// version of itself, where its location is matched by the `L` and its asset id by the `A` parameter types. +pub struct ContainsParts(core::marker::PhantomData); +impl Contains for ContainsParts +where + C: ContainsPair, +{ + fn contains(asset: &VersionedLocatableAsset) -> bool { + use VersionedLocatableAsset::*; + let (location, asset_id) = match asset.clone() { + V3 { location, asset_id } => match (location.try_into(), asset_id.try_into()) { + (Ok(l), Ok(a)) => (l, a), + _ => return false, + }, + V4 { location, asset_id } => (location, asset_id), + }; + C::contains(&location, &asset_id.0) + } +} + #[cfg(feature = "runtime-benchmarks")] pub mod benchmarks { use super::VersionedLocatableAsset; diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 740a6240d39..ba80fa6942c 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -25,7 +25,10 @@ use beefy_primitives::{ ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}, mmr::{BeefyDataProvider, MmrLeafVersion}, }; -use frame_support::dynamic_params::{dynamic_pallet_params, dynamic_params}; +use frame_support::{ + dynamic_params::{dynamic_pallet_params, dynamic_params}, + traits::FromContains, +}; use pallet_nis::WithMaximumOf; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ @@ -40,7 +43,8 @@ use rococo_runtime_constants::system_parachain::BROKER_ID; use runtime_common::{ assigned_slots, auctions, claims, crowdloan, identity_migrator, impl_runtime_weights, impls::{ - LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, VersionedLocationConverter, + ContainsParts, LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, + VersionedLocationConverter, }, paras_registrar, paras_sudo_wrapper, prod_or_fast, slots, traits::{Leaser, OnSwap}, @@ -74,10 +78,10 @@ use frame_support::{ genesis_builder_helper::{build_state, get_preset}, parameter_types, traits::{ - fungible::HoldConsideration, Contains, EitherOf, EitherOfDiverse, EnsureOrigin, - EnsureOriginWithArg, EverythingBut, InstanceFilter,
KeyOwnerProofSystem, - LinearStoragePrice, PrivilegeCmp, ProcessMessage, ProcessMessageError, StorageMapShim, - WithdrawReasons, + fungible::HoldConsideration, tokens::UnityOrOuterConversion, Contains, EitherOf, + EitherOfDiverse, EnsureOrigin, EnsureOriginWithArg, EverythingBut, InstanceFilter, + KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, ProcessMessageError, + StorageMapShim, WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter, WeightToFee as _}, PalletId, @@ -87,7 +91,7 @@ use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; use pallet_identity::legacy::IdentityInfo; use pallet_session::historical as session_historical; use pallet_transaction_payment::{FeeDetails, FungibleAdapter, RuntimeDispatchInfo}; -use sp_core::{ConstU128, OpaqueMetadata, H256}; +use sp_core::{ConstU128, ConstU8, OpaqueMetadata, H256}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{ @@ -523,7 +527,15 @@ impl pallet_treasury::Config for Runtime { LocatableAssetConverter, VersionedLocationConverter, >; - type BalanceConverter = AssetRate; + type BalanceConverter = UnityOrOuterConversion< + ContainsParts< + FromContains< + xcm_builder::IsChildSystemParachain, + xcm_builder::IsParentsOnly>, + >, + >, + AssetRate, + >; type PayoutPeriod = PayoutSpendPeriod; #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = runtime_common::impls::benchmarks::TreasuryArguments; diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index a119d78b83a..a06a1e1f7fc 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -31,9 +31,9 @@ use frame_support::{ genesis_builder_helper::{build_state, get_preset}, parameter_types, traits::{ - fungible::HoldConsideration, ConstU32, Contains, EitherOf, EitherOfDiverse, EverythingBut, - InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, ProcessMessage, - ProcessMessageError, WithdrawReasons, + fungible::HoldConsideration, tokens::UnityOrOuterConversion, ConstU32, Contains, EitherOf, + EitherOfDiverse, EverythingBut, FromContains, InstanceFilter, KeyOwnerProofSystem, + LinearStoragePrice, ProcessMessage, ProcessMessageError, WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter, WeightToFee as _}, PalletId, @@ -57,7 +57,8 @@ use runtime_common::{ elections::OnChainAccuracy, identity_migrator, impl_runtime_weights, impls::{ - LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, VersionedLocationConverter, + ContainsParts, LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, + VersionedLocationConverter, }, paras_registrar, paras_sudo_wrapper, prod_or_fast, slots, traits::{Leaser, OnSwap}, @@ -80,7 +81,7 @@ use runtime_parachains::{ shared as parachains_shared, }; use scale_info::TypeInfo; -use sp_core::{OpaqueMetadata, RuntimeDebug, H256}; +use sp_core::{ConstU8, OpaqueMetadata, RuntimeDebug, H256}; use sp_runtime::{ create_runtime_str, curve::PiecewiseLinear, @@ -712,7 +713,15 @@ impl pallet_treasury::Config for Runtime { LocatableAssetConverter, VersionedLocationConverter, >; - type BalanceConverter = AssetRate; + type BalanceConverter = UnityOrOuterConversion< + ContainsParts< + FromContains< + xcm_builder::IsChildSystemParachain, + xcm_builder::IsParentsOnly>, + >, + >, + AssetRate, + >; type PayoutPeriod = PayoutSpendPeriod; #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = runtime_common::impls::benchmarks::TreasuryArguments; diff --git a/polkadot/xcm/xcm-builder/src/barriers.rs 
b/polkadot/xcm/xcm-builder/src/barriers.rs index b8923a4d5c6..c0b328f38e9 100644 --- a/polkadot/xcm/xcm-builder/src/barriers.rs +++ b/polkadot/xcm/xcm-builder/src/barriers.rs @@ -322,6 +322,29 @@ impl> Contains for IsChildSystemParachain } } +/// Matches if the given location is a system-level sibling parachain. +pub struct IsSiblingSystemParachain(PhantomData<(ParaId, SelfParaId)>); +impl + Eq, SelfParaId: Get> Contains + for IsSiblingSystemParachain +{ + fn contains(l: &Location) -> bool { + matches!( + l.unpack(), + (1, [Junction::Parachain(id)]) + if SelfParaId::get() != ParaId::from(*id) && ParaId::from(*id).is_system(), + ) + } +} + +/// Matches if the given location contains only the specified number of parents and no interior +/// junctions. +pub struct IsParentsOnly(PhantomData); +impl> Contains for IsParentsOnly { + fn contains(t: &Location) -> bool { + t.contains_parents_only(Count::get()) + } +} + /// Allows only messages if the generic `ResponseHandler` expects them via `expecting_response`. pub struct AllowKnownQueryResponses(PhantomData); impl ShouldExecute for AllowKnownQueryResponses { diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index c3400cc72b4..bd4a4c941c9 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -37,8 +37,8 @@ mod barriers; pub use barriers::{ AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom, DenyReserveTransferToRelayChain, - DenyThenTry, IsChildSystemParachain, RespectSuspension, TakeWeightCredit, TrailingSetTopicAsId, - WithComputedOrigin, + DenyThenTry, IsChildSystemParachain, IsParentsOnly, IsSiblingSystemParachain, + RespectSuspension, TakeWeightCredit, TrailingSetTopicAsId, WithComputedOrigin, }; mod controller; diff --git a/prdoc/pr_3659.prdoc b/prdoc/pr_3659.prdoc new file mode 100644 index 00000000000..393844d822d --- /dev/null +++ b/prdoc/pr_3659.prdoc @@ -0,0 +1,12 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Unity Balance Conversion for Different IDs of Native Asset + +doc: + - audience: Runtime Dev + description: | + Introduce types to define 1:1 balance conversion for different relative asset ids/locations + of native asset for `ConversionToAssetBalance` trait bounds.
+ +crates: [ ] \ No newline at end of file diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs index 66777cef7b8..a423656c394 100644 --- a/substrate/frame/support/src/traits.rs +++ b/substrate/frame/support/src/traits.rs @@ -36,7 +36,7 @@ mod members; pub use members::{AllowAll, DenyAll, Filter}; pub use members::{ AsContains, ChangeMembers, Contains, ContainsLengthBound, ContainsPair, Equals, Everything, - EverythingBut, FromContainsPair, InitializeMembers, InsideBoth, IsInVec, Nothing, + EverythingBut, FromContains, FromContainsPair, InitializeMembers, InsideBoth, IsInVec, Nothing, RankedMembers, RankedMembersSwapHandler, SortedMembers, TheseExcept, }; diff --git a/substrate/frame/support/src/traits/members.rs b/substrate/frame/support/src/traits/members.rs index 3a6e3719593..53de84ab224 100644 --- a/substrate/frame/support/src/traits/members.rs +++ b/substrate/frame/support/src/traits/members.rs @@ -66,6 +66,15 @@ impl> Contains<(A, B)> for FromContainsPair { } } +/// A [`ContainsPair`] implementation that has a `Contains` implementation for each member of the +/// pair. +pub struct FromContains(PhantomData<(CA, CB)>); +impl, CB: Contains> ContainsPair for FromContains { + fn contains(a: &A, b: &B) -> bool { + CA::contains(a) && CB::contains(b) + } +} + /// A [`Contains`] implementation that contains every value. pub enum Everything {} impl Contains for Everything { diff --git a/substrate/frame/support/src/traits/tokens.rs b/substrate/frame/support/src/traits/tokens.rs index 3635311e643..8842b205801 100644 --- a/substrate/frame/support/src/traits/tokens.rs +++ b/substrate/frame/support/src/traits/tokens.rs @@ -31,7 +31,7 @@ pub mod pay; pub use misc::{ AssetId, Balance, BalanceStatus, ConversionFromAssetBalance, ConversionToAssetBalance, ConvertRank, DepositConsequence, ExistenceRequirement, Fortitude, GetSalary, Locker, Precision, - Preservation, Provenance, Restriction, UnityAssetBalanceConversion, WithdrawConsequence, - WithdrawReasons, + Preservation, Provenance, Restriction, UnityAssetBalanceConversion, UnityOrOuterConversion, + WithdrawConsequence, WithdrawReasons, }; pub use pay::{Pay, PayFromAccount, PaymentStatus}; diff --git a/substrate/frame/support/src/traits/tokens/misc.rs b/substrate/frame/support/src/traits/tokens/misc.rs index a4dd5e49142..424acb1d550 100644 --- a/substrate/frame/support/src/traits/tokens/misc.rs +++ b/substrate/frame/support/src/traits/tokens/misc.rs @@ -17,6 +17,7 @@ //! Miscellaneous types. +use crate::traits::Contains; use codec::{Decode, Encode, FullCodec, MaxEncodedLen}; use sp_arithmetic::traits::{AtLeast32BitUnsigned, Zero}; use sp_core::RuntimeDebug; @@ -299,6 +300,33 @@ where fn ensure_successful(_: AssetId) {} } +/// Implements [`ConversionFromAssetBalance`], allowing for a 1:1 balance conversion of the asset +/// when it meets the conditions specified by `C`. If the conditions are not met, the conversion is +/// delegated to `O`. 
+pub struct UnityOrOuterConversion(core::marker::PhantomData<(C, O)>); +impl + ConversionFromAssetBalance for UnityOrOuterConversion +where + C: Contains, + O: ConversionFromAssetBalance, + AssetBalance: Into, +{ + type Error = O::Error; + fn from_asset_balance( + balance: AssetBalance, + asset_id: AssetId, + ) -> Result { + if C::contains(&asset_id) { + return Ok(balance.into()); + } + O::from_asset_balance(balance, asset_id) + } + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful(asset_id: AssetId) { + O::ensure_successful(asset_id) + } +} + /// Trait to handle NFT locking mechanism to ensure interactions with the asset can be implemented /// downstream to extend logic of Uniques/Nfts current functionality. pub trait Locker { -- GitLab From e81322bc3e8192b536067fed3ef9e20f2752c376 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Wed, 17 Apr 2024 01:38:35 +0200 Subject: [PATCH 012/107] Contracts verify benchmark block (#4130) Add verify statement to ensure that benchmark calls do not revert Also updated [benchmarks](https://weights.tasty.limo/compare?unit=time&ignore_errors=true&threshold=10&method=asymptotic&repo=polkadot-sdk&old=master&new=pg/verify-benchmarks&path_pattern=substrate%2Fframe%2Fcontracts%2Fsrc%2Fweights.rs) --------- Co-authored-by: command-bot <> --- .../frame/contracts/src/benchmarking/mod.rs | 278 +++- substrate/frame/contracts/src/weights.rs | 1242 +++++++++-------- 2 files changed, 845 insertions(+), 675 deletions(-) diff --git a/substrate/frame/contracts/src/benchmarking/mod.rs b/substrate/frame/contracts/src/benchmarking/mod.rs index 676fd320a17..952ef180be2 100644 --- a/substrate/frame/contracts/src/benchmarking/mod.rs +++ b/substrate/frame/contracts/src/benchmarking/mod.rs @@ -623,10 +623,13 @@ mod benchmarks { #[benchmark(pov_mode = Measured)] fn seal_caller(r: Linear<0, API_BENCHMARK_RUNS>) { call_builder!(func, WasmModule::getter("seal0", "seal_caller", r)); + + let res; #[block] { - func.call(); + res = func.call(); } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] @@ -659,10 +662,13 @@ mod benchmarks { for acc in accounts.iter() { >::insert(acc, info.clone()); } + + let res; #[block] { - func.call(); + res = func.call(); } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] @@ -703,19 +709,25 @@ mod benchmarks { for acc in accounts.iter() { >::insert(acc, info.clone()); } + + let res; #[block] { - func.call(); + res = func.call(); } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] fn seal_own_code_hash(r: Linear<0, API_BENCHMARK_RUNS>) { call_builder!(func, WasmModule::getter("seal0", "seal_own_code_hash", r)); + + let res; #[block] { - func.call(); + res = func.call(); } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] @@ -732,10 +744,13 @@ mod benchmarks { ..Default::default() }); call_builder!(func, code); + + let res; #[block] { - func.call(); + res = func.call(); } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] @@ -754,73 +769,97 @@ mod benchmarks { let mut setup = CallSetup::::new(code); setup.set_origin(Origin::Root); call_builder!(func, setup: setup); + + let res; #[block] { - func.call(); + res = func.call(); } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] fn seal_address(r: Linear<0, API_BENCHMARK_RUNS>) { call_builder!(func, WasmModule::getter("seal0", "seal_address", r)); + + let res; #[block] { - func.call(); + res = func.call(); } + assert_eq!(res.did_revert(), false); }
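The pattern repeated throughout this diff is easier to read in isolation. Here is a minimal, self-contained sketch of what the new assertion guards against (the `ExecReturnValue`/`did_revert` stand-ins only mirror pallet-contracts, where the `REVERT` return flag is bit `0x0000_0001`; this is an illustration, not the pallet's actual types):

```rust
/// Stand-in for pallet-contracts' contract call result (assumption:
/// the revert flag occupies bit 0, as `ReturnFlags::REVERT` does).
struct ExecReturnValue {
    flags: u32,
    data: Vec<u8>,
}

impl ExecReturnValue {
    fn did_revert(&self) -> bool {
        self.flags & 0x0000_0001 != 0
    }
}

fn main() {
    // Without the assertion, a benchmark that only measured `func.call()`
    // could silently measure a reverting code path and record a misleading
    // weight. Asserting after the measured block turns a reverting
    // benchmark into a hard failure instead.
    let res = ExecReturnValue { flags: 0, data: vec![] };
    assert_eq!(res.did_revert(), false);
}
```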
#[benchmark(pov_mode = Measured)] fn seal_gas_left(r: Linear<0, API_BENCHMARK_RUNS>) { call_builder!(func, WasmModule::getter("seal1", "gas_left", r)); + + let res; #[block] { - func.call(); + res = func.call(); } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] fn seal_balance(r: Linear<0, API_BENCHMARK_RUNS>) { call_builder!(func, WasmModule::getter("seal0", "seal_balance", r)); + + let res; #[block] { - func.call(); + res = func.call(); } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] fn seal_value_transferred(r: Linear<0, API_BENCHMARK_RUNS>) { call_builder!(func, WasmModule::getter("seal0", "seal_value_transferred", r)); + + let res; #[block] { - func.call(); + res = func.call(); } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] fn seal_minimum_balance(r: Linear<0, API_BENCHMARK_RUNS>) { call_builder!(func, WasmModule::getter("seal0", "seal_minimum_balance", r)); + + let res; #[block] { - func.call(); + res = func.call(); } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] fn seal_block_number(r: Linear<0, API_BENCHMARK_RUNS>) { call_builder!(func, WasmModule::getter("seal0", "seal_block_number", r)); + + let res; #[block] { - func.call(); + res = func.call(); } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] fn seal_now(r: Linear<0, API_BENCHMARK_RUNS>) { call_builder!(func, WasmModule::getter("seal0", "seal_now", r)); + + let res; #[block] { - func.call(); + res = func.call(); } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] @@ -851,10 +890,13 @@ mod benchmarks { ..Default::default() }); call_builder!(func, code); + + let res; #[block] { - func.call(); + res = func.call(); } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] @@ -880,10 +922,13 @@ mod benchmarks { }); call_builder!(func, code); + + let res; #[block] { - func.call(); + res = func.call(); } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] @@ -944,10 +989,13 @@ mod benchmarks { ..Default::default() }); call_builder!(func, code); + + let res; #[block] { - func.call(); + res = func.call(); } + assert_eq!(res.did_revert(), false); } #[benchmark(pov_mode = Measured)] @@ -970,10 +1018,13 @@ mod benchmarks { ..Default::default() }); call_builder!(func, code); + + let res; #[block] { - func.call(); + res = func.call(); } + assert_eq!(res.did_revert(), false); } // The same argument as for `seal_return` is true here. @@ -1108,10 +1159,13 @@ mod benchmarks { }); call_builder!(func, code); + + let res; #[block] { - func.call(); + res = func.call(); } + assert_eq!(res.did_revert(), false); } // Overhead of calling the function without any topic. @@ -1140,10 +1194,13 @@ mod benchmarks { }); call_builder!(func, code); + + let res; #[block] { - func.call(); + res = func.call(); } + assert_eq!(res.did_revert(), false); } // Benchmark the overhead that topics generate. @@ -1177,10 +1234,13 @@ mod benchmarks { }); call_builder!(func, code); + + let res; #[block] { - func.call(); + res = func.call(); } + assert_eq!(res.did_revert(), false); } // Benchmark debug_message call with zero input data. 
@@ -1210,10 +1270,13 @@ mod benchmarks {
 		let mut setup = CallSetup::<T>::new(code);
 		setup.enable_debug_message();
 		call_builder!(func, setup: setup);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -1260,10 +1323,13 @@ mod benchmarks {
 		let mut setup = CallSetup::<T>::new(code);
 		setup.enable_debug_message();
 		call_builder!(func, setup: setup);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		assert_eq!(setup.debug_message().unwrap().len() as u32, i);
 		Ok(())
 	}
@@ -1324,10 +1390,13 @@ mod benchmarks {
 			)
 			.map_err(|_| "Failed to write to storage during setup.")?;
 		}
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -1366,10 +1435,13 @@ mod benchmarks {
 			false,
 		)
 		.map_err(|_| "Failed to write to storage during setup.")?;
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -1409,10 +1481,13 @@ mod benchmarks {
 			false,
 		)
 		.map_err(|_| "Failed to write to storage during setup.")?;
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -1463,10 +1538,13 @@ mod benchmarks {
 			.map_err(|_| "Failed to write to storage during setup.")?;
 		}
 		<ContractInfoOf<T>>::insert(&instance.account_id, info);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -1504,10 +1582,12 @@ mod benchmarks {
 		)
 		.map_err(|_| "Failed to write to storage during setup.")?;
 
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -1564,10 +1644,13 @@ mod benchmarks {
 			.map_err(|_| "Failed to write to storage during setup.")?;
 		}
 		<ContractInfoOf<T>>::insert(&instance.account_id, info);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -1613,10 +1696,13 @@ mod benchmarks {
 		)
 		.map_err(|_| "Failed to write to storage during setup.")?;
 		<ContractInfoOf<T>>::insert(&instance.account_id, info);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -1667,10 +1753,13 @@ mod benchmarks {
 			.map_err(|_| "Failed to write to storage during setup.")?;
 		}
 		<ContractInfoOf<T>>::insert(&instance.account_id, info);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -1708,10 +1797,13 @@ mod benchmarks {
 		)
 		.map_err(|_| "Failed to write to storage during setup.")?;
 		<ContractInfoOf<T>>::insert(&instance.account_id, info);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -1767,10 +1859,13 @@ mod benchmarks {
 			.map_err(|_| "Failed to write to storage during setup.")?;
 		}
 		<ContractInfoOf<T>>::insert(&instance.account_id, info);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -1816,10 +1911,13 @@ mod benchmarks {
 		)
 		.map_err(|_| "Failed to write to storage during setup.")?;
 		<ContractInfoOf<T>>::insert(&instance.account_id, info);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -1867,10 +1965,12 @@ mod benchmarks {
 			assert_eq!(T::Currency::total_balance(account), 0u32.into());
 		}
 
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 
 		for account in &accounts {
 			assert_eq!(T::Currency::total_balance(account), value);
@@ -1947,10 +2047,13 @@ mod benchmarks {
 		let mut setup = CallSetup::<T>::new(code);
 		setup.set_storage_deposit_limit(BalanceOf::<T>::from(u32::MAX.into()));
 		call_builder!(func, setup: setup);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -2002,10 +2105,13 @@ mod benchmarks {
 			..Default::default()
 		});
 		call_builder!(func, code);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -2057,10 +2163,13 @@ mod benchmarks {
 		let mut setup = CallSetup::<T>::new(code);
 		setup.set_data(vec![42; c as usize]);
 		call_builder!(func, setup: setup);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -2165,10 +2274,13 @@ mod benchmarks {
 				return Err("Expected that contract does not exist at this point.".into());
 			}
 		}
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		for addr in &addresses {
 			ContractInfoOf::<T>::get(&addr).ok_or("Contract should have been instantiated")?;
 		}
@@ -2240,10 +2352,13 @@ mod benchmarks {
 		let mut setup = CallSetup::<T>::new(code);
 		setup.set_balance(value + (Pallet::<T>::min_balance() * 2u32.into()));
 		call_builder!(func, setup: setup);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -2251,80 +2366,104 @@ mod benchmarks {
 	#[benchmark(pov_mode = Measured)]
 	fn seal_hash_sha2_256(r: Linear<0, API_BENCHMARK_RUNS>) {
 		call_builder!(func, WasmModule::hasher("seal_hash_sha2_256", r, 0));
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 	}
 
 	// `n`: Input to hash in bytes
 	#[benchmark(pov_mode = Measured)]
 	fn seal_hash_sha2_256_per_byte(n: Linear<0, { code::max_pages::<T>() * 64 * 1024 }>) {
 		call_builder!(func, WasmModule::hasher("seal_hash_sha2_256", 1, n));
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 	}
 
 	// Only the overhead of calling the function itself with minimal arguments.
 	#[benchmark(pov_mode = Measured)]
 	fn seal_hash_keccak_256(r: Linear<0, API_BENCHMARK_RUNS>) {
 		call_builder!(func, WasmModule::hasher("seal_hash_keccak_256", r, 0));
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 	}
 
 	// `n`: Input to hash in bytes
 	#[benchmark(pov_mode = Measured)]
 	fn seal_hash_keccak_256_per_byte(n: Linear<0, { code::max_pages::<T>() * 64 * 1024 }>) {
 		call_builder!(func, WasmModule::hasher("seal_hash_keccak_256", 1, n));
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 	}
 
 	// Only the overhead of calling the function itself with minimal arguments.
 	#[benchmark(pov_mode = Measured)]
 	fn seal_hash_blake2_256(r: Linear<0, API_BENCHMARK_RUNS>) {
 		call_builder!(func, WasmModule::hasher("seal_hash_blake2_256", r, 0));
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 	}
 
 	// `n`: Input to hash in bytes
 	#[benchmark(pov_mode = Measured)]
 	fn seal_hash_blake2_256_per_byte(n: Linear<0, { code::max_pages::<T>() * 64 * 1024 }>) {
 		call_builder!(func, WasmModule::hasher("seal_hash_blake2_256", 1, n));
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 	}
 
 	// Only the overhead of calling the function itself with minimal arguments.
 	#[benchmark(pov_mode = Measured)]
 	fn seal_hash_blake2_128(r: Linear<0, API_BENCHMARK_RUNS>) {
 		call_builder!(func, WasmModule::hasher("seal_hash_blake2_128", r, 0));
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 	}
 
 	// `n`: Input to hash in bytes
 	#[benchmark(pov_mode = Measured)]
 	fn seal_hash_blake2_128_per_byte(n: Linear<0, { code::max_pages::<T>() * 64 * 1024 }>) {
 		call_builder!(func, WasmModule::hasher("seal_hash_blake2_128", 1, n));
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 	}
 
 	// `n`: Message input length to verify in bytes.
@@ -2368,10 +2507,13 @@ mod benchmarks {
 		});
 
 		call_builder!(func, code);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -2425,10 +2567,13 @@ mod benchmarks {
 			..Default::default()
 		});
 		call_builder!(func, code);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -2475,10 +2620,13 @@ mod benchmarks {
 			..Default::default()
 		});
 		call_builder!(func, code);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -2515,10 +2663,13 @@ mod benchmarks {
 			..Default::default()
 		});
 		call_builder!(func, code);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -2556,10 +2707,13 @@ mod benchmarks {
 			..Default::default()
 		});
 		call_builder!(func, code);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -2598,10 +2752,13 @@ mod benchmarks {
 			..Default::default()
 		});
 		call_builder!(func, code);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -2656,10 +2813,13 @@ mod benchmarks {
 			..Default::default()
 		});
 		call_builder!(func, code);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -2713,10 +2873,13 @@ mod benchmarks {
 			..Default::default()
 		});
 		call_builder!(func, code);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 		Ok(())
 	}
 
@@ -2734,10 +2897,13 @@ mod benchmarks {
 			..Default::default()
 		});
 		call_builder!(func, code);
+
+		let res;
 		#[block]
 		{
-			func.call();
+			res = func.call();
 		}
+		assert_eq!(res.did_revert(), false);
 	}
 
 	// We load `i64` values from random linear memory locations and store the loaded
diff --git a/substrate/frame/contracts/src/weights.rs b/substrate/frame/contracts/src/weights.rs
index ca7f58cf5b0..b95b1d1a9a2 100644
--- a/substrate/frame/contracts/src/weights.rs
+++ b/substrate/frame/contracts/src/weights.rs
@@ -18,7 +18,7 @@
 //! Autogenerated weights for `pallet_contracts`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-04-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-04-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
 //! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
@@ -143,8 +143,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `142`
 		//  Estimated: `1627`
-		// Minimum execution time: 2_047_000 picoseconds.
-		Weight::from_parts(2_116_000, 1627)
+		// Minimum execution time: 2_149_000 picoseconds.
+ Weight::from_parts(2_274_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -154,10 +154,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `452 + k * (69 ±0)` // Estimated: `442 + k * (70 ±0)` - // Minimum execution time: 12_474_000 picoseconds. - Weight::from_parts(12_767_000, 442) - // Standard Error: 1_081 - .saturating_add(Weight::from_parts(1_187_278, 0).saturating_mul(k.into())) + // Minimum execution time: 12_863_000 picoseconds. + Weight::from_parts(13_188_000, 442) + // Standard Error: 1_053 + .saturating_add(Weight::from_parts(1_105_325, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -171,10 +171,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211 + c * (1 ±0)` // Estimated: `6149 + c * (1 ±0)` - // Minimum execution time: 8_307_000 picoseconds. - Weight::from_parts(8_939_322, 6149) + // Minimum execution time: 8_432_000 picoseconds. + Weight::from_parts(9_203_290, 6149) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_190, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_186, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -187,8 +187,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 16_915_000 picoseconds. - Weight::from_parts(17_638_000, 6450) + // Minimum execution time: 17_177_000 picoseconds. + Weight::from_parts(17_663_000, 6450) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -201,10 +201,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `171 + k * (1 ±0)` // Estimated: `3635 + k * (1 ±0)` - // Minimum execution time: 3_607_000 picoseconds. - Weight::from_parts(1_979_323, 3635) - // Standard Error: 1_018 - .saturating_add(Weight::from_parts(1_196_162, 0).saturating_mul(k.into())) + // Minimum execution time: 3_636_000 picoseconds. + Weight::from_parts(3_774_000, 3635) + // Standard Error: 542 + .saturating_add(Weight::from_parts(1_260_058, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -225,10 +225,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `328 + c * (1 ±0)` // Estimated: `6266 + c * (1 ±0)` - // Minimum execution time: 21_056_000 picoseconds. - Weight::from_parts(21_633_895, 6266) + // Minimum execution time: 21_585_000 picoseconds. + Weight::from_parts(22_069_944, 6266) // Standard Error: 1 - .saturating_add(Weight::from_parts(390, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(404, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -239,8 +239,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 12_860_000 picoseconds. 
- Weight::from_parts(13_525_000, 6380) + // Minimum execution time: 13_283_000 picoseconds. + Weight::from_parts(14_015_000, 6380) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -254,8 +254,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 46_926_000 picoseconds. - Weight::from_parts(47_828_000, 6292) + // Minimum execution time: 48_022_000 picoseconds. + Weight::from_parts(49_627_000, 6292) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -267,8 +267,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 55_081_000 picoseconds. - Weight::from_parts(56_899_000, 6534) + // Minimum execution time: 58_374_000 picoseconds. + Weight::from_parts(59_615_000, 6534) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -278,8 +278,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `409` // Estimated: `6349` - // Minimum execution time: 12_595_000 picoseconds. - Weight::from_parts(13_059_000, 6349) + // Minimum execution time: 12_559_000 picoseconds. + Weight::from_parts(12_947_000, 6349) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -290,7 +290,7 @@ impl WeightInfo for SubstrateWeight { // Measured: `142` // Estimated: `1627` // Minimum execution time: 2_480_000 picoseconds. - Weight::from_parts(2_663_000, 1627) + Weight::from_parts(2_680_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -302,8 +302,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 12_115_000 picoseconds. - Weight::from_parts(12_506_000, 3631) + // Minimum execution time: 12_625_000 picoseconds. + Weight::from_parts(13_094_000, 3631) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -313,8 +313,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_757_000 picoseconds. - Weight::from_parts(5_082_000, 3607) + // Minimum execution time: 4_836_000 picoseconds. + Weight::from_parts(5_182_000, 3607) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -325,8 +325,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 6_017_000 picoseconds. - Weight::from_parts(6_421_000, 3632) + // Minimum execution time: 6_319_000 picoseconds. + Weight::from_parts(6_582_000, 3632) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -337,8 +337,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 6_238_000 picoseconds. - Weight::from_parts(6_587_000, 3607) + // Minimum execution time: 6_532_000 picoseconds. 
+ Weight::from_parts(6_909_000, 3607) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -363,10 +363,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `804 + c * (1 ±0)` // Estimated: `9217 + c * (1 ±0)` - // Minimum execution time: 288_968_000 picoseconds. - Weight::from_parts(267_291_922, 9217) - // Standard Error: 78 - .saturating_add(Weight::from_parts(34_879, 0).saturating_mul(c.into())) + // Minimum execution time: 305_778_000 picoseconds. + Weight::from_parts(282_321_249, 9217) + // Standard Error: 72 + .saturating_add(Weight::from_parts(33_456, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -398,14 +398,14 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `326` // Estimated: `8740` - // Minimum execution time: 3_948_426_000 picoseconds. - Weight::from_parts(440_017_623, 8740) - // Standard Error: 555 - .saturating_add(Weight::from_parts(71_483, 0).saturating_mul(c.into())) - // Standard Error: 66 - .saturating_add(Weight::from_parts(1_831, 0).saturating_mul(i.into())) - // Standard Error: 66 - .saturating_add(Weight::from_parts(1_694, 0).saturating_mul(s.into())) + // Minimum execution time: 3_810_809_000 picoseconds. + Weight::from_parts(739_511_598, 8740) + // Standard Error: 140 + .saturating_add(Weight::from_parts(67_574, 0).saturating_mul(c.into())) + // Standard Error: 16 + .saturating_add(Weight::from_parts(1_488, 0).saturating_mul(i.into())) + // Standard Error: 16 + .saturating_add(Weight::from_parts(1_537, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } @@ -435,12 +435,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `563` // Estimated: `8982` - // Minimum execution time: 2_011_037_000 picoseconds. - Weight::from_parts(2_047_025_000, 8982) - // Standard Error: 28 - .saturating_add(Weight::from_parts(968, 0).saturating_mul(i.into())) - // Standard Error: 28 - .saturating_add(Weight::from_parts(780, 0).saturating_mul(s.into())) + // Minimum execution time: 1_986_789_000 picoseconds. + Weight::from_parts(2_017_466_000, 8982) + // Standard Error: 26 + .saturating_add(Weight::from_parts(827, 0).saturating_mul(i.into())) + // Standard Error: 26 + .saturating_add(Weight::from_parts(781, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -464,8 +464,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `829` // Estimated: `9244` - // Minimum execution time: 202_190_000 picoseconds. - Weight::from_parts(209_378_000, 9244) + // Minimum execution time: 210_724_000 picoseconds. + Weight::from_parts(218_608_000, 9244) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -486,10 +486,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `6085` - // Minimum execution time: 271_161_000 picoseconds. - Weight::from_parts(279_218_977, 6085) - // Standard Error: 80 - .saturating_add(Weight::from_parts(33_973, 0).saturating_mul(c.into())) + // Minimum execution time: 271_259_000 picoseconds. 
+ Weight::from_parts(298_852_854, 6085) + // Standard Error: 65 + .saturating_add(Weight::from_parts(33_547, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -510,10 +510,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `145` // Estimated: `6085` - // Minimum execution time: 273_684_000 picoseconds. - Weight::from_parts(284_348_722, 6085) - // Standard Error: 79 - .saturating_add(Weight::from_parts(34_205, 0).saturating_mul(c.into())) + // Minimum execution time: 278_167_000 picoseconds. + Weight::from_parts(311_888_941, 6085) + // Standard Error: 58 + .saturating_add(Weight::from_parts(33_595, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -531,8 +531,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 45_150_000 picoseconds. - Weight::from_parts(46_780_000, 3780) + // Minimum execution time: 47_403_000 picoseconds. + Weight::from_parts(48_707_000, 3780) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -548,8 +548,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `552` // Estimated: `8967` - // Minimum execution time: 34_738_000 picoseconds. - Weight::from_parts(35_918_000, 8967) + // Minimum execution time: 35_361_000 picoseconds. + Weight::from_parts(36_714_000, 8967) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -558,10 +558,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_094_000 picoseconds. - Weight::from_parts(10_253_702, 0) - // Standard Error: 223 - .saturating_add(Weight::from_parts(250_757, 0).saturating_mul(r.into())) + // Minimum execution time: 9_340_000 picoseconds. + Weight::from_parts(9_360_237, 0) + // Standard Error: 269 + .saturating_add(Weight::from_parts(249_611, 0).saturating_mul(r.into())) } /// Storage: `Contracts::ContractInfoOf` (r:1600 w:0) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -570,10 +570,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `509 + r * (77 ±0)` // Estimated: `1467 + r * (2552 ±0)` - // Minimum execution time: 9_102_000 picoseconds. - Weight::from_parts(9_238_000, 1467) - // Standard Error: 6_076 - .saturating_add(Weight::from_parts(3_293_012, 0).saturating_mul(r.into())) + // Minimum execution time: 9_059_000 picoseconds. + Weight::from_parts(9_201_000, 1467) + // Standard Error: 5_643 + .saturating_add(Weight::from_parts(3_343_859, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2552).saturating_mul(r.into())) } @@ -584,10 +584,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `517 + r * (170 ±0)` // Estimated: `1468 + r * (2645 ±0)` - // Minimum execution time: 9_255_000 picoseconds. - Weight::from_parts(9_406_000, 1468) - // Standard Error: 6_826 - .saturating_add(Weight::from_parts(4_205_039, 0).saturating_mul(r.into())) + // Minimum execution time: 9_220_000 picoseconds. 
+ Weight::from_parts(9_399_000, 1468) + // Standard Error: 6_194 + .saturating_add(Weight::from_parts(4_172_011, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2645).saturating_mul(r.into())) } @@ -596,50 +596,50 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_388_000 picoseconds. - Weight::from_parts(9_322_209, 0) - // Standard Error: 269 - .saturating_add(Weight::from_parts(358_189, 0).saturating_mul(r.into())) + // Minimum execution time: 9_707_000 picoseconds. + Weight::from_parts(10_100_456, 0) + // Standard Error: 234 + .saturating_add(Weight::from_parts(338_464, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_origin(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_300_000 picoseconds. - Weight::from_parts(10_268_326, 0) - // Standard Error: 72 - .saturating_add(Weight::from_parts(104_650, 0).saturating_mul(r.into())) + // Minimum execution time: 9_524_000 picoseconds. + Weight::from_parts(10_813_389, 0) + // Standard Error: 76 + .saturating_add(Weight::from_parts(102_535, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_root(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_162_000 picoseconds. - Weight::from_parts(10_059_984, 0) - // Standard Error: 87 - .saturating_add(Weight::from_parts(87_627, 0).saturating_mul(r.into())) + // Minimum execution time: 9_799_000 picoseconds. + Weight::from_parts(10_886_744, 0) + // Standard Error: 75 + .saturating_add(Weight::from_parts(80_901, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_address(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_193_000 picoseconds. - Weight::from_parts(10_160_715, 0) - // Standard Error: 152 - .saturating_add(Weight::from_parts(263_703, 0).saturating_mul(r.into())) + // Minimum execution time: 9_895_000 picoseconds. + Weight::from_parts(10_658_338, 0) + // Standard Error: 189 + .saturating_add(Weight::from_parts(249_694, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_gas_left(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_109_000 picoseconds. - Weight::from_parts(9_766_924, 0) - // Standard Error: 212 - .saturating_add(Weight::from_parts(291_694, 0).saturating_mul(r.into())) + // Minimum execution time: 9_643_000 picoseconds. + Weight::from_parts(10_932_126, 0) + // Standard Error: 153 + .saturating_add(Weight::from_parts(280_924, 0).saturating_mul(r.into())) } /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) @@ -648,10 +648,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `3599` - // Minimum execution time: 9_463_000 picoseconds. - Weight::from_parts(9_541_000, 3599) - // Standard Error: 3_075 - .saturating_add(Weight::from_parts(1_606_043, 0).saturating_mul(r.into())) + // Minimum execution time: 9_548_000 picoseconds. 
+ Weight::from_parts(9_737_000, 3599) + // Standard Error: 971 + .saturating_add(Weight::from_parts(1_704_134, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 1600]`. @@ -659,40 +659,40 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_080_000 picoseconds. - Weight::from_parts(8_121_924, 0) - // Standard Error: 198 - .saturating_add(Weight::from_parts(247_527, 0).saturating_mul(r.into())) + // Minimum execution time: 9_172_000 picoseconds. + Weight::from_parts(18_255_933, 0) + // Standard Error: 540 + .saturating_add(Weight::from_parts(230_929, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_minimum_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_393_000 picoseconds. - Weight::from_parts(9_999_247, 0) - // Standard Error: 169 - .saturating_add(Weight::from_parts(244_563, 0).saturating_mul(r.into())) + // Minimum execution time: 9_232_000 picoseconds. + Weight::from_parts(9_796_584, 0) + // Standard Error: 208 + .saturating_add(Weight::from_parts(239_962, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_block_number(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_236_000 picoseconds. - Weight::from_parts(9_561_435, 0) - // Standard Error: 195 - .saturating_add(Weight::from_parts(239_812, 0).saturating_mul(r.into())) + // Minimum execution time: 9_747_000 picoseconds. + Weight::from_parts(8_733_230, 0) + // Standard Error: 377 + .saturating_add(Weight::from_parts(253_801, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_now(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_259_000 picoseconds. - Weight::from_parts(10_353_960, 0) - // Standard Error: 216 - .saturating_add(Weight::from_parts(243_754, 0).saturating_mul(r.into())) + // Minimum execution time: 9_214_000 picoseconds. + Weight::from_parts(10_194_153, 0) + // Standard Error: 516 + .saturating_add(Weight::from_parts(247_621, 0).saturating_mul(r.into())) } /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) @@ -701,10 +701,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `67` // Estimated: `1552` - // Minimum execution time: 9_145_000 picoseconds. - Weight::from_parts(16_524_937, 1552) - // Standard Error: 438 - .saturating_add(Weight::from_parts(666_821, 0).saturating_mul(r.into())) + // Minimum execution time: 9_022_000 picoseconds. + Weight::from_parts(22_051_160, 1552) + // Standard Error: 697 + .saturating_add(Weight::from_parts(709_612, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 1600]`. @@ -712,10 +712,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_179_000 picoseconds. - Weight::from_parts(8_893_261, 0) - // Standard Error: 215 - .saturating_add(Weight::from_parts(175_586, 0).saturating_mul(r.into())) + // Minimum execution time: 9_135_000 picoseconds. 
+ Weight::from_parts(10_646_215, 0) + // Standard Error: 161 + .saturating_add(Weight::from_parts(170_336, 0).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -738,10 +738,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `872` // Estimated: `9287` - // Minimum execution time: 259_315_000 picoseconds. - Weight::from_parts(137_461_362, 9287) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_388, 0).saturating_mul(n.into())) + // Minimum execution time: 273_896_000 picoseconds. + Weight::from_parts(148_309_654, 9287) + // Standard Error: 16 + .saturating_add(Weight::from_parts(1_355, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -750,20 +750,20 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_919_000 picoseconds. - Weight::from_parts(9_465_187, 0) - // Standard Error: 32_481 - .saturating_add(Weight::from_parts(992_912, 0).saturating_mul(r.into())) + // Minimum execution time: 8_906_000 picoseconds. + Weight::from_parts(9_264_446, 0) + // Standard Error: 19_760 + .saturating_add(Weight::from_parts(1_256_053, 0).saturating_mul(r.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_return_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_244_000 picoseconds. - Weight::from_parts(10_654_989, 0) + // Minimum execution time: 10_266_000 picoseconds. + Weight::from_parts(10_602_261, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(315, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(318, 0).saturating_mul(n.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -792,10 +792,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `4805 + r * (2121 ±0)` // Estimated: `13220 + r * (81321 ±0)` - // Minimum execution time: 303_028_000 picoseconds. - Weight::from_parts(323_032_397, 13220) - // Standard Error: 848_406 - .saturating_add(Weight::from_parts(242_988_002, 0).saturating_mul(r.into())) + // Minimum execution time: 295_922_000 picoseconds. + Weight::from_parts(322_472_877, 13220) + // Standard Error: 993_812 + .saturating_add(Weight::from_parts(259_075_422, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().reads((36_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -809,10 +809,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 9_227_000 picoseconds. - Weight::from_parts(14_055_283, 1561) - // Standard Error: 758 - .saturating_add(Weight::from_parts(1_104_996, 0).saturating_mul(r.into())) + // Minimum execution time: 9_427_000 picoseconds. + Weight::from_parts(12_996_213, 1561) + // Standard Error: 845 + .saturating_add(Weight::from_parts(1_182_642, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 1600]`. 
@@ -820,10 +820,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_483_000 picoseconds. - Weight::from_parts(20_453_059, 0) - // Standard Error: 3_271 - .saturating_add(Weight::from_parts(1_713_468, 0).saturating_mul(r.into())) + // Minimum execution time: 9_304_000 picoseconds. + Weight::from_parts(25_678_842, 0) + // Standard Error: 1_855 + .saturating_add(Weight::from_parts(1_814_511, 0).saturating_mul(r.into())) } /// Storage: `System::EventTopics` (r:4 w:4) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -833,12 +833,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `990 + t * (2475 ±0)` - // Minimum execution time: 23_517_000 picoseconds. - Weight::from_parts(15_543_153, 990) - // Standard Error: 13_814 - .saturating_add(Weight::from_parts(2_357_255, 0).saturating_mul(t.into())) - // Standard Error: 3 - .saturating_add(Weight::from_parts(573, 0).saturating_mul(n.into())) + // Minimum execution time: 23_425_000 picoseconds. + Weight::from_parts(15_229_010, 990) + // Standard Error: 14_380 + .saturating_add(Weight::from_parts(2_545_653, 0).saturating_mul(t.into())) + // Standard Error: 4 + .saturating_add(Weight::from_parts(594, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into())) @@ -848,20 +848,20 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_448_000 picoseconds. - Weight::from_parts(9_845_841, 0) - // Standard Error: 58 - .saturating_add(Weight::from_parts(105_442, 0).saturating_mul(r.into())) + // Minimum execution time: 11_117_000 picoseconds. + Weight::from_parts(12_887_533, 0) + // Standard Error: 83 + .saturating_add(Weight::from_parts(99_373, 0).saturating_mul(r.into())) } /// The range of component `i` is `[0, 1048576]`. fn seal_debug_message_per_byte(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_869_000 picoseconds. - Weight::from_parts(11_024_000, 0) + // Minimum execution time: 10_982_000 picoseconds. + Weight::from_parts(11_176_000, 0) // Standard Error: 8 - .saturating_add(Weight::from_parts(991, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(983, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -870,10 +870,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `108 + r * (150 ±0)` // Estimated: `105 + r * (151 ±0)` - // Minimum execution time: 9_119_000 picoseconds. - Weight::from_parts(9_270_000, 105) - // Standard Error: 8_960 - .saturating_add(Weight::from_parts(5_215_976, 0).saturating_mul(r.into())) + // Minimum execution time: 9_150_000 picoseconds. 
+ Weight::from_parts(9_269_000, 105) + // Standard Error: 8_147 + .saturating_add(Weight::from_parts(5_339_554, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) @@ -885,10 +885,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `245` // Estimated: `245` - // Minimum execution time: 17_833_000 picoseconds. - Weight::from_parts(18_940_114, 245) - // Standard Error: 2 - .saturating_add(Weight::from_parts(316, 0).saturating_mul(n.into())) + // Minimum execution time: 19_085_000 picoseconds. + Weight::from_parts(20_007_323, 245) + // Standard Error: 3 + .saturating_add(Weight::from_parts(291, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -899,10 +899,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 18_428_000 picoseconds. - Weight::from_parts(19_372_726, 248) - // Standard Error: 2 - .saturating_add(Weight::from_parts(85, 0).saturating_mul(n.into())) + // Minimum execution time: 19_127_000 picoseconds. + Weight::from_parts(21_152_987, 248) + // Standard Error: 3 + .saturating_add(Weight::from_parts(42, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -914,10 +914,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `108 + r * (150 ±0)` // Estimated: `105 + r * (151 ±0)` - // Minimum execution time: 9_335_000 picoseconds. - Weight::from_parts(9_459_000, 105) - // Standard Error: 9_156 - .saturating_add(Weight::from_parts(5_166_621, 0).saturating_mul(r.into())) + // Minimum execution time: 9_264_000 picoseconds. + Weight::from_parts(9_449_000, 105) + // Standard Error: 8_196 + .saturating_add(Weight::from_parts(5_325_578, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) @@ -929,10 +929,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 18_308_000 picoseconds. - Weight::from_parts(19_421_433, 248) + // Minimum execution time: 18_489_000 picoseconds. + Weight::from_parts(19_916_153, 248) // Standard Error: 2 - .saturating_add(Weight::from_parts(83, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(97, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -944,10 +944,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `108 + r * (150 ±0)` // Estimated: `105 + r * (151 ±0)` - // Minimum execution time: 9_184_000 picoseconds. - Weight::from_parts(9_245_000, 105) - // Standard Error: 8_442 - .saturating_add(Weight::from_parts(4_543_991, 0).saturating_mul(r.into())) + // Minimum execution time: 9_299_000 picoseconds. 
+ Weight::from_parts(9_464_000, 105) + // Standard Error: 6_827 + .saturating_add(Weight::from_parts(4_720_699, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) } @@ -958,10 +958,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 17_194_000 picoseconds. - Weight::from_parts(19_032_094, 248) + // Minimum execution time: 17_981_000 picoseconds. + Weight::from_parts(19_802_353, 248) // Standard Error: 3 - .saturating_add(Weight::from_parts(590, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(617, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -972,10 +972,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `108 + r * (150 ±0)` // Estimated: `105 + r * (151 ±0)` - // Minimum execution time: 9_380_000 picoseconds. - Weight::from_parts(9_501_000, 105) - // Standard Error: 7_029 - .saturating_add(Weight::from_parts(4_406_690, 0).saturating_mul(r.into())) + // Minimum execution time: 9_891_000 picoseconds. + Weight::from_parts(10_046_000, 105) + // Standard Error: 6_993 + .saturating_add(Weight::from_parts(4_601_167, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) } @@ -986,10 +986,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 16_400_000 picoseconds. - Weight::from_parts(17_993_941, 248) + // Minimum execution time: 17_229_000 picoseconds. + Weight::from_parts(18_302_733, 248) // Standard Error: 2 - .saturating_add(Weight::from_parts(68, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(112, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1000,10 +1000,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `108 + r * (150 ±0)` // Estimated: `105 + r * (151 ±0)` - // Minimum execution time: 9_109_000 picoseconds. - Weight::from_parts(9_265_000, 105) - // Standard Error: 8_733 - .saturating_add(Weight::from_parts(5_218_811, 0).saturating_mul(r.into())) + // Minimum execution time: 9_323_000 picoseconds. + Weight::from_parts(9_462_000, 105) + // Standard Error: 8_031 + .saturating_add(Weight::from_parts(5_433_981, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) @@ -1015,10 +1015,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 18_423_000 picoseconds. - Weight::from_parts(20_025_132, 248) + // Minimum execution time: 18_711_000 picoseconds. 
+ Weight::from_parts(20_495_670, 248) // Standard Error: 3 - .saturating_add(Weight::from_parts(628, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(640, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1030,10 +1030,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `770` // Estimated: `4221 + r * (2475 ±0)` - // Minimum execution time: 9_043_000 picoseconds. - Weight::from_parts(9_176_000, 4221) - // Standard Error: 12_901 - .saturating_add(Weight::from_parts(32_297_438, 0).saturating_mul(r.into())) + // Minimum execution time: 9_226_000 picoseconds. + Weight::from_parts(9_394_000, 4221) + // Standard Error: 14_741 + .saturating_add(Weight::from_parts(34_179_316, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -1055,10 +1055,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `520 + r * (170 ±0)` // Estimated: `6463 + r * (2646 ±0)` - // Minimum execution time: 9_299_000 picoseconds. - Weight::from_parts(9_427_000, 6463) - // Standard Error: 101_949 - .saturating_add(Weight::from_parts(244_143_691, 0).saturating_mul(r.into())) + // Minimum execution time: 9_455_000 picoseconds. + Weight::from_parts(9_671_000, 6463) + // Standard Error: 126_080 + .saturating_add(Weight::from_parts(244_204_040, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -1079,11 +1079,11 @@ impl WeightInfo for SubstrateWeight { fn seal_delegate_call(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + r * (527 ±0)` - // Estimated: `6447 + r * (2583 ±3)` - // Minimum execution time: 9_359_000 picoseconds. - Weight::from_parts(9_425_000, 6447) - // Standard Error: 193_938 - .saturating_add(Weight::from_parts(244_904_401, 0).saturating_mul(r.into())) + // Estimated: `6447 + r * (2583 ±10)` + // Minimum execution time: 9_274_000 picoseconds. + Weight::from_parts(9_437_000, 6447) + // Standard Error: 150_832 + .saturating_add(Weight::from_parts(244_196_269, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2583).saturating_mul(r.into())) @@ -1106,12 +1106,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `699 + t * (277 ±0)` // Estimated: `6639 + t * (3458 ±0)` - // Minimum execution time: 214_588_000 picoseconds. - Weight::from_parts(129_214_481, 6639) - // Standard Error: 2_468_090 - .saturating_add(Weight::from_parts(32_514_739, 0).saturating_mul(t.into())) + // Minimum execution time: 214_483_000 picoseconds. 
+ Weight::from_parts(122_634_366, 6639) + // Standard Error: 2_499_235 + .saturating_add(Weight::from_parts(41_326_008, 0).saturating_mul(t.into())) // Standard Error: 3 - .saturating_add(Weight::from_parts(418, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(422, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -1126,10 +1126,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:800 w:801) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::Account` (r:802 w:802) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::EventTopics` (r:801 w:801) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[1, 800]`. @@ -1137,10 +1137,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1097 + r * (188 ±0)` // Estimated: `6990 + r * (2664 ±0)` - // Minimum execution time: 352_925_000 picoseconds. - Weight::from_parts(355_487_000, 6990) - // Standard Error: 261_528 - .saturating_add(Weight::from_parts(337_897_187, 0).saturating_mul(r.into())) + // Minimum execution time: 341_569_000 picoseconds. + Weight::from_parts(360_574_000, 6990) + // Standard Error: 259_746 + .saturating_add(Weight::from_parts(337_944_674, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -1155,10 +1155,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:2) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::Account` (r:3 w:3) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[0, 1]`. @@ -1168,12 +1168,14 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `760 + t * (104 ±0)` // Estimated: `6719 + t * (2549 ±1)` - // Minimum execution time: 1_870_832_000 picoseconds. 
- Weight::from_parts(949_110_245, 6719) - // Standard Error: 24 - .saturating_add(Weight::from_parts(1_084, 0).saturating_mul(i.into())) - // Standard Error: 24 - .saturating_add(Weight::from_parts(1_206, 0).saturating_mul(s.into())) + // Minimum execution time: 1_863_119_000 picoseconds. + Weight::from_parts(900_189_174, 6719) + // Standard Error: 13_040_979 + .saturating_add(Weight::from_parts(4_056_063, 0).saturating_mul(t.into())) + // Standard Error: 20 + .saturating_add(Weight::from_parts(1_028, 0).saturating_mul(i.into())) + // Standard Error: 20 + .saturating_add(Weight::from_parts(1_173, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(7_u64)) @@ -1185,58 +1187,58 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_142_000 picoseconds. - Weight::from_parts(9_787_220, 0) - // Standard Error: 236 - .saturating_add(Weight::from_parts(267_264, 0).saturating_mul(r.into())) + // Minimum execution time: 9_211_000 picoseconds. + Weight::from_parts(11_696_412, 0) + // Standard Error: 388 + .saturating_add(Weight::from_parts(265_538, 0).saturating_mul(r.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_485_000 picoseconds. - Weight::from_parts(1_870_250, 0) + // Minimum execution time: 10_296_000 picoseconds. + Weight::from_parts(572_494, 0) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_073, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_067, 0).saturating_mul(n.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_005_000 picoseconds. - Weight::from_parts(8_943_937, 0) - // Standard Error: 1_385 - .saturating_add(Weight::from_parts(665_970, 0).saturating_mul(r.into())) + // Minimum execution time: 9_177_000 picoseconds. + Weight::from_parts(8_620_481, 0) + // Standard Error: 249 + .saturating_add(Weight::from_parts(674_502, 0).saturating_mul(r.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_965_000 picoseconds. - Weight::from_parts(11_749_746, 0) - // Standard Error: 6 - .saturating_add(Weight::from_parts(3_330, 0).saturating_mul(n.into())) + // Minimum execution time: 11_240_000 picoseconds. + Weight::from_parts(8_696_186, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(3_328, 0).saturating_mul(n.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_400_000 picoseconds. - Weight::from_parts(13_857_546, 0) - // Standard Error: 246 - .saturating_add(Weight::from_parts(326_483, 0).saturating_mul(r.into())) + // Minimum execution time: 9_889_000 picoseconds. + Weight::from_parts(16_103_170, 0) + // Standard Error: 343 + .saturating_add(Weight::from_parts(328_939, 0).saturating_mul(r.into())) } /// The range of component `n` is `[0, 1048576]`. 
fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_064_000 picoseconds. - Weight::from_parts(1_885_873, 0) + // Minimum execution time: 10_405_000 picoseconds. + Weight::from_parts(2_264_024, 0) // Standard Error: 0 .saturating_add(Weight::from_parts(1_196, 0).saturating_mul(n.into())) } @@ -1245,60 +1247,60 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_068_000 picoseconds. - Weight::from_parts(17_169_362, 0) - // Standard Error: 1_580 - .saturating_add(Weight::from_parts(330_195, 0).saturating_mul(r.into())) + // Minimum execution time: 9_215_000 picoseconds. + Weight::from_parts(10_505_632, 0) + // Standard Error: 240 + .saturating_add(Weight::from_parts(324_854, 0).saturating_mul(r.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_169_000 picoseconds. - Weight::from_parts(2_159_277, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_200, 0).saturating_mul(n.into())) + // Minimum execution time: 10_440_000 picoseconds. + Weight::from_parts(2_575_889, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_199, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 53_863_000 picoseconds. - Weight::from_parts(54_902_157, 0) - // Standard Error: 9 - .saturating_add(Weight::from_parts(4_588, 0).saturating_mul(n.into())) + // Minimum execution time: 55_119_000 picoseconds. + Weight::from_parts(56_732_248, 0) + // Standard Error: 8 + .saturating_add(Weight::from_parts(4_639, 0).saturating_mul(n.into())) } /// The range of component `r` is `[0, 160]`. fn seal_sr25519_verify(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_107_000 picoseconds. - Weight::from_parts(24_115_247, 0) - // Standard Error: 7_427 - .saturating_add(Weight::from_parts(41_116_827, 0).saturating_mul(r.into())) + // Minimum execution time: 9_176_000 picoseconds. + Weight::from_parts(9_861_102, 0) + // Standard Error: 6_029 + .saturating_add(Weight::from_parts(45_948_571, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_404_000 picoseconds. - Weight::from_parts(31_763_334, 0) - // Standard Error: 9_833 - .saturating_add(Weight::from_parts(45_529_880, 0).saturating_mul(r.into())) + // Minimum execution time: 9_293_000 picoseconds. + Weight::from_parts(28_785_765, 0) + // Standard Error: 9_160 + .saturating_add(Weight::from_parts(45_566_150, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_409_000 picoseconds. - Weight::from_parts(15_072_835, 0) - // Standard Error: 4_591 - .saturating_add(Weight::from_parts(11_619_283, 0).saturating_mul(r.into())) + // Minimum execution time: 9_206_000 picoseconds. 
+ Weight::from_parts(12_420_664, 0) + // Standard Error: 3_489 + .saturating_add(Weight::from_parts(11_628_989, 0).saturating_mul(r.into())) } /// Storage: `Contracts::CodeInfoOf` (r:1536 w:1536) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) @@ -1312,11 +1314,11 @@ impl WeightInfo for SubstrateWeight { fn seal_set_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + r * (926 ±0)` - // Estimated: `8969 + r * (3047 ±10)` - // Minimum execution time: 9_269_000 picoseconds. - Weight::from_parts(9_372_000, 8969) - // Standard Error: 61_354 - .saturating_add(Weight::from_parts(26_280_409, 0).saturating_mul(r.into())) + // Estimated: `8969 + r * (3047 ±7)` + // Minimum execution time: 9_219_000 picoseconds. + Weight::from_parts(9_385_000, 8969) + // Standard Error: 45_562 + .saturating_add(Weight::from_parts(26_360_661, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 3047).saturating_mul(r.into())) @@ -1328,10 +1330,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `274 + r * (78 ±0)` // Estimated: `1265 + r * (2553 ±0)` - // Minimum execution time: 9_103_000 picoseconds. - Weight::from_parts(14_404_626, 1265) - // Standard Error: 9_343 - .saturating_add(Weight::from_parts(5_154_949, 0).saturating_mul(r.into())) + // Minimum execution time: 9_355_000 picoseconds. + Weight::from_parts(15_071_309, 1265) + // Standard Error: 9_722 + .saturating_add(Weight::from_parts(5_328_717, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2553).saturating_mul(r.into())) @@ -1343,10 +1345,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `275 + r * (78 ±0)` // Estimated: `990 + r * (2568 ±0)` - // Minimum execution time: 9_219_000 picoseconds. - Weight::from_parts(14_085_456, 990) - // Standard Error: 11_206 - .saturating_add(Weight::from_parts(4_422_122, 0).saturating_mul(r.into())) + // Minimum execution time: 8_979_000 picoseconds. + Weight::from_parts(14_362_224, 990) + // Standard Error: 9_137 + .saturating_add(Weight::from_parts(4_488_748, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2568).saturating_mul(r.into())) @@ -1372,10 +1374,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `861 + r * (3 ±0)` // Estimated: `9282 + r * (3 ±0)` - // Minimum execution time: 269_333_000 picoseconds. - Weight::from_parts(286_922_618, 9282) - // Standard Error: 443 - .saturating_add(Weight::from_parts(168_869, 0).saturating_mul(r.into())) + // Minimum execution time: 269_704_000 picoseconds. 
+ Weight::from_parts(289_916_035, 9282) + // Standard Error: 408 + .saturating_add(Weight::from_parts(166_040, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) @@ -1385,10 +1387,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_328_000 picoseconds. - Weight::from_parts(14_019_583, 0) - // Standard Error: 171 - .saturating_add(Weight::from_parts(88_751, 0).saturating_mul(r.into())) + // Minimum execution time: 9_361_000 picoseconds. + Weight::from_parts(11_633_836, 0) + // Standard Error: 86 + .saturating_add(Weight::from_parts(83_083, 0).saturating_mul(r.into())) } /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -1397,10 +1399,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `219` // Estimated: `1704` - // Minimum execution time: 9_267_000 picoseconds. - Weight::from_parts(15_304_284, 1704) - // Standard Error: 1_219 - .saturating_add(Weight::from_parts(74_696, 0).saturating_mul(r.into())) + // Minimum execution time: 9_133_000 picoseconds. + Weight::from_parts(13_259_836, 1704) + // Standard Error: 121 + .saturating_add(Weight::from_parts(76_878, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 5000]`. @@ -1408,10 +1410,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 911_000 picoseconds. - Weight::from_parts(449_666, 0) - // Standard Error: 26 - .saturating_add(Weight::from_parts(14_797, 0).saturating_mul(r.into())) + // Minimum execution time: 851_000 picoseconds. + Weight::from_parts(587_883, 0) + // Standard Error: 16 + .saturating_add(Weight::from_parts(14_912, 0).saturating_mul(r.into())) } } @@ -1423,8 +1425,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_047_000 picoseconds. - Weight::from_parts(2_116_000, 1627) + // Minimum execution time: 2_149_000 picoseconds. + Weight::from_parts(2_274_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1434,10 +1436,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `452 + k * (69 ±0)` // Estimated: `442 + k * (70 ±0)` - // Minimum execution time: 12_474_000 picoseconds. - Weight::from_parts(12_767_000, 442) - // Standard Error: 1_081 - .saturating_add(Weight::from_parts(1_187_278, 0).saturating_mul(k.into())) + // Minimum execution time: 12_863_000 picoseconds. + Weight::from_parts(13_188_000, 442) + // Standard Error: 1_053 + .saturating_add(Weight::from_parts(1_105_325, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -1451,10 +1453,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `211 + c * (1 ±0)` // Estimated: `6149 + c * (1 ±0)` - // Minimum execution time: 8_307_000 picoseconds. - Weight::from_parts(8_939_322, 6149) + // Minimum execution time: 8_432_000 picoseconds. 
+ Weight::from_parts(9_203_290, 6149) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_190, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_186, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -1467,8 +1469,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 16_915_000 picoseconds. - Weight::from_parts(17_638_000, 6450) + // Minimum execution time: 17_177_000 picoseconds. + Weight::from_parts(17_663_000, 6450) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1481,10 +1483,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `171 + k * (1 ±0)` // Estimated: `3635 + k * (1 ±0)` - // Minimum execution time: 3_607_000 picoseconds. - Weight::from_parts(1_979_323, 3635) - // Standard Error: 1_018 - .saturating_add(Weight::from_parts(1_196_162, 0).saturating_mul(k.into())) + // Minimum execution time: 3_636_000 picoseconds. + Weight::from_parts(3_774_000, 3635) + // Standard Error: 542 + .saturating_add(Weight::from_parts(1_260_058, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -1505,10 +1507,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `328 + c * (1 ±0)` // Estimated: `6266 + c * (1 ±0)` - // Minimum execution time: 21_056_000 picoseconds. - Weight::from_parts(21_633_895, 6266) + // Minimum execution time: 21_585_000 picoseconds. + Weight::from_parts(22_069_944, 6266) // Standard Error: 1 - .saturating_add(Weight::from_parts(390, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(404, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -1519,8 +1521,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 12_860_000 picoseconds. - Weight::from_parts(13_525_000, 6380) + // Minimum execution time: 13_283_000 picoseconds. + Weight::from_parts(14_015_000, 6380) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1534,8 +1536,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 46_926_000 picoseconds. - Weight::from_parts(47_828_000, 6292) + // Minimum execution time: 48_022_000 picoseconds. + Weight::from_parts(49_627_000, 6292) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1547,8 +1549,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 55_081_000 picoseconds. - Weight::from_parts(56_899_000, 6534) + // Minimum execution time: 58_374_000 picoseconds. 
+ Weight::from_parts(59_615_000, 6534) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1558,8 +1560,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `409` // Estimated: `6349` - // Minimum execution time: 12_595_000 picoseconds. - Weight::from_parts(13_059_000, 6349) + // Minimum execution time: 12_559_000 picoseconds. + Weight::from_parts(12_947_000, 6349) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1570,7 +1572,7 @@ impl WeightInfo for () { // Measured: `142` // Estimated: `1627` // Minimum execution time: 2_480_000 picoseconds. - Weight::from_parts(2_663_000, 1627) + Weight::from_parts(2_680_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1582,8 +1584,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 12_115_000 picoseconds. - Weight::from_parts(12_506_000, 3631) + // Minimum execution time: 12_625_000 picoseconds. + Weight::from_parts(13_094_000, 3631) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1593,8 +1595,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_757_000 picoseconds. - Weight::from_parts(5_082_000, 3607) + // Minimum execution time: 4_836_000 picoseconds. + Weight::from_parts(5_182_000, 3607) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -1605,8 +1607,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 6_017_000 picoseconds. - Weight::from_parts(6_421_000, 3632) + // Minimum execution time: 6_319_000 picoseconds. + Weight::from_parts(6_582_000, 3632) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -1617,8 +1619,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 6_238_000 picoseconds. - Weight::from_parts(6_587_000, 3607) + // Minimum execution time: 6_532_000 picoseconds. + Weight::from_parts(6_909_000, 3607) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1643,10 +1645,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `804 + c * (1 ±0)` // Estimated: `9217 + c * (1 ±0)` - // Minimum execution time: 288_968_000 picoseconds. - Weight::from_parts(267_291_922, 9217) - // Standard Error: 78 - .saturating_add(Weight::from_parts(34_879, 0).saturating_mul(c.into())) + // Minimum execution time: 305_778_000 picoseconds. + Weight::from_parts(282_321_249, 9217) + // Standard Error: 72 + .saturating_add(Weight::from_parts(33_456, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -1678,14 +1680,14 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `326` // Estimated: `8740` - // Minimum execution time: 3_948_426_000 picoseconds. 
- Weight::from_parts(440_017_623, 8740) - // Standard Error: 555 - .saturating_add(Weight::from_parts(71_483, 0).saturating_mul(c.into())) - // Standard Error: 66 - .saturating_add(Weight::from_parts(1_831, 0).saturating_mul(i.into())) - // Standard Error: 66 - .saturating_add(Weight::from_parts(1_694, 0).saturating_mul(s.into())) + // Minimum execution time: 3_810_809_000 picoseconds. + Weight::from_parts(739_511_598, 8740) + // Standard Error: 140 + .saturating_add(Weight::from_parts(67_574, 0).saturating_mul(c.into())) + // Standard Error: 16 + .saturating_add(Weight::from_parts(1_488, 0).saturating_mul(i.into())) + // Standard Error: 16 + .saturating_add(Weight::from_parts(1_537, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } @@ -1715,12 +1717,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `563` // Estimated: `8982` - // Minimum execution time: 2_011_037_000 picoseconds. - Weight::from_parts(2_047_025_000, 8982) - // Standard Error: 28 - .saturating_add(Weight::from_parts(968, 0).saturating_mul(i.into())) - // Standard Error: 28 - .saturating_add(Weight::from_parts(780, 0).saturating_mul(s.into())) + // Minimum execution time: 1_986_789_000 picoseconds. + Weight::from_parts(2_017_466_000, 8982) + // Standard Error: 26 + .saturating_add(Weight::from_parts(827, 0).saturating_mul(i.into())) + // Standard Error: 26 + .saturating_add(Weight::from_parts(781, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -1744,8 +1746,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `829` // Estimated: `9244` - // Minimum execution time: 202_190_000 picoseconds. - Weight::from_parts(209_378_000, 9244) + // Minimum execution time: 210_724_000 picoseconds. + Weight::from_parts(218_608_000, 9244) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -1766,10 +1768,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `145` // Estimated: `6085` - // Minimum execution time: 271_161_000 picoseconds. - Weight::from_parts(279_218_977, 6085) - // Standard Error: 80 - .saturating_add(Weight::from_parts(33_973, 0).saturating_mul(c.into())) + // Minimum execution time: 271_259_000 picoseconds. + Weight::from_parts(298_852_854, 6085) + // Standard Error: 65 + .saturating_add(Weight::from_parts(33_547, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -1790,10 +1792,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `145` // Estimated: `6085` - // Minimum execution time: 273_684_000 picoseconds. - Weight::from_parts(284_348_722, 6085) - // Standard Error: 79 - .saturating_add(Weight::from_parts(34_205, 0).saturating_mul(c.into())) + // Minimum execution time: 278_167_000 picoseconds. + Weight::from_parts(311_888_941, 6085) + // Standard Error: 58 + .saturating_add(Weight::from_parts(33_595, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -1811,8 +1813,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 45_150_000 picoseconds. - Weight::from_parts(46_780_000, 3780) + // Minimum execution time: 47_403_000 picoseconds. 
+ Weight::from_parts(48_707_000, 3780) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -1828,8 +1830,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `552` // Estimated: `8967` - // Minimum execution time: 34_738_000 picoseconds. - Weight::from_parts(35_918_000, 8967) + // Minimum execution time: 35_361_000 picoseconds. + Weight::from_parts(36_714_000, 8967) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -1838,10 +1840,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_094_000 picoseconds. - Weight::from_parts(10_253_702, 0) - // Standard Error: 223 - .saturating_add(Weight::from_parts(250_757, 0).saturating_mul(r.into())) + // Minimum execution time: 9_340_000 picoseconds. + Weight::from_parts(9_360_237, 0) + // Standard Error: 269 + .saturating_add(Weight::from_parts(249_611, 0).saturating_mul(r.into())) } /// Storage: `Contracts::ContractInfoOf` (r:1600 w:0) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -1850,10 +1852,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `509 + r * (77 ±0)` // Estimated: `1467 + r * (2552 ±0)` - // Minimum execution time: 9_102_000 picoseconds. - Weight::from_parts(9_238_000, 1467) - // Standard Error: 6_076 - .saturating_add(Weight::from_parts(3_293_012, 0).saturating_mul(r.into())) + // Minimum execution time: 9_059_000 picoseconds. + Weight::from_parts(9_201_000, 1467) + // Standard Error: 5_643 + .saturating_add(Weight::from_parts(3_343_859, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2552).saturating_mul(r.into())) } @@ -1864,10 +1866,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `517 + r * (170 ±0)` // Estimated: `1468 + r * (2645 ±0)` - // Minimum execution time: 9_255_000 picoseconds. - Weight::from_parts(9_406_000, 1468) - // Standard Error: 6_826 - .saturating_add(Weight::from_parts(4_205_039, 0).saturating_mul(r.into())) + // Minimum execution time: 9_220_000 picoseconds. + Weight::from_parts(9_399_000, 1468) + // Standard Error: 6_194 + .saturating_add(Weight::from_parts(4_172_011, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2645).saturating_mul(r.into())) } @@ -1876,50 +1878,50 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_388_000 picoseconds. - Weight::from_parts(9_322_209, 0) - // Standard Error: 269 - .saturating_add(Weight::from_parts(358_189, 0).saturating_mul(r.into())) + // Minimum execution time: 9_707_000 picoseconds. + Weight::from_parts(10_100_456, 0) + // Standard Error: 234 + .saturating_add(Weight::from_parts(338_464, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_origin(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_300_000 picoseconds. - Weight::from_parts(10_268_326, 0) - // Standard Error: 72 - .saturating_add(Weight::from_parts(104_650, 0).saturating_mul(r.into())) + // Minimum execution time: 9_524_000 picoseconds. 
+ Weight::from_parts(10_813_389, 0) + // Standard Error: 76 + .saturating_add(Weight::from_parts(102_535, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_caller_is_root(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_162_000 picoseconds. - Weight::from_parts(10_059_984, 0) - // Standard Error: 87 - .saturating_add(Weight::from_parts(87_627, 0).saturating_mul(r.into())) + // Minimum execution time: 9_799_000 picoseconds. + Weight::from_parts(10_886_744, 0) + // Standard Error: 75 + .saturating_add(Weight::from_parts(80_901, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_address(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_193_000 picoseconds. - Weight::from_parts(10_160_715, 0) - // Standard Error: 152 - .saturating_add(Weight::from_parts(263_703, 0).saturating_mul(r.into())) + // Minimum execution time: 9_895_000 picoseconds. + Weight::from_parts(10_658_338, 0) + // Standard Error: 189 + .saturating_add(Weight::from_parts(249_694, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_gas_left(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_109_000 picoseconds. - Weight::from_parts(9_766_924, 0) - // Standard Error: 212 - .saturating_add(Weight::from_parts(291_694, 0).saturating_mul(r.into())) + // Minimum execution time: 9_643_000 picoseconds. + Weight::from_parts(10_932_126, 0) + // Standard Error: 153 + .saturating_add(Weight::from_parts(280_924, 0).saturating_mul(r.into())) } /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) @@ -1928,10 +1930,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `140` // Estimated: `3599` - // Minimum execution time: 9_463_000 picoseconds. - Weight::from_parts(9_541_000, 3599) - // Standard Error: 3_075 - .saturating_add(Weight::from_parts(1_606_043, 0).saturating_mul(r.into())) + // Minimum execution time: 9_548_000 picoseconds. + Weight::from_parts(9_737_000, 3599) + // Standard Error: 971 + .saturating_add(Weight::from_parts(1_704_134, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 1600]`. @@ -1939,40 +1941,40 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_080_000 picoseconds. - Weight::from_parts(8_121_924, 0) - // Standard Error: 198 - .saturating_add(Weight::from_parts(247_527, 0).saturating_mul(r.into())) + // Minimum execution time: 9_172_000 picoseconds. + Weight::from_parts(18_255_933, 0) + // Standard Error: 540 + .saturating_add(Weight::from_parts(230_929, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_minimum_balance(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_393_000 picoseconds. - Weight::from_parts(9_999_247, 0) - // Standard Error: 169 - .saturating_add(Weight::from_parts(244_563, 0).saturating_mul(r.into())) + // Minimum execution time: 9_232_000 picoseconds. 
+ Weight::from_parts(9_796_584, 0) + // Standard Error: 208 + .saturating_add(Weight::from_parts(239_962, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_block_number(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_236_000 picoseconds. - Weight::from_parts(9_561_435, 0) - // Standard Error: 195 - .saturating_add(Weight::from_parts(239_812, 0).saturating_mul(r.into())) + // Minimum execution time: 9_747_000 picoseconds. + Weight::from_parts(8_733_230, 0) + // Standard Error: 377 + .saturating_add(Weight::from_parts(253_801, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_now(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_259_000 picoseconds. - Weight::from_parts(10_353_960, 0) - // Standard Error: 216 - .saturating_add(Weight::from_parts(243_754, 0).saturating_mul(r.into())) + // Minimum execution time: 9_214_000 picoseconds. + Weight::from_parts(10_194_153, 0) + // Standard Error: 516 + .saturating_add(Weight::from_parts(247_621, 0).saturating_mul(r.into())) } /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) @@ -1981,10 +1983,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `67` // Estimated: `1552` - // Minimum execution time: 9_145_000 picoseconds. - Weight::from_parts(16_524_937, 1552) - // Standard Error: 438 - .saturating_add(Weight::from_parts(666_821, 0).saturating_mul(r.into())) + // Minimum execution time: 9_022_000 picoseconds. + Weight::from_parts(22_051_160, 1552) + // Standard Error: 697 + .saturating_add(Weight::from_parts(709_612, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 1600]`. @@ -1992,10 +1994,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_179_000 picoseconds. - Weight::from_parts(8_893_261, 0) - // Standard Error: 215 - .saturating_add(Weight::from_parts(175_586, 0).saturating_mul(r.into())) + // Minimum execution time: 9_135_000 picoseconds. + Weight::from_parts(10_646_215, 0) + // Standard Error: 161 + .saturating_add(Weight::from_parts(170_336, 0).saturating_mul(r.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -2018,10 +2020,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `872` // Estimated: `9287` - // Minimum execution time: 259_315_000 picoseconds. - Weight::from_parts(137_461_362, 9287) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_388, 0).saturating_mul(n.into())) + // Minimum execution time: 273_896_000 picoseconds. + Weight::from_parts(148_309_654, 9287) + // Standard Error: 16 + .saturating_add(Weight::from_parts(1_355, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -2030,20 +2032,20 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_919_000 picoseconds. 
- Weight::from_parts(9_465_187, 0) - // Standard Error: 32_481 - .saturating_add(Weight::from_parts(992_912, 0).saturating_mul(r.into())) + // Minimum execution time: 8_906_000 picoseconds. + Weight::from_parts(9_264_446, 0) + // Standard Error: 19_760 + .saturating_add(Weight::from_parts(1_256_053, 0).saturating_mul(r.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_return_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_244_000 picoseconds. - Weight::from_parts(10_654_989, 0) + // Minimum execution time: 10_266_000 picoseconds. + Weight::from_parts(10_602_261, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(315, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(318, 0).saturating_mul(n.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -2072,10 +2074,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `4805 + r * (2121 ±0)` // Estimated: `13220 + r * (81321 ±0)` - // Minimum execution time: 303_028_000 picoseconds. - Weight::from_parts(323_032_397, 13220) - // Standard Error: 848_406 - .saturating_add(Weight::from_parts(242_988_002, 0).saturating_mul(r.into())) + // Minimum execution time: 295_922_000 picoseconds. + Weight::from_parts(322_472_877, 13220) + // Standard Error: 993_812 + .saturating_add(Weight::from_parts(259_075_422, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().reads((36_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -2089,10 +2091,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 9_227_000 picoseconds. - Weight::from_parts(14_055_283, 1561) - // Standard Error: 758 - .saturating_add(Weight::from_parts(1_104_996, 0).saturating_mul(r.into())) + // Minimum execution time: 9_427_000 picoseconds. + Weight::from_parts(12_996_213, 1561) + // Standard Error: 845 + .saturating_add(Weight::from_parts(1_182_642, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 1600]`. @@ -2100,10 +2102,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_483_000 picoseconds. - Weight::from_parts(20_453_059, 0) - // Standard Error: 3_271 - .saturating_add(Weight::from_parts(1_713_468, 0).saturating_mul(r.into())) + // Minimum execution time: 9_304_000 picoseconds. + Weight::from_parts(25_678_842, 0) + // Standard Error: 1_855 + .saturating_add(Weight::from_parts(1_814_511, 0).saturating_mul(r.into())) } /// Storage: `System::EventTopics` (r:4 w:4) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -2113,12 +2115,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `990 + t * (2475 ±0)` - // Minimum execution time: 23_517_000 picoseconds. - Weight::from_parts(15_543_153, 990) - // Standard Error: 13_814 - .saturating_add(Weight::from_parts(2_357_255, 0).saturating_mul(t.into())) - // Standard Error: 3 - .saturating_add(Weight::from_parts(573, 0).saturating_mul(n.into())) + // Minimum execution time: 23_425_000 picoseconds. 
+ Weight::from_parts(15_229_010, 990) + // Standard Error: 14_380 + .saturating_add(Weight::from_parts(2_545_653, 0).saturating_mul(t.into())) + // Standard Error: 4 + .saturating_add(Weight::from_parts(594, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into())) @@ -2128,20 +2130,20 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_448_000 picoseconds. - Weight::from_parts(9_845_841, 0) - // Standard Error: 58 - .saturating_add(Weight::from_parts(105_442, 0).saturating_mul(r.into())) + // Minimum execution time: 11_117_000 picoseconds. + Weight::from_parts(12_887_533, 0) + // Standard Error: 83 + .saturating_add(Weight::from_parts(99_373, 0).saturating_mul(r.into())) } /// The range of component `i` is `[0, 1048576]`. fn seal_debug_message_per_byte(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_869_000 picoseconds. - Weight::from_parts(11_024_000, 0) + // Minimum execution time: 10_982_000 picoseconds. + Weight::from_parts(11_176_000, 0) // Standard Error: 8 - .saturating_add(Weight::from_parts(991, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(983, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -2150,10 +2152,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `108 + r * (150 ±0)` // Estimated: `105 + r * (151 ±0)` - // Minimum execution time: 9_119_000 picoseconds. - Weight::from_parts(9_270_000, 105) - // Standard Error: 8_960 - .saturating_add(Weight::from_parts(5_215_976, 0).saturating_mul(r.into())) + // Minimum execution time: 9_150_000 picoseconds. + Weight::from_parts(9_269_000, 105) + // Standard Error: 8_147 + .saturating_add(Weight::from_parts(5_339_554, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) @@ -2165,10 +2167,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `245` // Estimated: `245` - // Minimum execution time: 17_833_000 picoseconds. - Weight::from_parts(18_940_114, 245) - // Standard Error: 2 - .saturating_add(Weight::from_parts(316, 0).saturating_mul(n.into())) + // Minimum execution time: 19_085_000 picoseconds. + Weight::from_parts(20_007_323, 245) + // Standard Error: 3 + .saturating_add(Weight::from_parts(291, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -2179,10 +2181,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 18_428_000 picoseconds. - Weight::from_parts(19_372_726, 248) - // Standard Error: 2 - .saturating_add(Weight::from_parts(85, 0).saturating_mul(n.into())) + // Minimum execution time: 19_127_000 picoseconds. 
+ Weight::from_parts(21_152_987, 248) + // Standard Error: 3 + .saturating_add(Weight::from_parts(42, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -2194,10 +2196,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `108 + r * (150 ±0)` // Estimated: `105 + r * (151 ±0)` - // Minimum execution time: 9_335_000 picoseconds. - Weight::from_parts(9_459_000, 105) - // Standard Error: 9_156 - .saturating_add(Weight::from_parts(5_166_621, 0).saturating_mul(r.into())) + // Minimum execution time: 9_264_000 picoseconds. + Weight::from_parts(9_449_000, 105) + // Standard Error: 8_196 + .saturating_add(Weight::from_parts(5_325_578, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) @@ -2209,10 +2211,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 18_308_000 picoseconds. - Weight::from_parts(19_421_433, 248) + // Minimum execution time: 18_489_000 picoseconds. + Weight::from_parts(19_916_153, 248) // Standard Error: 2 - .saturating_add(Weight::from_parts(83, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(97, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -2224,10 +2226,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `108 + r * (150 ±0)` // Estimated: `105 + r * (151 ±0)` - // Minimum execution time: 9_184_000 picoseconds. - Weight::from_parts(9_245_000, 105) - // Standard Error: 8_442 - .saturating_add(Weight::from_parts(4_543_991, 0).saturating_mul(r.into())) + // Minimum execution time: 9_299_000 picoseconds. + Weight::from_parts(9_464_000, 105) + // Standard Error: 6_827 + .saturating_add(Weight::from_parts(4_720_699, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) } @@ -2238,10 +2240,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 17_194_000 picoseconds. - Weight::from_parts(19_032_094, 248) + // Minimum execution time: 17_981_000 picoseconds. + Weight::from_parts(19_802_353, 248) // Standard Error: 3 - .saturating_add(Weight::from_parts(590, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(617, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -2252,10 +2254,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `108 + r * (150 ±0)` // Estimated: `105 + r * (151 ±0)` - // Minimum execution time: 9_380_000 picoseconds. - Weight::from_parts(9_501_000, 105) - // Standard Error: 7_029 - .saturating_add(Weight::from_parts(4_406_690, 0).saturating_mul(r.into())) + // Minimum execution time: 9_891_000 picoseconds. 
+ Weight::from_parts(10_046_000, 105) + // Standard Error: 6_993 + .saturating_add(Weight::from_parts(4_601_167, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) } @@ -2266,10 +2268,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 16_400_000 picoseconds. - Weight::from_parts(17_993_941, 248) + // Minimum execution time: 17_229_000 picoseconds. + Weight::from_parts(18_302_733, 248) // Standard Error: 2 - .saturating_add(Weight::from_parts(68, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(112, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -2280,10 +2282,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `108 + r * (150 ±0)` // Estimated: `105 + r * (151 ±0)` - // Minimum execution time: 9_109_000 picoseconds. - Weight::from_parts(9_265_000, 105) - // Standard Error: 8_733 - .saturating_add(Weight::from_parts(5_218_811, 0).saturating_mul(r.into())) + // Minimum execution time: 9_323_000 picoseconds. + Weight::from_parts(9_462_000, 105) + // Standard Error: 8_031 + .saturating_add(Weight::from_parts(5_433_981, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) @@ -2295,10 +2297,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 18_423_000 picoseconds. - Weight::from_parts(20_025_132, 248) + // Minimum execution time: 18_711_000 picoseconds. + Weight::from_parts(20_495_670, 248) // Standard Error: 3 - .saturating_add(Weight::from_parts(628, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(640, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -2310,10 +2312,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `770` // Estimated: `4221 + r * (2475 ±0)` - // Minimum execution time: 9_043_000 picoseconds. - Weight::from_parts(9_176_000, 4221) - // Standard Error: 12_901 - .saturating_add(Weight::from_parts(32_297_438, 0).saturating_mul(r.into())) + // Minimum execution time: 9_226_000 picoseconds. + Weight::from_parts(9_394_000, 4221) + // Standard Error: 14_741 + .saturating_add(Weight::from_parts(34_179_316, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) @@ -2335,10 +2337,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `520 + r * (170 ±0)` // Estimated: `6463 + r * (2646 ±0)` - // Minimum execution time: 9_299_000 picoseconds. - Weight::from_parts(9_427_000, 6463) - // Standard Error: 101_949 - .saturating_add(Weight::from_parts(244_143_691, 0).saturating_mul(r.into())) + // Minimum execution time: 9_455_000 picoseconds. 
+ Weight::from_parts(9_671_000, 6463) + // Standard Error: 126_080 + .saturating_add(Weight::from_parts(244_204_040, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -2359,11 +2361,11 @@ impl WeightInfo for () { fn seal_delegate_call(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + r * (527 ±0)` - // Estimated: `6447 + r * (2583 ±3)` - // Minimum execution time: 9_359_000 picoseconds. - Weight::from_parts(9_425_000, 6447) - // Standard Error: 193_938 - .saturating_add(Weight::from_parts(244_904_401, 0).saturating_mul(r.into())) + // Estimated: `6447 + r * (2583 ±10)` + // Minimum execution time: 9_274_000 picoseconds. + Weight::from_parts(9_437_000, 6447) + // Standard Error: 150_832 + .saturating_add(Weight::from_parts(244_196_269, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2583).saturating_mul(r.into())) @@ -2386,12 +2388,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `699 + t * (277 ±0)` // Estimated: `6639 + t * (3458 ±0)` - // Minimum execution time: 214_588_000 picoseconds. - Weight::from_parts(129_214_481, 6639) - // Standard Error: 2_468_090 - .saturating_add(Weight::from_parts(32_514_739, 0).saturating_mul(t.into())) + // Minimum execution time: 214_483_000 picoseconds. + Weight::from_parts(122_634_366, 6639) + // Standard Error: 2_499_235 + .saturating_add(Weight::from_parts(41_326_008, 0).saturating_mul(t.into())) // Standard Error: 3 - .saturating_add(Weight::from_parts(418, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(422, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -2406,10 +2408,10 @@ impl WeightInfo for () { /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:800 w:801) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::Account` (r:802 w:802) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::EventTopics` (r:801 w:801) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[1, 800]`. @@ -2417,10 +2419,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1097 + r * (188 ±0)` // Estimated: `6990 + r * (2664 ±0)` - // Minimum execution time: 352_925_000 picoseconds. - Weight::from_parts(355_487_000, 6990) - // Standard Error: 261_528 - .saturating_add(Weight::from_parts(337_897_187, 0).saturating_mul(r.into())) + // Minimum execution time: 341_569_000 picoseconds. 
+ Weight::from_parts(360_574_000, 6990) + // Standard Error: 259_746 + .saturating_add(Weight::from_parts(337_944_674, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -2435,10 +2437,10 @@ impl WeightInfo for () { /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:2) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::Account` (r:3 w:3) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// Storage: `Parameters::Parameters` (r:2 w:0) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[0, 1]`. @@ -2448,12 +2450,14 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `760 + t * (104 ±0)` // Estimated: `6719 + t * (2549 ±1)` - // Minimum execution time: 1_870_832_000 picoseconds. - Weight::from_parts(949_110_245, 6719) - // Standard Error: 24 - .saturating_add(Weight::from_parts(1_084, 0).saturating_mul(i.into())) - // Standard Error: 24 - .saturating_add(Weight::from_parts(1_206, 0).saturating_mul(s.into())) + // Minimum execution time: 1_863_119_000 picoseconds. + Weight::from_parts(900_189_174, 6719) + // Standard Error: 13_040_979 + .saturating_add(Weight::from_parts(4_056_063, 0).saturating_mul(t.into())) + // Standard Error: 20 + .saturating_add(Weight::from_parts(1_028, 0).saturating_mul(i.into())) + // Standard Error: 20 + .saturating_add(Weight::from_parts(1_173, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(7_u64)) @@ -2465,58 +2469,58 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_142_000 picoseconds. - Weight::from_parts(9_787_220, 0) - // Standard Error: 236 - .saturating_add(Weight::from_parts(267_264, 0).saturating_mul(r.into())) + // Minimum execution time: 9_211_000 picoseconds. + Weight::from_parts(11_696_412, 0) + // Standard Error: 388 + .saturating_add(Weight::from_parts(265_538, 0).saturating_mul(r.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_485_000 picoseconds. - Weight::from_parts(1_870_250, 0) + // Minimum execution time: 10_296_000 picoseconds. + Weight::from_parts(572_494, 0) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_073, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_067, 0).saturating_mul(n.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_005_000 picoseconds. 
- Weight::from_parts(8_943_937, 0) - // Standard Error: 1_385 - .saturating_add(Weight::from_parts(665_970, 0).saturating_mul(r.into())) + // Minimum execution time: 9_177_000 picoseconds. + Weight::from_parts(8_620_481, 0) + // Standard Error: 249 + .saturating_add(Weight::from_parts(674_502, 0).saturating_mul(r.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_965_000 picoseconds. - Weight::from_parts(11_749_746, 0) - // Standard Error: 6 - .saturating_add(Weight::from_parts(3_330, 0).saturating_mul(n.into())) + // Minimum execution time: 11_240_000 picoseconds. + Weight::from_parts(8_696_186, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(3_328, 0).saturating_mul(n.into())) } /// The range of component `r` is `[0, 1600]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_400_000 picoseconds. - Weight::from_parts(13_857_546, 0) - // Standard Error: 246 - .saturating_add(Weight::from_parts(326_483, 0).saturating_mul(r.into())) + // Minimum execution time: 9_889_000 picoseconds. + Weight::from_parts(16_103_170, 0) + // Standard Error: 343 + .saturating_add(Weight::from_parts(328_939, 0).saturating_mul(r.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_064_000 picoseconds. - Weight::from_parts(1_885_873, 0) + // Minimum execution time: 10_405_000 picoseconds. + Weight::from_parts(2_264_024, 0) // Standard Error: 0 .saturating_add(Weight::from_parts(1_196, 0).saturating_mul(n.into())) } @@ -2525,60 +2529,60 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_068_000 picoseconds. - Weight::from_parts(17_169_362, 0) - // Standard Error: 1_580 - .saturating_add(Weight::from_parts(330_195, 0).saturating_mul(r.into())) + // Minimum execution time: 9_215_000 picoseconds. + Weight::from_parts(10_505_632, 0) + // Standard Error: 240 + .saturating_add(Weight::from_parts(324_854, 0).saturating_mul(r.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_169_000 picoseconds. - Weight::from_parts(2_159_277, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_200, 0).saturating_mul(n.into())) + // Minimum execution time: 10_440_000 picoseconds. + Weight::from_parts(2_575_889, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_199, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 53_863_000 picoseconds. - Weight::from_parts(54_902_157, 0) - // Standard Error: 9 - .saturating_add(Weight::from_parts(4_588, 0).saturating_mul(n.into())) + // Minimum execution time: 55_119_000 picoseconds. + Weight::from_parts(56_732_248, 0) + // Standard Error: 8 + .saturating_add(Weight::from_parts(4_639, 0).saturating_mul(n.into())) } /// The range of component `r` is `[0, 160]`. 
fn seal_sr25519_verify(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_107_000 picoseconds. - Weight::from_parts(24_115_247, 0) - // Standard Error: 7_427 - .saturating_add(Weight::from_parts(41_116_827, 0).saturating_mul(r.into())) + // Minimum execution time: 9_176_000 picoseconds. + Weight::from_parts(9_861_102, 0) + // Standard Error: 6_029 + .saturating_add(Weight::from_parts(45_948_571, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_404_000 picoseconds. - Weight::from_parts(31_763_334, 0) - // Standard Error: 9_833 - .saturating_add(Weight::from_parts(45_529_880, 0).saturating_mul(r.into())) + // Minimum execution time: 9_293_000 picoseconds. + Weight::from_parts(28_785_765, 0) + // Standard Error: 9_160 + .saturating_add(Weight::from_parts(45_566_150, 0).saturating_mul(r.into())) } /// The range of component `r` is `[0, 160]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_409_000 picoseconds. - Weight::from_parts(15_072_835, 0) - // Standard Error: 4_591 - .saturating_add(Weight::from_parts(11_619_283, 0).saturating_mul(r.into())) + // Minimum execution time: 9_206_000 picoseconds. + Weight::from_parts(12_420_664, 0) + // Standard Error: 3_489 + .saturating_add(Weight::from_parts(11_628_989, 0).saturating_mul(r.into())) } /// Storage: `Contracts::CodeInfoOf` (r:1536 w:1536) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) @@ -2592,11 +2596,11 @@ impl WeightInfo for () { fn seal_set_code_hash(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + r * (926 ±0)` - // Estimated: `8969 + r * (3047 ±10)` - // Minimum execution time: 9_269_000 picoseconds. - Weight::from_parts(9_372_000, 8969) - // Standard Error: 61_354 - .saturating_add(Weight::from_parts(26_280_409, 0).saturating_mul(r.into())) + // Estimated: `8969 + r * (3047 ±7)` + // Minimum execution time: 9_219_000 picoseconds. + Weight::from_parts(9_385_000, 8969) + // Standard Error: 45_562 + .saturating_add(Weight::from_parts(26_360_661, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 3047).saturating_mul(r.into())) @@ -2608,10 +2612,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `274 + r * (78 ±0)` // Estimated: `1265 + r * (2553 ±0)` - // Minimum execution time: 9_103_000 picoseconds. - Weight::from_parts(14_404_626, 1265) - // Standard Error: 9_343 - .saturating_add(Weight::from_parts(5_154_949, 0).saturating_mul(r.into())) + // Minimum execution time: 9_355_000 picoseconds. 
+ Weight::from_parts(15_071_309, 1265) + // Standard Error: 9_722 + .saturating_add(Weight::from_parts(5_328_717, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2553).saturating_mul(r.into())) @@ -2623,10 +2627,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `275 + r * (78 ±0)` // Estimated: `990 + r * (2568 ±0)` - // Minimum execution time: 9_219_000 picoseconds. - Weight::from_parts(14_085_456, 990) - // Standard Error: 11_206 - .saturating_add(Weight::from_parts(4_422_122, 0).saturating_mul(r.into())) + // Minimum execution time: 8_979_000 picoseconds. + Weight::from_parts(14_362_224, 990) + // Standard Error: 9_137 + .saturating_add(Weight::from_parts(4_488_748, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 2568).saturating_mul(r.into())) @@ -2652,10 +2656,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `861 + r * (3 ±0)` // Estimated: `9282 + r * (3 ±0)` - // Minimum execution time: 269_333_000 picoseconds. - Weight::from_parts(286_922_618, 9282) - // Standard Error: 443 - .saturating_add(Weight::from_parts(168_869, 0).saturating_mul(r.into())) + // Minimum execution time: 269_704_000 picoseconds. + Weight::from_parts(289_916_035, 9282) + // Standard Error: 408 + .saturating_add(Weight::from_parts(166_040, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) @@ -2665,10 +2669,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_328_000 picoseconds. - Weight::from_parts(14_019_583, 0) - // Standard Error: 171 - .saturating_add(Weight::from_parts(88_751, 0).saturating_mul(r.into())) + // Minimum execution time: 9_361_000 picoseconds. + Weight::from_parts(11_633_836, 0) + // Standard Error: 86 + .saturating_add(Weight::from_parts(83_083, 0).saturating_mul(r.into())) } /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -2677,10 +2681,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `219` // Estimated: `1704` - // Minimum execution time: 9_267_000 picoseconds. - Weight::from_parts(15_304_284, 1704) - // Standard Error: 1_219 - .saturating_add(Weight::from_parts(74_696, 0).saturating_mul(r.into())) + // Minimum execution time: 9_133_000 picoseconds. + Weight::from_parts(13_259_836, 1704) + // Standard Error: 121 + .saturating_add(Weight::from_parts(76_878, 0).saturating_mul(r.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 5000]`. @@ -2688,9 +2692,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 911_000 picoseconds. - Weight::from_parts(449_666, 0) - // Standard Error: 26 - .saturating_add(Weight::from_parts(14_797, 0).saturating_mul(r.into())) + // Minimum execution time: 851_000 picoseconds. 
+ Weight::from_parts(587_883, 0)
+ // Standard Error: 16
+ .saturating_add(Weight::from_parts(14_912, 0).saturating_mul(r.into()))
 }
}
-- 
GitLab


From 8fd839df9d689822fdd830aa1c78099f342ea58f Mon Sep 17 00:00:00 2001
From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Date: Wed, 17 Apr 2024 11:29:08 +0300
Subject: [PATCH 013/107] grandpa: Send neighbor packet to lightclients with
 every finalized head (#4135)

This PR sends the GrandpaNeighbor packet to light clients in the same way
as to full nodes. Previously, a light client would receive a
GrandpaNeighbor packet only when the voter set changed.

Related to: https://github.com/paritytech/polkadot-sdk/issues/4120

Next steps:
- [ ] check with lightclient

---------

Signed-off-by: Alexandru Vasile
---
 .../grandpa/src/communication/gossip.rs | 34 +++++--------------
 1 file changed, 8 insertions(+), 26 deletions(-)

diff --git a/substrate/client/consensus/grandpa/src/communication/gossip.rs b/substrate/client/consensus/grandpa/src/communication/gossip.rs
index 88821faf0ab..c7fe5a46a5e 100644
--- a/substrate/client/consensus/grandpa/src/communication/gossip.rs
+++ b/substrate/client/consensus/grandpa/src/communication/gossip.rs
@@ -810,7 +810,7 @@ impl<Block: BlockT> Inner<Block> {
 self.live_topics.push(round, set_id);
 self.peers.reshuffle();

- self.multicast_neighbor_packet(false)
+ self.multicast_neighbor_packet()
 }

 /// Note that a voter set with given ID has started. Does nothing if the last
@@ -847,10 +847,7 @@ impl<Block: BlockT> Inner<Block> {
 self.live_topics.push(Round(1), set_id);
 self.authorities = authorities;

- // when transitioning to a new set we also want to send neighbor packets to light clients,
- // this is so that they know who to ask justifications from in order to finalize the last
- // block in the previous set.
- self.multicast_neighbor_packet(true)
+ self.multicast_neighbor_packet()
 }

 /// Note that we've imported a commit finalizing a given block. Does nothing if the last
@@ -869,7 +866,7 @@ impl<Block: BlockT> Inner<Block> {
 return None
 }

- self.multicast_neighbor_packet(false)
+ self.multicast_neighbor_packet()
 }

 fn consider_vote(&self, round: Round, set_id: SetId) -> Consider {
@@ -1183,7 +1180,7 @@ impl<Block: BlockT> Inner<Block> {
 (neighbor_topics, action, catch_up, report)
 }

- fn multicast_neighbor_packet(&self, force_light: bool) -> MaybeMessage {
+ fn multicast_neighbor_packet(&self) -> MaybeMessage {
 self.local_view.as_ref().map(|local_view| {
 let packet = NeighborPacket {
 round: local_view.round,
@@ -1191,22 +1188,7 @@ impl<Block: BlockT> Inner<Block> {
 commit_finalized_height: *local_view.last_commit_height().unwrap_or(&Zero::zero()),
 };

- let peers = self
- .peers
- .inner
- .iter()
- .filter_map(|(id, info)| {
- // light clients don't participate in the full GRANDPA voter protocol
- // and therefore don't need to be informed about all view updates unless
- // we explicitly require it (e.g. when transitioning to a new set)
- if info.roles.is_light() && !force_light {
- None
- } else {
- Some(id)
- }
- })
- .cloned()
- .collect();
+ let peers = self.peers.inner.iter().map(|(id, _)| id).cloned().collect();

 (peers, packet)
 })
@@ -2602,7 +2584,7 @@ mod tests {
 }

 #[test]
- fn sends_neighbor_packets_to_non_light_peers_when_starting_a_new_round() {
+ fn sends_neighbor_packets_to_all_peers_when_starting_a_new_round() {
 let (val, _) = GossipValidator::<Block>::new(config(), voter_set_state(), None, None);

 // initialize the validator to a stable set id
@@ -2617,10 +2599,10 @@
 val.inner.write().peers.new_peer(light_peer, ObservedRole::Light);

 val.note_round(Round(2), |peers, message| {
- assert_eq!(peers.len(), 2);
+ assert_eq!(peers.len(), 3);
 assert!(peers.contains(&authority_peer));
 assert!(peers.contains(&full_peer));
- assert!(!peers.contains(&light_peer));
+ assert!(peers.contains(&light_peer));
 assert!(matches!(message, NeighborPacket { set_id: SetId(1), round: Round(2), .. }));
 });
 }
-- 
GitLab


From 4be9f93cd7e81c71ead1a8b5445bc695d98b10ba Mon Sep 17 00:00:00 2001
From: Branislav Kontur
Date: Wed, 17 Apr 2024 11:09:24 +0200
Subject: [PATCH 014/107] Adjust `xcm-bridge-hub-router`'s `SendXcm::validate`
 behavior for `NotApplicable` (#4162)

This PR adjusts `xcm-bridge-hub-router` so that it can be used in a chain
of routers: `SendXcm::validate` now returns `NotApplicable` without
consuming `dest` and `xcm`, which lets the next router in the chain be
tried.

Closes: https://github.com/paritytech/polkadot-sdk/issues/4133

## TODO

- [ ] backport to polkadot-sdk 1.10.0 crates.io release
---
 .../modules/xcm-bridge-hub-router/src/lib.rs  | 152 ++++++++++++------
 .../modules/xcm-bridge-hub-router/src/mock.rs |   4 +-
 2 files changed, 109 insertions(+), 47 deletions(-)

diff --git a/bridges/modules/xcm-bridge-hub-router/src/lib.rs b/bridges/modules/xcm-bridge-hub-router/src/lib.rs
index c6008802ae9..ece72ac8494 100644
--- a/bridges/modules/xcm-bridge-hub-router/src/lib.rs
+++ b/bridges/modules/xcm-bridge-hub-router/src/lib.rs
@@ -328,40 +328,58 @@ impl<T: Config<I>, I: 'static> SendXcm for Pallet<T, I> {
 xcm: &mut Option<Xcm<()>>,
 ) -> SendResult<Self::Ticket> {
 log::trace!(target: LOG_TARGET, "validate - msg: {xcm:?}, destination: {dest:?}");
- // `dest` and `xcm` are required here
- let dest_ref = dest.as_ref().ok_or(SendError::MissingArgument)?;
- let xcm_ref = xcm.as_ref().ok_or(SendError::MissingArgument)?;
-
- // we won't have an access to `dest` and `xcm` in the `deliver` method, so precompute
- // everything required here
- let message_size = xcm_ref.encoded_size() as _;
-
- // bridge doesn't support oversized/overweight messages now. So it is better to drop such
- // messages here than at the bridge hub. Let's check the message size.
- if message_size > HARD_MESSAGE_SIZE_LIMIT {
- return Err(SendError::ExceedsMaxMessageSize)
- }
-
- // We need to ensure that the known `dest`'s XCM version can comprehend the current `xcm`
- // program. This may seem like an additional, unnecessary check, but it is not. A similar
- // check is probably performed by the `ViaBridgeHubExporter`, which attempts to send a
- // versioned message to the sibling bridge hub. However, the local bridge hub may have a
- // higher XCM version than the remote `dest`. Once again, it is better to discard such
- // messages here than at the bridge hub (e.g., to avoid losing funds).
- let destination_version = T::DestinationVersion::get_version_for(dest_ref) - .ok_or(SendError::DestinationUnsupported)?; - let _ = VersionedXcm::from(xcm_ref.clone()) - .into_version(destination_version) - .map_err(|()| SendError::DestinationUnsupported)?; - - // just use exporter to validate destination and insert instructions to pay message fee - // at the sibling/child bridge hub - // - // the cost will include both cost of: (1) to-sibling bridge hub delivery (returned by - // the `Config::ToBridgeHubSender`) and (2) to-bridged bridge hub delivery (returned by - // `Self::exporter_for`) - ViaBridgeHubExporter::::validate(dest, xcm) - .map(|(ticket, cost)| ((message_size, ticket), cost)) + // In case of success, the `ViaBridgeHubExporter` can modify XCM instructions and consume + // `dest` / `xcm`, so we retain a clone of the original message and the destination for later + // `DestinationVersion` validation. + let xcm_to_dest_clone = xcm.clone(); + let dest_clone = dest.clone(); + + // First, use the inner exporter to validate the destination to determine if it is even + // routable. If it is not, return an error. If it is, then the XCM is extended with + // instructions to pay the message fee at the sibling/child bridge hub. The cost will + // include both the cost of (1) delivery to the sibling bridge hub (returned by + // `Config::ToBridgeHubSender`) and (2) delivery to the bridged bridge hub (returned by + // `Self::exporter_for`). + match ViaBridgeHubExporter::::validate(dest, xcm) { + Ok((ticket, cost)) => { + // If the ticket is ok, it means we are routing with this router, so we need to + // apply more validations to the cloned `dest` and `xcm`, which are required here. + let xcm_to_dest_clone = xcm_to_dest_clone.ok_or(SendError::MissingArgument)?; + let dest_clone = dest_clone.ok_or(SendError::MissingArgument)?; + + // We won't have access to `dest` and `xcm` in the `deliver` method, so we need to + // precompute everything required here. However, `dest` and `xcm` were consumed by + // `ViaBridgeHubExporter`, so we need to use their clones. + let message_size = xcm_to_dest_clone.encoded_size() as _; + + // The bridge doesn't support oversized or overweight messages. Therefore, it's + // better to drop such messages here rather than at the bridge hub. Let's check the + // message size. + if message_size > HARD_MESSAGE_SIZE_LIMIT { + return Err(SendError::ExceedsMaxMessageSize) + } + + // We need to ensure that the known `dest`'s XCM version can comprehend the current + // `xcm` program. This may seem like an additional, unnecessary check, but it is + // not. A similar check is probably performed by the `ViaBridgeHubExporter`, which + // attempts to send a versioned message to the sibling bridge hub. However, the + // local bridge hub may have a higher XCM version than the remote `dest`. Once + // again, it is better to discard such messages here than at the bridge hub (e.g., + // to avoid losing funds).
+ let destination_version = T::DestinationVersion::get_version_for(&dest_clone) + .ok_or(SendError::DestinationUnsupported)?; + let _ = VersionedXcm::from(xcm_to_dest_clone) + .into_version(destination_version) + .map_err(|()| SendError::DestinationUnsupported)?; + + Ok(((message_size, ticket), cost)) + }, + Err(e) => { + log::trace!(target: LOG_TARGET, "validate - ViaBridgeHubExporter - error: {e:?}"); + Err(e) + }, + } } fn deliver(ticket: Self::Ticket) -> Result { @@ -452,24 +470,51 @@ mod tests { #[test] fn not_applicable_if_destination_is_within_other_network() { run_test(|| { + // unroutable dest + let dest = Location::new(2, [GlobalConsensus(ByGenesis([0; 32])), Parachain(1000)]); + let xcm: Xcm<()> = vec![ClearOrigin].into(); + + // check that router does not consume when `NotApplicable` + let mut xcm_wrapper = Some(xcm.clone()); assert_eq!( - send_xcm::( - Location::new(2, [GlobalConsensus(Rococo), Parachain(1000)]), - vec![].into(), - ), + XcmBridgeHubRouter::validate(&mut Some(dest.clone()), &mut xcm_wrapper), Err(SendError::NotApplicable), ); + // XCM is NOT consumed and untouched + assert_eq!(Some(xcm.clone()), xcm_wrapper); + + // check the full `send_xcm` + assert_eq!(send_xcm::(dest, xcm,), Err(SendError::NotApplicable),); }); } #[test] fn exceeds_max_message_size_if_size_is_above_hard_limit() { run_test(|| { + // routable dest with XCM version + let dest = + Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)]); + // oversized XCM + let xcm: Xcm<()> = vec![ClearOrigin; HARD_MESSAGE_SIZE_LIMIT as usize].into(); + + // dest is routable with the inner router + assert_ok!(ViaBridgeHubExporter::::validate( + &mut Some(dest.clone()), + &mut Some(xcm.clone()) + )); + + // check for oversized message + let mut xcm_wrapper = Some(xcm.clone()); + assert_eq!( + XcmBridgeHubRouter::validate(&mut Some(dest.clone()), &mut xcm_wrapper), + Err(SendError::ExceedsMaxMessageSize), + ); + // XCM is consumed by the inner router + assert!(xcm_wrapper.is_none()); + + // check the full `send_xcm` assert_eq!( - send_xcm::( - Location::new(2, [GlobalConsensus(Rococo), Parachain(1000)]), - vec![ClearOrigin; HARD_MESSAGE_SIZE_LIMIT as usize].into(), - ), + send_xcm::(dest, xcm,), Err(SendError::ExceedsMaxMessageSize), ); }); @@ -478,11 +523,28 @@ mod tests { #[test] fn destination_unsupported_if_wrap_version_fails() { run_test(|| { + // routable dest but we don't know XCM version + let dest = UnknownXcmVersionForRoutableLocation::get(); + let xcm: Xcm<()> = vec![ClearOrigin].into(); + + // dest is routable with the inner router + assert_ok!(ViaBridgeHubExporter::::validate( + &mut Some(dest.clone()), + &mut Some(xcm.clone()) + )); + + // check that it does not pass XCM version check + let mut xcm_wrapper = Some(xcm.clone()); + assert_eq!( + XcmBridgeHubRouter::validate(&mut Some(dest.clone()), &mut xcm_wrapper), + Err(SendError::DestinationUnsupported), + ); + // XCM is consumed by the inner router + assert!(xcm_wrapper.is_none()); + + // check the full `send_xcm` assert_eq!( - send_xcm::( - UnknownXcmVersionLocation::get(), - vec![ClearOrigin].into(), - ), + send_xcm::(dest, xcm,), Err(SendError::DestinationUnsupported), ); }); diff --git a/bridges/modules/xcm-bridge-hub-router/src/mock.rs b/bridges/modules/xcm-bridge-hub-router/src/mock.rs index 54e10966d51..20c86d1da9a 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/mock.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/mock.rs @@ -61,7 +61,7 @@ parameter_types! 
{ Some((BridgeFeeAsset::get(), BASE_FEE).into()) ) ]; - pub UnknownXcmVersionLocation: Location = Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(9999)]); + pub UnknownXcmVersionForRoutableLocation: Location = Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(9999)]); } #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] @@ -76,7 +76,7 @@ impl pallet_xcm_bridge_hub_router::Config<()> for TestRuntime { type BridgedNetworkId = BridgedNetworkId; type Bridges = NetworkExportTable; type DestinationVersion = - LatestOrNoneForLocationVersionChecker>; + LatestOrNoneForLocationVersionChecker>; type BridgeHubOrigin = EnsureRoot; type ToBridgeHubSender = TestToBridgeHubSender; -- GitLab From e6f3106d894277deba043a83e91565de24263a1b Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Wed, 17 Apr 2024 11:25:33 +0200 Subject: [PATCH 015/107] XCM coretime region transfers (#3455) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR introduces changes enabling the transfer of coretime regions via XCM. TL;DR: There are two primary issues that are resolved in this PR: 1. The `mint` and `burn` functions were not implemented for coretime regions. These operations are essential for moving assets to and from the XCM holding register. 2. The transfer of non-fungible assets through XCM was previously disallowed. This was due to incorrectly benchmarking non-fungible asset transfers via XCM, which led to assigning it a weight of `Weight::MAX`, effectively preventing its execution. ### `mint_into` and `burn` implementation This PR addresses the issue with cross-chain transferring regions back to the Coretime chain. Remote reserve transfers are performed by withdrawing and depositing the asset to and from the holding register. This requires the asset to support burning and minting functionality. This PR adds burning and minting; however, they work a bit differently than usual so that the associated region record is not lost when burning. Instead of removing all the data, burning will set the owner of the region to `None`, and when minting it back, it will set it to an actual value. So, when cross-chain transferring, withdrawing into the holding register will remove the region from its original owner, and depositing it from the holding register will set its owner to another account. This was originally implemented in this PR: #3455; however, we decided to move all of it to this single PR (https://github.com/paritytech/polkadot-sdk/pull/3455#discussion_r1547324892) ### Fixes made in this PR - Update the `XcmReserveTransferFilter` on the coretime chain since it is meant as a reserve chain for coretime regions. - Update the XCM benchmark to use `AssetTransactor` instead of assuming `pallet-balances` for fungible transfers. - Update the XCM benchmark to properly measure weight consumption for non-fungible reserve asset transfers. At the moment, reserve transfers via the extrinsic do not work, since their weight is set to `Weight::MAX`.
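### Illustration of the new `burn` / `mint_into` semantics

A minimal, self-contained sketch of the owner-as-`Option` scheme described above (simplified stand-in types and an in-memory map instead of the pallet's actual `Regions` storage and error types):

```rust
// Burning detaches the owner but keeps the region's record data, so nothing is
// lost while the asset sits in the XCM holding register; minting re-attaches an
// owner and is only allowed for a previously burned (owner-less) region.
use std::collections::HashMap;

type AccountId = u64;
type RegionId = u128;

#[derive(Clone, Debug, PartialEq)]
struct RegionRecord {
    end: u32,
    owner: Option<AccountId>, // `None` while held in the XCM holding register
    paid: Option<u128>,
}

fn burn(regions: &mut HashMap<RegionId, RegionRecord>, id: RegionId) -> Result<(), &'static str> {
    let record = regions.get_mut(&id).ok_or("unknown region")?;
    record.owner = None; // `end` and `paid` stay intact
    Ok(())
}

fn mint_into(
    regions: &mut HashMap<RegionId, RegionRecord>,
    id: RegionId,
    who: AccountId,
) -> Result<(), &'static str> {
    let record = regions.get_mut(&id).ok_or("unknown region")?;
    if record.owner.is_some() {
        return Err("not allowed: the region was not burned first");
    }
    record.owner = Some(who);
    Ok(())
}

fn main() {
    let mut regions = HashMap::new();
    regions.insert(1, RegionRecord { end: 42, owner: Some(7), paid: Some(100) });

    burn(&mut regions, 1).unwrap();
    assert_eq!(regions[&1].owner, None); // record survives, owner detached

    mint_into(&mut regions, 1, 9).unwrap();
    assert_eq!(regions[&1], RegionRecord { end: 42, owner: Some(9), paid: Some(100) });
}
```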
Closes: https://github.com/paritytech/polkadot-sdk/issues/865 --------- Co-authored-by: Branislav Kontur Co-authored-by: Francisco Aguirre Co-authored-by: Dónal Murray --- Cargo.lock | 1 + .../coretime/coretime-rococo/src/lib.rs | 57 +++++++--- .../coretime-rococo/src/xcm_config.rs | 2 +- .../coretime/coretime-westend/src/lib.rs | 59 ++++++++--- .../coretime-westend/src/xcm_config.rs | 2 +- polkadot/xcm/pallet-xcm/src/benchmarking.rs | 100 +++++++++++------- prdoc/pr_3455.prdoc | 28 +++++ substrate/frame/broker/Cargo.toml | 2 + substrate/frame/broker/src/benchmarking.rs | 4 +- .../frame/broker/src/dispatchable_impls.rs | 11 +- substrate/frame/broker/src/lib.rs | 11 +- substrate/frame/broker/src/migration.rs | 87 +++++++++++++++ .../frame/broker/src/nonfungible_impl.rs | 57 ++++++++-- substrate/frame/broker/src/tests.rs | 33 ++++-- substrate/frame/broker/src/types.rs | 2 +- substrate/frame/broker/src/utility_impls.rs | 6 +- 16 files changed, 368 insertions(+), 94 deletions(-) create mode 100644 prdoc/pr_3455.prdoc create mode 100644 substrate/frame/broker/src/migration.rs diff --git a/Cargo.lock b/Cargo.lock index 4ee0dfdccff..584c37dc9c3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9971,6 +9971,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log", "parity-scale-codec", "pretty_assertions", "scale-info", diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 9959c15b111..ad065ee3477 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -109,6 +109,7 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. pub type Migrations = ( cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, + pallet_broker::migration::MigrateV0ToV1, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -719,11 +720,20 @@ impl_runtime_apis! { use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { - type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< - xcm_config::XcmConfig, - ExistentialDepositAsset, - xcm_config::PriceForParentDelivery, - >; + type DeliveryHelper = ( + cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >, + polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + PriceForSiblingParachainDelivery, + RandomParaId, + ParachainSystem, + > + ); fn reachable_dest() -> Option { Some(Parent.into()) @@ -741,8 +751,21 @@ impl_runtime_apis! { } fn reserve_transferable_asset_and_dest() -> Option<(Asset, Location)> { - // Reserve transfers are disabled - None + // Coretime chain can reserve transfer regions to some random parachain. + + // Properties of a mock region: + let core = 0; + let begin = 0; + let end = 42; + + let region_id = pallet_broker::Pallet::::issue(core, begin, end, None, None); + Some(( + Asset { + fun: NonFungible(Index(region_id.into())), + id: AssetId(xcm_config::BrokerPalletLocation::get()) + }, + ParentThen(Parachain(RandomParaId::get().into()).into()).into(), + )) } fn get_asset() -> Asset { @@ -758,15 +781,25 @@ impl_runtime_apis! 
{ RocRelayLocation::get(), ExistentialDeposit::get() ).into()); + pub const RandomParaId: ParaId = ParaId::new(43211234); } impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = xcm_config::XcmConfig; - type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< - xcm_config::XcmConfig, - ExistentialDepositAsset, - xcm_config::PriceForParentDelivery, - >; + type DeliveryHelper = ( + cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >, + polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + PriceForSiblingParachainDelivery, + RandomParaId, + ParachainSystem, + > + ); type AccountIdConverter = xcm_config::LocationToAccountId; fn valid_destination() -> Result { Ok(RocRelayLocation::get()) diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs index 7580ab33b8d..7eab53b8a7c 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs @@ -293,7 +293,7 @@ impl pallet_xcm::Config for Runtime { type XcmExecuteFilter = Everything; type XcmExecutor = XcmExecutor; type XcmTeleportFilter = Everything; - type XcmReserveTransferFilter = Nothing; // This parachain is not meant as a reserve location. + type XcmReserveTransferFilter = Everything; type Weigher = WeightInfoBounds< crate::weights::xcm::CoretimeRococoXcmWeight, RuntimeCall, diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 1ee8c3bc0ec..0f074226861 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -109,6 +109,7 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. pub type Migrations = ( cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, + pallet_broker::migration::MigrateV0ToV1, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -218,6 +219,7 @@ impl pallet_authorship::Config for Runtime { parameter_types! { pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; + pub const RandomParaId: ParaId = ParaId::new(43211234); } impl pallet_balances::Config for Runtime { @@ -710,11 +712,20 @@ impl_runtime_apis! { use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { - type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< - xcm_config::XcmConfig, - ExistentialDepositAsset, - xcm_config::PriceForParentDelivery, - >; + type DeliveryHelper = ( + cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >, + polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + PriceForSiblingParachainDelivery, + RandomParaId, + ParachainSystem, + > + ); fn reachable_dest() -> Option { Some(Parent.into()) @@ -732,8 +743,21 @@ impl_runtime_apis! { } fn reserve_transferable_asset_and_dest() -> Option<(Asset, Location)> { - // Reserve transfers are disabled - None + // Coretime chain can reserve transfer regions to some random parachain. 
+ + // Properties of a mock region: + let core = 0; + let begin = 0; + let end = 42; + + let region_id = pallet_broker::Pallet::::issue(core, begin, end, None, None); + Some(( + Asset { + fun: NonFungible(Index(region_id.into())), + id: AssetId(xcm_config::BrokerPalletLocation::get()) + }, + ParentThen(Parachain(RandomParaId::get().into()).into()).into(), + )) } fn get_asset() -> Asset { @@ -753,11 +777,22 @@ impl_runtime_apis! { impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = xcm_config::XcmConfig; - type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< - xcm_config::XcmConfig, - ExistentialDepositAsset, - xcm_config::PriceForParentDelivery, - >; + + type DeliveryHelper = ( + cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >, + polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + PriceForSiblingParachainDelivery, + RandomParaId, + ParachainSystem, + > + ); + type AccountIdConverter = xcm_config::LocationToAccountId; fn valid_destination() -> Result { Ok(TokenRelayLocation::get()) diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs index 346bdfa4d8c..e1452ec63f2 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs @@ -300,7 +300,7 @@ impl pallet_xcm::Config for Runtime { type XcmExecuteFilter = Everything; type XcmExecutor = XcmExecutor; type XcmTeleportFilter = Everything; - type XcmReserveTransferFilter = Nothing; // This parachain is not meant as a reserve location. + type XcmReserveTransferFilter = Everything; type Weigher = WeightInfoBounds< crate::weights::xcm::CoretimeWestendXcmWeight, RuntimeCall, diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index e2903d592dc..5d2e0f7b96f 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -18,10 +18,7 @@ use super::*; use bounded_collections::{ConstU32, WeakBoundedVec}; use codec::Encode; use frame_benchmarking::{benchmarks, whitelisted_caller, BenchmarkError, BenchmarkResult}; -use frame_support::{ - traits::fungible::{Inspect, Mutate}, - weights::Weight, -}; +use frame_support::{assert_ok, weights::Weight}; use frame_system::RawOrigin; use sp_std::prelude::*; use xcm::{latest::prelude::*, v2}; @@ -90,11 +87,6 @@ pub trait Config: crate::Config { } benchmarks! { - where_clause { - where - T: pallet_balances::Config, - ::Balance: From + Into, - } send { let send_origin = T::SendXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; @@ -129,11 +121,7 @@ benchmarks! { BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; - let transferred_amount = match &asset.fun { - Fungible(amount) => *amount, - _ => return Err(BenchmarkError::Stop("Benchmark asset not fungible")), - }.into(); - let assets: Assets = asset.into(); + let assets: Assets = asset.clone().into(); let caller: T::AccountId = whitelisted_caller(); let send_origin = RawOrigin::Signed(caller.clone()); @@ -150,13 +138,26 @@ benchmarks! { FeeReason::ChargeFees, ); - // Actual balance (e.g. `ensure_successful_delivery` could drip delivery fees, ...) 
- let balance = as Inspect<_>>::balance(&caller); - // Add transferred_amount to origin - as Mutate<_>>::mint_into(&caller, transferred_amount)?; - // verify initial balance - let balance = balance + transferred_amount; - assert_eq!( as Inspect<_>>::balance(&caller), balance); + match &asset.fun { + Fungible(amount) => { + // Add transferred_amount to origin + ::AssetTransactor::deposit_asset( + &Asset { fun: Fungible(*amount), id: asset.id }, + &origin_location, + None, + ).map_err(|error| { + log::error!("Fungible asset couldn't be deposited, error: {:?}", error); + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) + })?; + }, + NonFungible(instance) => { + ::AssetTransactor::deposit_asset(&asset, &origin_location, None) + .map_err(|error| { + log::error!("Nonfungible asset couldn't be deposited, error: {:?}", error); + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) + })?; + } + }; let recipient = [0u8; 32]; let versioned_dest: VersionedLocation = destination.into(); @@ -164,21 +165,13 @@ benchmarks! { AccountId32 { network: None, id: recipient.into() }.into(); let versioned_assets: VersionedAssets = assets.into(); }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) - verify { - // verify balance after transfer, decreased by transferred amount (+ maybe XCM delivery fees) - assert!( as Inspect<_>>::balance(&caller) <= balance - transferred_amount); - } reserve_transfer_assets { let (asset, destination) = T::reserve_transferable_asset_and_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; - let transferred_amount = match &asset.fun { - Fungible(amount) => *amount, - _ => return Err(BenchmarkError::Stop("Benchmark asset not fungible")), - }.into(); - let assets: Assets = asset.into(); + let assets: Assets = asset.clone().into(); let caller: T::AccountId = whitelisted_caller(); let send_origin = RawOrigin::Signed(caller.clone()); @@ -195,23 +188,50 @@ benchmarks! { FeeReason::ChargeFees, ); - // Actual balance (e.g. `ensure_successful_delivery` could drip delivery fees, ...) 
- let balance = as Inspect<_>>::balance(&caller); - // Add transferred_amount to origin - as Mutate<_>>::mint_into(&caller, transferred_amount)?; - // verify initial balance - let balance = balance + transferred_amount; - assert_eq!( as Inspect<_>>::balance(&caller), balance); + match &asset.fun { + Fungible(amount) => { + // Add transferred_amount to origin + ::AssetTransactor::deposit_asset( + &Asset { fun: Fungible(*amount), id: asset.id.clone() }, + &origin_location, + None, + ).map_err(|error| { + log::error!("Fungible asset couldn't be deposited, error: {:?}", error); + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) + })?; + }, + NonFungible(instance) => { + ::AssetTransactor::deposit_asset(&asset, &origin_location, None) + .map_err(|error| { + log::error!("Nonfungible asset couldn't be deposited, error: {:?}", error); + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)) + })?; + } + }; let recipient = [0u8; 32]; - let versioned_dest: VersionedLocation = destination.into(); + let versioned_dest: VersionedLocation = destination.clone().into(); let versioned_beneficiary: VersionedLocation = AccountId32 { network: None, id: recipient.into() }.into(); let versioned_assets: VersionedAssets = assets.into(); }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) verify { - // verify balance after transfer, decreased by transferred amount (+ maybe XCM delivery fees) - assert!( as Inspect<_>>::balance(&caller) <= balance - transferred_amount); + match &asset.fun { + Fungible(amount) => { + assert_ok!(::AssetTransactor::withdraw_asset( + &Asset { fun: Fungible(*amount), id: asset.id }, + &destination, + None, + )); + }, + NonFungible(instance) => { + assert_ok!(::AssetTransactor::withdraw_asset( + &asset, + &destination, + None, + )); + } + }; } transfer_assets { diff --git a/prdoc/pr_3455.prdoc b/prdoc/pr_3455.prdoc new file mode 100644 index 00000000000..c16ac224486 --- /dev/null +++ b/prdoc/pr_3455.prdoc @@ -0,0 +1,28 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Region reserve transfers fix + +doc: + - audience: Runtime User + description: | + This PR introduces changes enabling the transfer of coretime regions via XCM. + There are two primary issues that are resolved in this PR: + 1. The mint and burn functions were not implemented for coretime regions. These operations + are essential for moving assets to and from the XCM holding register. + 2. The transfer of non-fungible assets through XCM was previously disallowed. This was due + to incorrectly benchmarking non-fungible asset transfers via XCM, which led to assigning + it a weight of Weight::Max, effectively preventing its execution. + +migrations: + db: [] + runtime: + - reference: pallet-broker + description: | + The region owner is optional. 
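+ In concrete terms, the `owner` field of `RegionRecord` becomes `Option<AccountId>`, and the `MigrateV0ToV1` storage migration rewrites every existing record, wrapping its owner in `Some`.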
+ +crates: + - name: pallet-broker + - name: pallet-xcm + - name: coretime-rococo-runtime + - name: coretime-westend-runtime diff --git a/substrate/frame/broker/Cargo.toml b/substrate/frame/broker/Cargo.toml index f36b94c3cec..ce8d4153045 100644 --- a/substrate/frame/broker/Cargo.toml +++ b/substrate/frame/broker/Cargo.toml @@ -15,6 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +log = { workspace = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } bitvec = { version = "1.0.0", default-features = false } @@ -40,6 +41,7 @@ std = [ "frame-benchmarking?/std", "frame-support/std", "frame-system/std", + "log/std", "scale-info/std", "sp-api/std", "sp-arithmetic/std", diff --git a/substrate/frame/broker/src/benchmarking.rs b/substrate/frame/broker/src/benchmarking.rs index 98ac074ca91..1fc1c3a101a 100644 --- a/substrate/frame/broker/src/benchmarking.rs +++ b/substrate/frame/broker/src/benchmarking.rs @@ -313,8 +313,8 @@ mod benches { assert_last_event::( Event::Transferred { region_id: region, - old_owner: caller, - owner: recipient, + old_owner: Some(caller), + owner: Some(recipient), duration: 3u32.into(), } .into(), diff --git a/substrate/frame/broker/src/dispatchable_impls.rs b/substrate/frame/broker/src/dispatchable_impls.rs index b43911b6bc1..cb7393cc9e3 100644 --- a/substrate/frame/broker/src/dispatchable_impls.rs +++ b/substrate/frame/broker/src/dispatchable_impls.rs @@ -120,7 +120,8 @@ impl Pallet { sale.sellout_price = Some(price); } SaleInfo::::put(&sale); - let id = Self::issue(core, sale.region_begin, sale.region_end, who.clone(), Some(price)); + let id = + Self::issue(core, sale.region_begin, sale.region_end, Some(who.clone()), Some(price)); let duration = sale.region_end.saturating_sub(sale.region_begin); Self::deposit_event(Event::Purchased { who, region_id: id, price, duration }); Ok(id) @@ -178,11 +179,11 @@ impl Pallet { let mut region = Regions::::get(®ion_id).ok_or(Error::::UnknownRegion)?; if let Some(check_owner) = maybe_check_owner { - ensure!(check_owner == region.owner, Error::::NotOwner); + ensure!(Some(check_owner) == region.owner, Error::::NotOwner); } let old_owner = region.owner; - region.owner = new_owner; + region.owner = Some(new_owner); Regions::::insert(®ion_id, ®ion); let duration = region.end.saturating_sub(region_id.begin); Self::deposit_event(Event::Transferred { @@ -203,7 +204,7 @@ impl Pallet { let mut region = Regions::::get(®ion_id).ok_or(Error::::UnknownRegion)?; if let Some(check_owner) = maybe_check_owner { - ensure!(check_owner == region.owner, Error::::NotOwner); + ensure!(Some(check_owner) == region.owner, Error::::NotOwner); } let pivot = region_id.begin.saturating_add(pivot_offset); ensure!(pivot < region.end, Error::::PivotTooLate); @@ -227,7 +228,7 @@ impl Pallet { let region = Regions::::get(®ion_id).ok_or(Error::::UnknownRegion)?; if let Some(check_owner) = maybe_check_owner { - ensure!(check_owner == region.owner, Error::::NotOwner); + ensure!(Some(check_owner) == region.owner, Error::::NotOwner); } ensure!((pivot & !region_id.mask).is_void(), Error::::ExteriorPivot); diff --git a/substrate/frame/broker/src/lib.rs b/substrate/frame/broker/src/lib.rs index 330491eb208..1ef9e59f018 100644 --- a/substrate/frame/broker/src/lib.rs +++ b/substrate/frame/broker/src/lib.rs @@ -36,6 +36,7 @@ mod tick_impls; mod types; mod utility_impls; +pub mod migration; pub mod 
runtime_api; pub mod weights; @@ -46,6 +47,9 @@ pub use core_mask::*; pub use coretime_interface::*; pub use types::*; +/// The log target for this pallet. +const LOG_TARGET: &str = "runtime::broker"; + #[frame_support::pallet] pub mod pallet { use super::*; @@ -61,7 +65,10 @@ pub mod pallet { use sp_runtime::traits::{Convert, ConvertBack}; use sp_std::vec::Vec; + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); #[pallet::config] @@ -216,9 +223,9 @@ pub mod pallet { /// The duration of the Region. duration: Timeslice, /// The old owner of the Region. - old_owner: T::AccountId, + old_owner: Option, /// The new owner of the Region. - owner: T::AccountId, + owner: Option, }, /// A Region has been split into two non-overlapping Regions. Partitioned { diff --git a/substrate/frame/broker/src/migration.rs b/substrate/frame/broker/src/migration.rs new file mode 100644 index 00000000000..95aa28250a6 --- /dev/null +++ b/substrate/frame/broker/src/migration.rs @@ -0,0 +1,87 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; +use crate::types::RegionRecord; +use codec::{Decode, Encode}; +use core::marker::PhantomData; +use frame_support::traits::{Get, UncheckedOnRuntimeUpgrade}; +use sp_runtime::Saturating; + +#[cfg(feature = "try-runtime")] +use frame_support::ensure; +#[cfg(feature = "try-runtime")] +use sp_std::vec::Vec; + +mod v1 { + use super::*; + + /// V0 region record. + #[derive(Encode, Decode)] + struct RegionRecordV0 { + /// The end of the Region. + pub end: Timeslice, + /// The owner of the Region. + pub owner: AccountId, + /// The amount paid to Polkadot for this Region, or `None` if renewal is not allowed. + pub paid: Option, + } + + pub struct MigrateToV1Impl(PhantomData); + + impl UncheckedOnRuntimeUpgrade for MigrateToV1Impl { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + let mut count: u64 = 0; + + >::translate::>, _>(|_, v0| { + count.saturating_inc(); + Some(RegionRecord { end: v0.end, owner: Some(v0.owner), paid: v0.paid }) + }); + + log::info!( + target: LOG_TARGET, + "Storage migration v1 for pallet-broker finished.", + ); + + // calculate and return migration weights + T::DbWeight::get().reads_writes(count as u64 + 1, count as u64 + 1) + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + Ok((Regions::::iter_keys().count() as u32).encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + let old_count = u32::decode(&mut &state[..]).expect("Known good"); + let new_count = Regions::::iter_values().count() as u32; + + ensure!(old_count == new_count, "Regions count should not change"); + Ok(()) + } + } +} + +/// Migrate the pallet storage from `0` to `1`. 
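+/// Because the inner `v1::MigrateToV1Impl` is wrapped in `VersionedMigration`, it only runs while the pallet's on-chain `StorageVersion` is `0` and bumps it to `1` afterwards, so the `Regions` translation cannot be applied twice.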
+pub type MigrateV0ToV1 = frame_support::migrations::VersionedMigration< + 0, + 1, + v1::MigrateToV1Impl, + Pallet, + ::DbWeight, +>; diff --git a/substrate/frame/broker/src/nonfungible_impl.rs b/substrate/frame/broker/src/nonfungible_impl.rs index b2e88bf09a0..80dcc175df5 100644 --- a/substrate/frame/broker/src/nonfungible_impl.rs +++ b/substrate/frame/broker/src/nonfungible_impl.rs @@ -25,12 +25,13 @@ use sp_std::vec::Vec; impl Inspect for Pallet { type ItemId = u128; - fn owner(index: &Self::ItemId) -> Option { - Regions::::get(RegionId::from(*index)).map(|r| r.owner) + fn owner(item: &Self::ItemId) -> Option { + let record = Regions::::get(RegionId::from(*item))?; + record.owner } - fn attribute(index: &Self::ItemId, key: &[u8]) -> Option> { - let id = RegionId::from(*index); + fn attribute(item: &Self::ItemId, key: &[u8]) -> Option> { + let id = RegionId::from(*item); let item = Regions::::get(id)?; match key { b"begin" => Some(id.begin.encode()), @@ -46,11 +47,49 @@ impl Inspect for Pallet { } impl Transfer for Pallet { - fn transfer(index: &Self::ItemId, dest: &T::AccountId) -> DispatchResult { - Self::do_transfer((*index).into(), None, dest.clone()).map_err(Into::into) + fn transfer(item: &Self::ItemId, dest: &T::AccountId) -> DispatchResult { + Self::do_transfer((*item).into(), None, dest.clone()).map_err(Into::into) } } -// We don't allow any of the mutate operations, so the default implementation is used, which will -// return `TokenError::Unsupported` in case any of the operations is called. -impl Mutate for Pallet {} +/// We don't really support burning and minting. +/// +/// We only need this to allow the region to be reserve transferable. +/// +/// For reserve transfers that are not 'local', the asset must first be withdrawn to the holding +/// register and then deposited into the designated account. This process necessitates that the +/// asset is capable of being 'burned' and 'minted'. +/// +/// Since each region is associated with specific record data, we will not actually burn the asset. +/// If we did, we wouldn't know what record to assign to the newly minted region. Therefore, instead +/// of burning, we set the asset's owner to `None`. In essence, 'burning' a region involves setting +/// its owner to `None`, whereas 'minting' the region assigns its owner to an actual account. This +/// way we never lose track of the associated record data. +impl Mutate for Pallet { + /// Deposit a region into an account. + fn mint_into(item: &Self::ItemId, who: &T::AccountId) -> DispatchResult { + let region_id: RegionId = (*item).into(); + let record = Regions::::get(®ion_id).ok_or(Error::::UnknownRegion)?; + + // 'Minting' can only occur if the asset has previously been burned (i.e. moved to the + // holding register) + ensure!(record.owner.is_none(), Error::::NotAllowed); + Self::issue(region_id.core, region_id.begin, record.end, Some(who.clone()), record.paid); + + Ok(()) + } + + /// Withdraw a region from account. 
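+ /// Note that this does not remove the `Regions` entry: it only sets its `owner` to `None`, so the record's `end` and `paid` data are preserved while the region sits in the XCM holding register.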
+ fn burn(item: &Self::ItemId, maybe_check_owner: Option<&T::AccountId>) -> DispatchResult { + let region_id: RegionId = (*item).into(); + let mut record = Regions::::get(®ion_id).ok_or(Error::::UnknownRegion)?; + if let Some(owner) = maybe_check_owner { + ensure!(Some(owner.clone()) == record.owner, Error::::NotOwner); + } + + record.owner = None; + Regions::::insert(region_id, record); + + Ok(()) + } +} diff --git a/substrate/frame/broker/src/tests.rs b/substrate/frame/broker/src/tests.rs index 8ec0c6d158b..c573b6c55a2 100644 --- a/substrate/frame/broker/src/tests.rs +++ b/substrate/frame/broker/src/tests.rs @@ -200,14 +200,35 @@ fn transfer_works() { } #[test] -fn mutate_operations_unsupported_for_regions() { - TestExt::new().execute_with(|| { +fn mutate_operations_work() { + TestExt::new().endow(1, 1000).execute_with(|| { let region_id = RegionId { begin: 0, core: 0, mask: CoreMask::complete() }; assert_noop!( >::mint_into(®ion_id.into(), &2), - TokenError::Unsupported + Error::::UnknownRegion + ); + + assert_ok!(Broker::do_start_sales(100, 1)); + advance_to(2); + let region_id = Broker::do_purchase(1, u64::max_value()).unwrap(); + assert_noop!( + >::mint_into(®ion_id.into(), &2), + Error::::NotAllowed + ); + + assert_noop!( + >::burn(®ion_id.into(), Some(&2)), + Error::::NotOwner ); - assert_noop!(>::burn(®ion_id.into(), None), TokenError::Unsupported); + // 'withdraw' the region from user 1: + assert_ok!(>::burn(®ion_id.into(), Some(&1))); + assert_eq!(Regions::::get(region_id).unwrap().owner, None); + + // `mint_into` works after burning: + assert_ok!(>::mint_into(®ion_id.into(), &2)); + assert_eq!(Regions::::get(region_id).unwrap().owner, Some(2)); + + // Unsupported operations: assert_noop!( >::set_attribute(®ion_id.into(), &[], &[]), TokenError::Unsupported @@ -285,7 +306,7 @@ fn nft_metadata_works() { assert_eq!(attribute::(region, b"begin"), 4); assert_eq!(attribute::(region, b"length"), 3); assert_eq!(attribute::(region, b"end"), 7); - assert_eq!(attribute::(region, b"owner"), 1); + assert_eq!(attribute::>(region, b"owner"), Some(1)); assert_eq!(attribute::(region, b"part"), 0xfffff_fffff_fffff_fffff.into()); assert_eq!(attribute::(region, b"core"), 0); assert_eq!(attribute::>(region, b"paid"), Some(100)); @@ -297,7 +318,7 @@ fn nft_metadata_works() { assert_eq!(attribute::(region, b"begin"), 6); assert_eq!(attribute::(region, b"length"), 1); assert_eq!(attribute::(region, b"end"), 7); - assert_eq!(attribute::(region, b"owner"), 42); + assert_eq!(attribute::>(region, b"owner"), Some(42)); assert_eq!(attribute::(region, b"part"), 0x00000_fffff_fffff_00000.into()); assert_eq!(attribute::(region, b"core"), 0); assert_eq!(attribute::>(region, b"paid"), None); diff --git a/substrate/frame/broker/src/types.rs b/substrate/frame/broker/src/types.rs index e8119d29ef5..f2cae9a41ad 100644 --- a/substrate/frame/broker/src/types.rs +++ b/substrate/frame/broker/src/types.rs @@ -84,7 +84,7 @@ pub struct RegionRecord { /// The end of the Region. pub end: Timeslice, /// The owner of the Region. - pub owner: AccountId, + pub owner: Option, /// The amount paid to Polkadot for this Region, or `None` if renewal is not allowed. 
pub paid: Option, } } diff --git a/substrate/frame/broker/src/utility_impls.rs b/substrate/frame/broker/src/utility_impls.rs index 3dba5be5b39..4163817a8b5 100644 --- a/substrate/frame/broker/src/utility_impls.rs +++ b/substrate/frame/broker/src/utility_impls.rs @@ -72,11 +72,11 @@ impl Pallet { Ok(()) } - pub(crate) fn issue( + pub fn issue( core: CoreIndex, begin: Timeslice, end: Timeslice, - owner: T::AccountId, + owner: Option, paid: Option>, ) -> RegionId { let id = RegionId { begin, core, mask: CoreMask::complete() }; @@ -94,7 +94,7 @@ impl Pallet { let region = Regions::::get(&region_id).ok_or(Error::::UnknownRegion)?; if let Some(check_owner) = maybe_check_owner { - ensure!(check_owner == region.owner, Error::::NotOwner); + ensure!(Some(check_owner) == region.owner, Error::::NotOwner); } Regions::::remove(&region_id); -- GitLab From 4e10d3b0a6ec2eccf58c471e7739948c1a867acf Mon Sep 17 00:00:00 2001 From: Muharem Date: Wed, 17 Apr 2024 12:39:23 +0200 Subject: [PATCH 016/107] Asset Conversion: Pool Account ID derivation with additional Pallet ID seed (#3250) Introduce `PalletId` as an additional seed parameter for the pool's account ID derivation. The PR also introduces the `pallet_asset_conversion_ops` pallet with a call to migrate a given pool to the new account. It additionally introduces the `fungibles::lifetime::ResetTeam` and `fungible::lifetime::Refund` traits to facilitate the migration of pools. --------- Co-authored-by: command-bot <> --- Cargo.lock | 25 ++ Cargo.toml | 1 + .../assets/asset-hub-rococo/Cargo.toml | 4 + .../assets/asset-hub-rococo/src/lib.rs | 31 +- .../asset-hub-rococo/src/weights/mod.rs | 1 + .../weights/pallet_asset_conversion_ops.rs | 71 ++++ .../assets/asset-hub-westend/Cargo.toml | 4 + .../assets/asset-hub-westend/src/lib.rs | 30 +- .../asset-hub-westend/src/weights/mod.rs | 1 + .../weights/pallet_asset_conversion_ops.rs | 71 ++++ prdoc/pr_3250.prdoc | 16 + substrate/bin/node/runtime/Cargo.toml | 4 + substrate/bin/node/runtime/src/lib.rs | 32 +- substrate/frame/asset-conversion/Cargo.toml | 2 + .../frame/asset-conversion/ops/Cargo.toml | 71 ++++ .../asset-conversion/ops/src/benchmarking.rs | 167 +++++++++ .../frame/asset-conversion/ops/src/lib.rs | 331 ++++++++++++++++++ .../frame/asset-conversion/ops/src/mock.rs | 147 ++++++++ .../frame/asset-conversion/ops/src/tests.rs | 308 ++++++++++++++++ .../frame/asset-conversion/ops/src/weights.rs | 104 ++++++ substrate/frame/asset-conversion/src/mock.rs | 7 +- substrate/frame/asset-conversion/src/types.rs | 78 +++-- substrate/frame/assets/src/functions.rs | 35 +- substrate/frame/assets/src/impl_fungibles.rs | 32 ++ substrate/frame/assets/src/lib.rs | 2 +- .../src/traits/tokens/fungible/union_of.rs | 25 ++ .../src/traits/tokens/fungibles/lifetime.rs | 22 +- .../src/traits/tokens/fungibles/mod.rs | 2 +- .../src/traits/tokens/fungibles/roles.rs | 21 ++ .../src/traits/tokens/fungibles/union_of.rs | 32 ++ .../asset-conversion-tx-payment/src/mock.rs | 9 +- 31 files changed, 1647 insertions(+), 39 deletions(-) create mode 100644 cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion_ops.rs create mode 100644 cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion_ops.rs create mode 100644 prdoc/pr_3250.prdoc create mode 100644 substrate/frame/asset-conversion/ops/Cargo.toml create mode 100644 substrate/frame/asset-conversion/ops/src/benchmarking.rs create mode 100644 substrate/frame/asset-conversion/ops/src/lib.rs create mode 100644 substrate/frame/asset-conversion/ops/src/mock.rs
create mode 100644 substrate/frame/asset-conversion/ops/src/tests.rs create mode 100644 substrate/frame/asset-conversion/ops/src/weights.rs diff --git a/Cargo.lock b/Cargo.lock index 584c37dc9c3..aeb8b2cdb19 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -871,6 +871,7 @@ dependencies = [ "hex-literal", "log", "pallet-asset-conversion", + "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-assets", "pallet-aura", @@ -995,6 +996,7 @@ dependencies = [ "hex-literal", "log", "pallet-asset-conversion", + "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-assets", "pallet-aura", @@ -7383,6 +7385,7 @@ dependencies = [ "node-primitives", "pallet-alliance", "pallet-asset-conversion", + "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-tx-payment", @@ -9534,6 +9537,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log", "pallet-assets", "pallet-balances", "parity-scale-codec", @@ -9547,6 +9551,27 @@ dependencies = [ "sp-std 14.0.0", ] +[[package]] +name = "pallet-asset-conversion-ops" +version = "0.1.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "pallet-asset-conversion", + "pallet-assets", + "pallet-balances", + "parity-scale-codec", + "primitive-types", + "scale-info", + "sp-arithmetic", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 14.0.0", +] + [[package]] name = "pallet-asset-conversion-tx-payment" version = "10.0.0" diff --git a/Cargo.toml b/Cargo.toml index b3ae264bc3a..42a6bc8abe1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -301,6 +301,7 @@ members = [ "substrate/frame", "substrate/frame/alliance", "substrate/frame/asset-conversion", + "substrate/frame/asset-conversion/ops", "substrate/frame/asset-rate", "substrate/frame/assets", "substrate/frame/atomic-swap", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index 0733156716c..e8be734214f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -25,6 +25,7 @@ frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/r frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } pallet-asset-conversion-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false } pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } +pallet-asset-conversion-ops = { path = "../../../../../substrate/frame/asset-conversion/ops", default-features = false } pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false } pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } @@ -120,6 +121,7 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-asset-conversion-ops/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", @@ -153,6 +155,7 @@ try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "frame-try-runtime/try-runtime", + 
"pallet-asset-conversion-ops/try-runtime", "pallet-asset-conversion-tx-payment/try-runtime", "pallet-asset-conversion/try-runtime", "pallet-assets/try-runtime", @@ -201,6 +204,7 @@ std = [ "frame-system/std", "frame-try-runtime?/std", "log/std", + "pallet-asset-conversion-ops/std", "pallet-asset-conversion-tx-payment/std", "pallet-asset-conversion/std", "pallet-assets/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 23d8f9b667d..30e211a8f1d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -333,6 +333,11 @@ pub type NativeAndAssets = fungible::UnionOf< AccountId, >; +pub type PoolIdToAccountId = pallet_asset_conversion::AccountIdConverter< + AssetConversionPalletId, + (xcm::v3::Location, xcm::v3::Location), +>; + impl pallet_asset_conversion::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; @@ -340,8 +345,12 @@ impl pallet_asset_conversion::Config for Runtime { type AssetKind = xcm::v3::Location; type Assets = NativeAndAssets; type PoolId = (Self::AssetKind, Self::AssetKind); - type PoolLocator = - pallet_asset_conversion::WithFirstAsset; + type PoolLocator = pallet_asset_conversion::WithFirstAsset< + TokenLocationV3, + AccountId, + Self::AssetKind, + PoolIdToAccountId, + >; type PoolAssetId = u32; type PoolAssets = PoolAssets; type PoolSetupFee = ConstU128<0>; // Asset class deposit fees are sufficient to prevent spam @@ -362,6 +371,18 @@ impl pallet_asset_conversion::Config for Runtime { >; } +impl pallet_asset_conversion_ops::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type PriorAccountIdConverter = pallet_asset_conversion::AccountIdConverterNoSeed< + ::PoolId, + >; + type AssetsRefund = ::Assets; + type PoolAssetsRefund = ::PoolAssets; + type PoolAssetsTeam = ::PoolAssets; + type DepositAsset = Balances; + type WeightInfo = weights::pallet_asset_conversion_ops::WeightInfo; +} + parameter_types! { // we just reuse the same deposits pub const ForeignAssetsAssetDeposit: Balance = AssetDeposit::get(); @@ -934,6 +955,10 @@ construct_runtime!( #[cfg(feature = "state-trie-version-1")] StateTrieMigration: pallet_state_trie_migration = 70, + + // TODO: the pallet instance should be removed once all pools have migrated + // to the new account IDs. + AssetConversionMigration: pallet_asset_conversion_ops = 200, } ); @@ -961,6 +986,7 @@ pub type SignedExtra = ( pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. +#[allow(deprecated)] pub type Migrations = ( pallet_collator_selection::migration::v1::MigrateToV1, InitStorageVersions, @@ -1054,6 +1080,7 @@ mod benches { [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_xcm_bridge_hub_router, ToWestend] + [pallet_asset_conversion_ops, AssetConversionMigration] // XCM [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. 
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs index fa9e86102c6..f20790cde39 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs @@ -20,6 +20,7 @@ pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; pub mod pallet_asset_conversion; +pub mod pallet_asset_conversion_ops; pub mod pallet_assets_foreign; pub mod pallet_assets_local; pub mod pallet_assets_pool; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion_ops.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion_ops.rs new file mode 100644 index 00000000000..e85420d32d9 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion_ops.rs @@ -0,0 +1,71 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_asset_conversion_ops` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-15, STEPS: `10`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-rococo-dev +// --steps=10 +// --repeat=2 +// --pallet=pallet-asset-conversion-ops +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_asset_conversion_ops`. 
+pub struct WeightInfo(PhantomData); +impl pallet_asset_conversion_ops::WeightInfo for WeightInfo { + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:2 w:2) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn migrate_to_new_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `1105` + // Estimated: `7404` + // Minimum execution time: 2_323_000_000 picoseconds. + Weight::from_parts(2_404_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(8)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index e25554ec0a5..554659415a0 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -23,6 +23,7 @@ frame-system = { path = "../../../../../substrate/frame/system", default-feature frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-asset-conversion-ops = { path = "../../../../../substrate/frame/asset-conversion/ops", default-features = false } pallet-asset-conversion-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false } pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false } @@ -111,6 +112,7 @@ runtime-benchmarks = [ "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "hex-literal", + "pallet-asset-conversion-ops/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", @@ -142,6 +144,7 @@ try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "frame-try-runtime/try-runtime", + "pallet-asset-conversion-ops/try-runtime", "pallet-asset-conversion-tx-payment/try-runtime", "pallet-asset-conversion/try-runtime", "pallet-assets/try-runtime", @@ -189,6 +192,7 @@ std = [ "frame-system/std", "frame-try-runtime?/std", "log/std", + "pallet-asset-conversion-ops/std", "pallet-asset-conversion-tx-payment/std", "pallet-asset-conversion/std", "pallet-assets/std", diff --git 
a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index cb2f1163718..366fb91723a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -315,6 +315,11 @@ pub type NativeAndAssets = fungible::UnionOf< AccountId, >; +pub type PoolIdToAccountId = pallet_asset_conversion::AccountIdConverter< + AssetConversionPalletId, + (xcm::v3::Location, xcm::v3::Location), +>; + impl pallet_asset_conversion::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; @@ -322,8 +327,12 @@ impl pallet_asset_conversion::Config for Runtime { type AssetKind = xcm::v3::Location; type Assets = NativeAndAssets; type PoolId = (Self::AssetKind, Self::AssetKind); - type PoolLocator = - pallet_asset_conversion::WithFirstAsset; + type PoolLocator = pallet_asset_conversion::WithFirstAsset< + WestendLocationV3, + AccountId, + Self::AssetKind, + PoolIdToAccountId, + >; type PoolAssetId = u32; type PoolAssets = PoolAssets; type PoolSetupFee = ConstU128<0>; // Asset class deposit fees are sufficient to prevent spam @@ -344,6 +353,18 @@ impl pallet_asset_conversion::Config for Runtime { >; } +impl pallet_asset_conversion_ops::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type PriorAccountIdConverter = pallet_asset_conversion::AccountIdConverterNoSeed< + ::PoolId, + >; + type AssetsRefund = ::Assets; + type PoolAssetsRefund = ::PoolAssets; + type PoolAssetsTeam = ::PoolAssets; + type DepositAsset = Balances; + type WeightInfo = weights::pallet_asset_conversion_ops::WeightInfo; +} + parameter_types! { // we just reuse the same deposits pub const ForeignAssetsAssetDeposit: Balance = AssetDeposit::get(); @@ -906,6 +927,10 @@ construct_runtime!( NftFractionalization: pallet_nft_fractionalization = 54, PoolAssets: pallet_assets:: = 55, AssetConversion: pallet_asset_conversion = 56, + + // TODO: the pallet instance should be removed once all pools have migrated + // to the new account IDs. + AssetConversionMigration: pallet_asset_conversion_ops = 200, } ); @@ -1085,6 +1110,7 @@ mod benches { [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_xcm_bridge_hub_router, ToRococo] + [pallet_asset_conversion_ops, AssetConversionMigration] // XCM [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs index 2f1fcfb05f3..4eebb1f8d78 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs @@ -19,6 +19,7 @@ pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; pub mod pallet_asset_conversion; +pub mod pallet_asset_conversion_ops; pub mod pallet_assets_foreign; pub mod pallet_assets_local; pub mod pallet_assets_pool; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion_ops.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion_ops.rs new file mode 100644 index 00000000000..dfe4092c3f0 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion_ops.rs @@ -0,0 +1,71 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_asset_conversion_ops` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-15, STEPS: `10`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-westend-dev +// --steps=10 +// --repeat=2 +// --pallet=pallet-asset-conversion-ops +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_asset_conversion_ops`. +pub struct WeightInfo(PhantomData); +impl pallet_asset_conversion_ops::WeightInfo for WeightInfo { + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:2 w:2) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn migrate_to_new_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `1105` + // Estimated: `7404` + // Minimum execution time: 2_216_000_000 picoseconds. 
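+ // NOTE: the figures below come from a debug-build run (`./target/debug/polkadot-parachain`,
+ // STEPS: 10, REPEAT: 2; see the header above), so they are only indicative.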
+ Weight::from_parts(2_379_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(8)) + } +} diff --git a/prdoc/pr_3250.prdoc b/prdoc/pr_3250.prdoc new file mode 100644 index 00000000000..77ea725073e --- /dev/null +++ b/prdoc/pr_3250.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Asset Conversion: Pool Account ID derivation with additional Pallet ID seed" + +doc: + - audience: Runtime Dev + description: | + Introduce `PalletId` as an additional seed parameter for the pool's account ID derivation. + The PR also introduces the `pallet_asset_conversion_ops` pallet with a call to migrate + a pool to the new account. It additionally introduces the `fungibles::roles::ResetTeam` and + `fungibles::lifetime::Refund` traits to facilitate the migration functionality. + +crates: + - name: pallet-asset-conversion + bump: minor diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index f4ed75b9196..00eab9b75f6 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -66,6 +66,7 @@ frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api", frame-try-runtime = { path = "../../../frame/try-runtime", default-features = false, optional = true } pallet-alliance = { path = "../../../frame/alliance", default-features = false } pallet-asset-conversion = { path = "../../../frame/asset-conversion", default-features = false } +pallet-asset-conversion-ops = { path = "../../../frame/asset-conversion/ops", default-features = false } pallet-asset-rate = { path = "../../../frame/asset-rate", default-features = false } pallet-assets = { path = "../../../frame/assets", default-features = false } pallet-authority-discovery = { path = "../../../frame/authority-discovery", default-features = false } @@ -166,6 +167,7 @@ std = [ "log/std", "node-primitives/std", "pallet-alliance/std", + "pallet-asset-conversion-ops/std", "pallet-asset-conversion-tx-payment/std", "pallet-asset-conversion/std", "pallet-asset-rate/std", @@ -278,6 +280,7 @@ runtime-benchmarks = [ "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-alliance/runtime-benchmarks", + "pallet-asset-conversion-ops/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", "pallet-asset-rate/runtime-benchmarks", "pallet-asset-tx-payment/runtime-benchmarks", @@ -354,6 +357,7 @@ try-runtime = [ "frame-system/try-runtime", "frame-try-runtime/try-runtime", "pallet-alliance/try-runtime", + "pallet-asset-conversion-ops/try-runtime", "pallet-asset-conversion-tx-payment/try-runtime", "pallet-asset-conversion/try-runtime", "pallet-asset-rate/try-runtime", diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index faf7cf7967b..43c617023bc 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -63,7 +63,7 @@ use frame_system::{ }; pub use node_primitives::{AccountId, Signature}; use node_primitives::{AccountIndex, Balance, BlockNumber, Hash, Moment, Nonce}; -use pallet_asset_conversion::{Ascending, Chain, WithFirstAsset}; +use pallet_asset_conversion::{AccountIdConverter, Ascending, Chain, WithFirstAsset}; use pallet_broker::{CoreAssignment, CoreIndex, CoretimeInterface, PartsOf57600}; use pallet_election_provider_multi_phase::{GeometricDepositBase,
SolutionAccuracyOf}; use pallet_identity::legacy::IdentityInfo; @@ -1710,8 +1710,17 @@ impl pallet_asset_conversion::Config for Runtime { type Assets = UnionOf, AccountId>; type PoolId = (Self::AssetKind, Self::AssetKind); type PoolLocator = Chain< - WithFirstAsset>, - Ascending>, + WithFirstAsset< + Native, + AccountId, + NativeOrWithId, + AccountIdConverter, + >, + Ascending< + AccountId, + NativeOrWithId, + AccountIdConverter, + >, >; type PoolAssetId = >::AssetId; type PoolAssets = PoolAssets; @@ -1728,6 +1737,19 @@ impl pallet_asset_conversion::Config for Runtime { type BenchmarkHelper = (); } +impl pallet_asset_conversion_ops::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type PriorAccountIdConverter = pallet_asset_conversion::AccountIdConverterNoSeed<( + NativeOrWithId, + NativeOrWithId, + )>; + type AssetsRefund = ::Assets; + type PoolAssetsRefund = ::PoolAssets; + type PoolAssetsTeam = ::PoolAssets; + type DepositAsset = Balances; + type WeightInfo = pallet_asset_conversion_ops::weights::SubstrateWeight; +} + parameter_types! { pub const QueueCount: u32 = 300; pub const MaxQueueLen: u32 = 1000; @@ -2474,6 +2496,9 @@ mod runtime { #[runtime::pallet_index(78)] pub type PalletExampleMbms = pallet_example_mbm; + + #[runtime::pallet_index(79)] + pub type AssetConversionMigration = pallet_asset_conversion_ops; } /// The address format for describing accounts. @@ -2633,6 +2658,7 @@ mod benches { [pallet_tx_pause, TxPause] [pallet_safe_mode, SafeMode] [pallet_example_mbm, PalletExampleMbms] + [pallet_asset_conversion_ops, AssetConversionMigration] ); } diff --git a/substrate/frame/asset-conversion/Cargo.toml b/substrate/frame/asset-conversion/Cargo.toml index 1a8e8eea484..cf50d7b22af 100644 --- a/substrate/frame/asset-conversion/Cargo.toml +++ b/substrate/frame/asset-conversion/Cargo.toml @@ -17,6 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +log = { version = "0.4.20", default-features = false } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } @@ -40,6 +41,7 @@ std = [ "frame-benchmarking?/std", "frame-support/std", "frame-system/std", + "log/std", "pallet-assets/std", "pallet-balances/std", "primitive-types/std", diff --git a/substrate/frame/asset-conversion/ops/Cargo.toml b/substrate/frame/asset-conversion/ops/Cargo.toml new file mode 100644 index 00000000000..e421e904a3a --- /dev/null +++ b/substrate/frame/asset-conversion/ops/Cargo.toml @@ -0,0 +1,71 @@ +[package] +name = "pallet-asset-conversion-ops" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +homepage = "https://substrate.io" +repository.workspace = true +description = "FRAME asset conversion pallet's operations suite" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +log = { version = "0.4.20", default-features = false } +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } +frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } +pallet-asset-conversion = { path = "..", default-features = false } +scale-info = { 
version = "2.10.0", default-features = false, features = ["derive"] } +sp-core = { path = "../../../primitives/core", default-features = false } +sp-io = { path = "../../../primitives/io", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-arithmetic = { path = "../../../primitives/arithmetic", default-features = false } + +[dev-dependencies] +pallet-balances = { path = "../../balances" } +pallet-assets = { path = "../../assets" } +primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "num-traits", "scale-info"] } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "log/std", + "pallet-asset-conversion/std", + "pallet-assets/std", + "pallet-balances/std", + "primitive-types/std", + "scale-info/std", + "sp-arithmetic/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-asset-conversion/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-asset-conversion/try-runtime", + "pallet-assets/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/asset-conversion/ops/src/benchmarking.rs b/substrate/frame/asset-conversion/ops/src/benchmarking.rs new file mode 100644 index 00000000000..a7370f38bc4 --- /dev/null +++ b/substrate/frame/asset-conversion/ops/src/benchmarking.rs @@ -0,0 +1,167 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Asset Conversion Ops pallet benchmarking. + +use super::*; +use crate::Pallet as AssetConversionOps; +use frame_benchmarking::{v2::*, whitelisted_caller}; +use frame_support::{ + assert_ok, + traits::fungibles::{Create, Inspect, Mutate}, +}; +use frame_system::RawOrigin as SystemOrigin; +use pallet_asset_conversion::{BenchmarkHelper, Pallet as AssetConversion}; +use sp_core::Get; +use sp_runtime::traits::One; +use sp_std::prelude::*; + +/// Provides a pair of amounts expected to serve as sufficient initial liquidity for a pool. +fn valid_liquidity_amount(ed1: T::Balance, ed2: T::Balance) -> (T::Balance, T::Balance) +where + T::Assets: Inspect, +{ + let l = + ed1.max(ed2) + T::MintMinLiquidity::get() + T::MintMinLiquidity::get() + T::Balance::one(); + (l, l) +} + +/// Create the `asset` and mint the `amount` for the `caller`. 
+fn create_asset(caller: &T::AccountId, asset: &T::AssetKind, amount: T::Balance) +where + T::Assets: Create + Mutate, +{ + if !T::Assets::asset_exists(asset.clone()) { + assert_ok!(T::Assets::create(asset.clone(), caller.clone(), true, T::Balance::one())); + } + assert_ok!(T::Assets::mint_into( + asset.clone(), + &caller, + amount + T::Assets::minimum_balance(asset.clone()) + )); +} + +/// Create the designated fee asset for pool creation. +fn create_fee_asset(caller: &T::AccountId) +where + T::Assets: Create + Mutate, +{ + let fee_asset = T::PoolSetupFeeAsset::get(); + if !T::Assets::asset_exists(fee_asset.clone()) { + assert_ok!(T::Assets::create(fee_asset.clone(), caller.clone(), true, T::Balance::one())); + } + assert_ok!(T::Assets::mint_into( + fee_asset.clone(), + &caller, + T::Assets::minimum_balance(fee_asset) + )); +} + +/// Mint the fee asset for the `caller` sufficient to cover the fee for creating a new pool. +fn mint_setup_fee_asset( + caller: &T::AccountId, + asset1: &T::AssetKind, + asset2: &T::AssetKind, + lp_token: &T::PoolAssetId, +) where + T::Assets: Create + Mutate, +{ + assert_ok!(T::Assets::mint_into( + T::PoolSetupFeeAsset::get(), + &caller, + T::PoolSetupFee::get() + + T::Assets::deposit_required(asset1.clone()) + + T::Assets::deposit_required(asset2.clone()) + + T::PoolAssets::deposit_required(lp_token.clone()) + )); +} + +/// Creates a pool for a given asset pair. +/// +/// This action mints the necessary amounts of the given assets for the `caller` to provide initial +/// liquidity. It returns the LP token ID along with a pair of amounts sufficient for the pool's +/// initial liquidity. +fn create_asset_and_pool( + caller: &T::AccountId, + asset1: &T::AssetKind, + asset2: &T::AssetKind, +) -> (T::PoolAssetId, T::Balance, T::Balance) +where + T::Assets: Create + Mutate, +{ + let (liquidity1, liquidity2) = valid_liquidity_amount::( + T::Assets::minimum_balance(asset1.clone()), + T::Assets::minimum_balance(asset2.clone()), + ); + create_asset::(caller, asset1, liquidity1); + create_asset::(caller, asset2, liquidity2); + let lp_token = AssetConversion::::get_next_pool_asset_id(); + + mint_setup_fee_asset::(caller, asset1, asset2, &lp_token); + + assert_ok!(AssetConversion::::create_pool( + SystemOrigin::Signed(caller.clone()).into(), + Box::new(asset1.clone()), + Box::new(asset2.clone()) + )); + + (lp_token, liquidity1, liquidity2) +} + +fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + // compare to the last event record + let frame_system::EventRecord { event, .. 
} = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +#[benchmarks(where T::Assets: Create + Mutate, T::PoolAssetId: Into,)] +mod benchmarks { + use super::*; + + #[benchmark] + fn migrate_to_new_account() { + let caller: T::AccountId = whitelisted_caller(); + let (asset1, asset2) = T::BenchmarkHelper::create_pair(0, 1); + + create_fee_asset::(&caller); + let (_, liquidity1, liquidity2) = create_asset_and_pool::(&caller, &asset1, &asset2); + + assert_ok!(AssetConversion::::add_liquidity( + SystemOrigin::Signed(caller.clone()).into(), + Box::new(asset1.clone()), + Box::new(asset2.clone()), + liquidity1, + liquidity2, + T::Balance::one(), + T::Balance::zero(), + caller.clone(), + )); + + #[extrinsic_call] + _(SystemOrigin::Signed(caller.clone()), Box::new(asset1.clone()), Box::new(asset2.clone())); + + let pool_id = T::PoolLocator::pool_id(&asset1, &asset2).unwrap(); + let (prior_account, new_account) = AssetConversionOps::::addresses(&pool_id).unwrap(); + assert_last_event::( + Event::MigratedToNewAccount { pool_id, new_account, prior_account }.into(), + ); + } + + impl_benchmark_test_suite!(AssetConversionOps, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/substrate/frame/asset-conversion/ops/src/lib.rs b/substrate/frame/asset-conversion/ops/src/lib.rs new file mode 100644 index 00000000000..6cc7bfa2e99 --- /dev/null +++ b/substrate/frame/asset-conversion/ops/src/lib.rs @@ -0,0 +1,331 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Asset Conversion Operations Suite. +//! +//! This pallet provides operational functionalities for the Asset Conversion pallet, +//! allowing you to perform various migration and one-time-use operations. These operations +//! are designed to facilitate updates and changes to the Asset Conversion pallet without +//! breaking its API. +//! +//! ## Overview +//! +//! This suite allows you to perform the following operations: +//! - Perform migration to update account ID derivation methods for existing pools. The migration +//! operation ensures that the required accounts are created, existing account deposits are +//! transferred, and liquidity is moved to the new accounts. 
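+//!
+//! As an illustrative sketch (not exercised by this pallet; it assumes a `u64` `AccountId`,
+//! `u32` asset kinds and the `py/ascon` pallet ID used by the runtimes in this PR), the prior
+//! and new pool accounts differ because the new derivation hashes a `PalletId` seed together
+//! with the pool ID:
+//!
+//! ```ignore
+//! use frame_support::{parameter_types, PalletId};
+//! use pallet_asset_conversion::{AccountIdConverter, AccountIdConverterNoSeed};
+//! use sp_runtime::traits::TryConvert;
+//!
+//! parameter_types! {
+//!     pub const Seed: PalletId = PalletId(*b"py/ascon");
+//! }
+//!
+//! let pool_id = (0u32, 1u32);
+//! // Prior scheme: blake2_256(scale_encode(pool_id)).
+//! let prior: u64 = AccountIdConverterNoSeed::<(u32, u32)>::try_convert(&pool_id).unwrap();
+//! // New scheme: blake2_256(scale_encode((Seed, pool_id))).
+//! let new: u64 = AccountIdConverter::<Seed, (u32, u32)>::try_convert(&pool_id).unwrap();
+//! // Different preimages yield different accounts, hence the migration operation.
+//! assert_ne!(prior, new);
+//! ```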
+ +#![deny(missing_docs)] +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; +pub mod weights; +pub use pallet::*; +pub use weights::WeightInfo; + +use frame_support::traits::{ + fungible::{Inspect as FungibleInspect, Mutate as FungibleMutate}, + fungibles::{roles::ResetTeam, Inspect, Mutate, Refund}, + tokens::{Fortitude, Precision, Preservation}, + AccountTouch, +}; +use pallet_asset_conversion::{PoolLocator, Pools}; +use sp_runtime::traits::{TryConvert, Zero}; +use sp_std::boxed::Box; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet<T>(_); + + #[pallet::config] + pub trait Config: + pallet_asset_conversion::Config< + PoolId = ( + <Self as pallet_asset_conversion::Config>::AssetKind, + <Self as pallet_asset_conversion::Config>::AssetKind, + ), + > + frame_system::Config + { + /// Overarching event type. + type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>; + + /// Type previously used to derive the account ID for a pool. Indicates that the pool's + /// liquidity assets are located at this account before the migration. + type PriorAccountIdConverter: for<'a> TryConvert< + &'a (Self::AssetKind, Self::AssetKind), + Self::AccountId, + >; + + /// Retrieves information about an existing deposit for a given account ID and asset from + /// the [`pallet_asset_conversion::Config::Assets`] registry and can initiate the refund. + type AssetsRefund: Refund< + Self::AccountId, + AssetId = Self::AssetKind, + Balance = <Self::DepositAsset as FungibleInspect<Self::AccountId>>::Balance, + >; + + /// Retrieves information about an existing deposit for a given account ID and asset from + /// the [`pallet_asset_conversion::Config::PoolAssets`] registry and can initiate the + /// refund. + type PoolAssetsRefund: Refund< + Self::AccountId, + AssetId = Self::PoolAssetId, + Balance = <Self::DepositAsset as FungibleInspect<Self::AccountId>>::Balance, + >; + + /// Means to reset the team for assets from the + /// [`pallet_asset_conversion::Config::PoolAssets`] registry. + type PoolAssetsTeam: ResetTeam<Self::AccountId, AssetId = Self::PoolAssetId>; + + /// Registry of an asset used as an account deposit for the + /// [`pallet_asset_conversion::Config::Assets`] and + /// [`pallet_asset_conversion::Config::PoolAssets`] registries. + type DepositAsset: FungibleMutate<Self::AccountId>; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } + + // Pallet's events. + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event<T: Config> { + /// Indicates that a pool has been migrated to the new account ID. + MigratedToNewAccount { + /// Pool's ID. + pool_id: T::PoolId, + /// Pool's prior account ID. + prior_account: T::AccountId, + /// Pool's new account ID. + new_account: T::AccountId, + }, + } + + #[pallet::error] + pub enum Error<T> { + /// Provided asset pair is not supported for pool. + InvalidAssetPair, + /// The pool doesn't exist. + PoolNotFound, + /// Pool's balance cannot be zero. + ZeroBalance, + /// Indicates a partial transfer of balance to the new account during a migration. + PartialTransfer, + } + + /// Pallet's callable functions. + #[pallet::call] + impl<T: Config> Pallet<T> { + /// Migrates an existing pool to a new account ID derivation method for a given asset pair. + /// If the migration is successful, transaction fees are refunded to the caller. + /// + /// Must be signed.
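+ /// + /// Emits [`Event::MigratedToNewAccount`] on success.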
+ #[pallet::call_index(0)] + #[pallet::weight(<T as Config>::WeightInfo::migrate_to_new_account())] + pub fn migrate_to_new_account( + origin: OriginFor<T>, + asset1: Box<T::AssetKind>, + asset2: Box<T::AssetKind>, + ) -> DispatchResultWithPostInfo { + let _ = ensure_signed(origin)?; + + let pool_id = T::PoolLocator::pool_id(&asset1, &asset2) + .map_err(|_| Error::<T>::InvalidAssetPair)?; + let info = Pools::<T>::get(&pool_id).ok_or(Error::<T>::PoolNotFound)?; + + let (prior_account, new_account) = + Self::addresses(&pool_id).ok_or(Error::<T>::InvalidAssetPair)?; + + let (asset1, asset2) = pool_id.clone(); + + // Assets that must be transferred to the new account ID. + let balance1 = T::Assets::total_balance(asset1.clone(), &prior_account); + let balance2 = T::Assets::total_balance(asset2.clone(), &prior_account); + let lp_balance = T::PoolAssets::total_balance(info.lp_token.clone(), &prior_account); + + ensure!(!balance1.is_zero(), Error::<T>::ZeroBalance); + ensure!(!balance2.is_zero(), Error::<T>::ZeroBalance); + ensure!(!lp_balance.is_zero(), Error::<T>::ZeroBalance); + + // Check if a deposit needs to be placed for the new account. If so, mint the + // required deposit amount to the depositor's account to ensure the deposit can be + // provided. Once the deposit from the prior account is returned, the minted assets will + // be burned. Touching the new account is necessary because assets cannot be transferred + // to an account that does not yet exist when an account deposit is required. Additionally, + // the deposit cannot be refunded from the prior account until its balance is zero. + + let deposit_asset_ed = T::DepositAsset::minimum_balance(); + + if let Some((depositor, deposit)) = + T::AssetsRefund::deposit_held(asset1.clone(), prior_account.clone()) + { + T::DepositAsset::mint_into(&depositor, deposit + deposit_asset_ed)?; + T::Assets::touch(asset1.clone(), &new_account, &depositor)?; + } + + if let Some((depositor, deposit)) = + T::AssetsRefund::deposit_held(asset2.clone(), prior_account.clone()) + { + T::DepositAsset::mint_into(&depositor, deposit + deposit_asset_ed)?; + T::Assets::touch(asset2.clone(), &new_account, &depositor)?; + } + + if let Some((depositor, deposit)) = + T::PoolAssetsRefund::deposit_held(info.lp_token.clone(), prior_account.clone()) + { + T::DepositAsset::mint_into(&depositor, deposit + deposit_asset_ed)?; + T::PoolAssets::touch(info.lp_token.clone(), &new_account, &depositor)?; + } + + // Transfer all pool related assets to the new account. + + ensure!( + balance1 == + T::Assets::transfer( + asset1.clone(), + &prior_account, + &new_account, + balance1, + Preservation::Expendable, + )?, + Error::<T>::PartialTransfer + ); + + ensure!( + balance2 == + T::Assets::transfer( + asset2.clone(), + &prior_account, + &new_account, + balance2, + Preservation::Expendable, + )?, + Error::<T>::PartialTransfer + ); + + ensure!( + lp_balance == + T::PoolAssets::transfer( + info.lp_token.clone(), + &prior_account, + &new_account, + lp_balance, + Preservation::Expendable, + )?, + Error::<T>::PartialTransfer + ); + + // Refund deposits from prior accounts and burn previously minted assets.
+ + if let Some((depositor, deposit)) = + T::AssetsRefund::deposit_held(asset1.clone(), prior_account.clone()) + { + T::AssetsRefund::refund(asset1.clone(), prior_account.clone())?; + T::DepositAsset::burn_from( + &depositor, + deposit + deposit_asset_ed, + Precision::Exact, + Fortitude::Force, + )?; + } + + if let Some((depositor, deposit)) = + T::AssetsRefund::deposit_held(asset2.clone(), prior_account.clone()) + { + T::AssetsRefund::refund(asset2.clone(), prior_account.clone())?; + T::DepositAsset::burn_from( + &depositor, + deposit + deposit_asset_ed, + Precision::Exact, + Fortitude::Force, + )?; + } + + if let Some((depositor, deposit)) = + T::PoolAssetsRefund::deposit_held(info.lp_token.clone(), prior_account.clone()) + { + T::PoolAssetsRefund::refund(info.lp_token.clone(), prior_account.clone())?; + T::DepositAsset::burn_from( + &depositor, + deposit + deposit_asset_ed, + Precision::Exact, + Fortitude::Force, + )?; + } + + T::PoolAssetsTeam::reset_team( + info.lp_token, + new_account.clone(), + new_account.clone(), + new_account.clone(), + new_account.clone(), + )?; + + Self::deposit_event(Event::MigratedToNewAccount { + pool_id, + prior_account, + new_account, + }); + + Ok(Pays::No.into()) + } + } + + impl Pallet { + /// Returns the prior and new account IDs for a given pool ID. The prior account ID comes + /// first in the tuple. + #[cfg(not(any(test, feature = "runtime-benchmarks")))] + fn addresses(pool_id: &T::PoolId) -> Option<(T::AccountId, T::AccountId)> { + match ( + T::PriorAccountIdConverter::try_convert(pool_id), + T::PoolLocator::address(pool_id), + ) { + (Ok(a), Ok(b)) if a != b => Some((a, b)), + _ => None, + } + } + + /// Returns the prior and new account IDs for a given pool ID. The prior account ID comes + /// first in the tuple. + /// + /// This function is intended for use only in test and benchmark environments. The prior + /// account ID represents the new account ID from [`Config::PoolLocator`], allowing the use + /// of the main pallet's calls to set up a pool with liquidity placed in that account and + /// migrate it to another account, which in this case is the result of + /// [`Config::PriorAccountIdConverter`]. + #[cfg(any(test, feature = "runtime-benchmarks"))] + pub(crate) fn addresses(pool_id: &T::PoolId) -> Option<(T::AccountId, T::AccountId)> { + match ( + T::PoolLocator::address(pool_id), + T::PriorAccountIdConverter::try_convert(pool_id), + ) { + (Ok(a), Ok(b)) if a != b => Some((a, b)), + _ => None, + } + } + } +} diff --git a/substrate/frame/asset-conversion/ops/src/mock.rs b/substrate/frame/asset-conversion/ops/src/mock.rs new file mode 100644 index 00000000000..9454b3a9ad4 --- /dev/null +++ b/substrate/frame/asset-conversion/ops/src/mock.rs @@ -0,0 +1,147 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test environment for Asset Conversion Ops pallet. 
+ +use super::*; +use crate as pallet_asset_conversion_ops; +use core::default::Default; +use frame_support::{ + construct_runtime, derive_impl, + instances::{Instance1, Instance2}, + ord_parameter_types, parameter_types, + traits::{ + tokens::{ + fungible::{NativeFromLeft, NativeOrWithId, UnionOf}, + imbalance::ResolveAssetTo, + }, + AsEnsureOriginWithArg, ConstU32, ConstU64, + }, + PalletId, +}; +use frame_system::{EnsureSigned, EnsureSignedBy}; +use pallet_asset_conversion::{self, AccountIdConverter, AccountIdConverterNoSeed, Ascending}; +use sp_arithmetic::Permill; +use sp_runtime::{traits::AccountIdConversion, BuildStorage}; + +type Block = frame_system::mocking::MockBlock; + +construct_runtime!( + pub enum Test + { + System: frame_system, + Balances: pallet_balances, + Assets: pallet_assets::, + PoolAssets: pallet_assets::, + AssetConversion: pallet_asset_conversion, + AssetConversionOps: pallet_asset_conversion_ops, + } +); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Test { + type Block = Block; + type AccountData = pallet_balances::AccountData; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] +impl pallet_balances::Config for Test { + type ReserveIdentifier = [u8; 8]; + type AccountStore = System; +} + +#[derive_impl(pallet_assets::config_preludes::TestDefaultConfig)] +impl pallet_assets::Config for Test { + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = frame_system::EnsureRoot; + type Freezer = (); +} + +#[derive_impl(pallet_assets::config_preludes::TestDefaultConfig)] +impl pallet_assets::Config for Test { + type Currency = Balances; + type CreateOrigin = + AsEnsureOriginWithArg>; + type ForceOrigin = frame_system::EnsureRoot; + type Freezer = (); +} + +parameter_types! { + pub const AssetConversionPalletId: PalletId = PalletId(*b"py/ascon"); + pub const Native: NativeOrWithId = NativeOrWithId::Native; + pub storage LiquidityWithdrawalFee: Permill = Permill::from_percent(0); +} + +ord_parameter_types! 
{ + pub const AssetConversionOrigin: u64 = AccountIdConversion::::into_account_truncating(&AssetConversionPalletId::get()); +} + +pub type NativeAndAssets = UnionOf, u64>; +pub type PoolIdToAccountId = + AccountIdConverter, NativeOrWithId)>; +pub type AscendingLocator = Ascending, PoolIdToAccountId>; + +impl pallet_asset_conversion::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Balance = ::Balance; + type HigherPrecisionBalance = sp_core::U256; + type AssetKind = NativeOrWithId; + type Assets = NativeAndAssets; + type PoolId = (Self::AssetKind, Self::AssetKind); + type PoolLocator = AscendingLocator; + type PoolAssetId = u32; + type PoolAssets = PoolAssets; + type PoolSetupFee = ConstU64<100>; + type PoolSetupFeeAsset = Native; + type PoolSetupFeeTarget = ResolveAssetTo; + type PalletId = AssetConversionPalletId; + type WeightInfo = (); + type LPFee = ConstU32<3>; + type LiquidityWithdrawalFee = LiquidityWithdrawalFee; + type MaxSwapPathLength = ConstU32<4>; + type MintMinLiquidity = ConstU64<100>; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); +} + +pub type OldPoolIdToAccountId = + AccountIdConverterNoSeed<(NativeOrWithId, NativeOrWithId)>; + +impl pallet_asset_conversion_ops::Config for Test { + type RuntimeEvent = RuntimeEvent; + type PriorAccountIdConverter = OldPoolIdToAccountId; + type AssetsRefund = NativeAndAssets; + type PoolAssetsRefund = PoolAssets; + type PoolAssetsTeam = PoolAssets; + type DepositAsset = Balances; + type WeightInfo = (); +} + +pub(crate) fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + pallet_balances::GenesisConfig:: { + balances: vec![(1, 10000), (2, 20000), (3, 30000), (4, 40000)], + } + .assimilate_storage(&mut t) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext +} diff --git a/substrate/frame/asset-conversion/ops/src/tests.rs b/substrate/frame/asset-conversion/ops/src/tests.rs new file mode 100644 index 00000000000..84bbe633674 --- /dev/null +++ b/substrate/frame/asset-conversion/ops/src/tests.rs @@ -0,0 +1,308 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Asset Conversion Ops pallet tests. + +use crate::{mock::*, *}; +use frame_support::{ + assert_noop, assert_ok, + traits::{ + fungible::{Inspect as FungibleInspect, NativeOrWithId}, + fungibles::{Create, Inspect}, + Incrementable, + }, +}; + +#[test] +fn migrate_pool_account_id_with_native() { + new_test_ext().execute_with(|| { + type PoolLocator = ::PoolLocator; + let user = 1; + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); + let pool_id = PoolLocator::pool_id(&token_1, &token_2).unwrap(); + let lp_token = + ::PoolAssetId::initial_value().unwrap(); + + // setup pool and provide some liquidity. 
+ assert_ok!(NativeAndAssets::create(token_2.clone(), user, false, 1)); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); + + let ed = Balances::minimum_balance(); + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), user, 10000 * 2 + ed)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()), + 10000, + 10, + 10000, + 10, + user, + )); + + // assert user's balance. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &user), 10000 + ed); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &user), 1000 - 10); + assert_eq!(PoolAssets::balance(lp_token, &user), 216); + + // record total issuances before migration. + let total_issuance_token1 = NativeAndAssets::total_issuance(token_1.clone()); + let total_issuance_token2 = NativeAndAssets::total_issuance(token_2.clone()); + let total_issuance_lp_token = PoolAssets::total_issuance(lp_token); + + let pool_account = PoolLocator::address(&pool_id).unwrap(); + let (prior_pool_account, new_pool_account) = + AssetConversionOps::addresses(&pool_id).unwrap(); + assert_eq!(pool_account, prior_pool_account); + + // assert pool's balances before migration. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &prior_pool_account), 10000); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &prior_pool_account), 10); + assert_eq!(PoolAssets::balance(lp_token, &prior_pool_account), 100); + + // migrate. + assert_ok!(AssetConversionOps::migrate_to_new_account( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()), + )); + + // assert user's balance has not changed. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &user), 10000 + ed); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &user), 1000 - 10); + assert_eq!(PoolAssets::balance(lp_token, &user), 216); + + // assert pool's balance on new account id is same as on prior account id. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &new_pool_account), 10000); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &new_pool_account), 10); + assert_eq!(PoolAssets::balance(lp_token, &new_pool_account), 100); + + // assert pool's balance on prior account id is zero. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &prior_pool_account), 0); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &prior_pool_account), 0); + assert_eq!(PoolAssets::balance(lp_token, &prior_pool_account), 0); + + // assert total issuance has not changed. + assert_eq!(total_issuance_token1, NativeAndAssets::total_issuance(token_1)); + assert_eq!(total_issuance_token2, NativeAndAssets::total_issuance(token_2)); + assert_eq!(total_issuance_lp_token, PoolAssets::total_issuance(lp_token)); + }); +} + +#[test] +fn migrate_pool_account_id_with_insufficient_assets() { + new_test_ext().execute_with(|| { + type PoolLocator = ::PoolLocator; + let user = 1; + let token_1 = NativeOrWithId::WithId(1); + let token_2 = NativeOrWithId::WithId(2); + let pool_id = PoolLocator::pool_id(&token_1, &token_2).unwrap(); + let lp_token = + ::PoolAssetId::initial_value().unwrap(); + + // setup pool and provide some liquidity. 
+ assert_ok!(NativeAndAssets::create(token_1.clone(), user, false, 1)); + assert_ok!(NativeAndAssets::create(token_2.clone(), user, false, 1)); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); + + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 1, user, 20000)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()), + 10000, + 10, + 10000, + 10, + user, + )); + + // assert user's balance. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &user), 10000); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &user), 1000 - 10); + assert_eq!(PoolAssets::balance(lp_token, &user), 216); + + // record total issuances before migration. + let total_issuance_token1 = NativeAndAssets::total_issuance(token_1.clone()); + let total_issuance_token2 = NativeAndAssets::total_issuance(token_2.clone()); + let total_issuance_lp_token = PoolAssets::total_issuance(lp_token); + + let pool_account = PoolLocator::address(&pool_id).unwrap(); + let (prior_pool_account, new_pool_account) = + AssetConversionOps::addresses(&pool_id).unwrap(); + assert_eq!(pool_account, prior_pool_account); + + // assert pool's balances before migration. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &prior_pool_account), 10000); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &prior_pool_account), 10); + assert_eq!(PoolAssets::balance(lp_token, &prior_pool_account), 100); + + // migrate. + assert_ok!(AssetConversionOps::migrate_to_new_account( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()), + )); + + // assert user's balance has not changed. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &user), 10000); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &user), 1000 - 10); + assert_eq!(PoolAssets::balance(lp_token, &user), 216); + + // assert pool's balance on new account id is same as on prior account id. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &new_pool_account), 10000); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &new_pool_account), 10); + assert_eq!(PoolAssets::balance(lp_token, &new_pool_account), 100); + + // assert pool's balance on prior account id is zero. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &prior_pool_account), 0); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &prior_pool_account), 0); + assert_eq!(PoolAssets::balance(lp_token, &prior_pool_account), 0); + + // assert total issuance has not changed. + assert_eq!(total_issuance_token1, NativeAndAssets::total_issuance(token_1)); + assert_eq!(total_issuance_token2, NativeAndAssets::total_issuance(token_2)); + assert_eq!(total_issuance_lp_token, PoolAssets::total_issuance(lp_token)); + }); +} + +#[test] +fn migrate_pool_account_id_with_sufficient_assets() { + new_test_ext().execute_with(|| { + type PoolLocator = ::PoolLocator; + let user = 1; + let token_1 = NativeOrWithId::WithId(1); + let token_2 = NativeOrWithId::WithId(2); + let pool_id = PoolLocator::pool_id(&token_1, &token_2).unwrap(); + let lp_token = + ::PoolAssetId::initial_value().unwrap(); + + // setup pool and provide some liquidity. 
+ assert_ok!(NativeAndAssets::create(token_1.clone(), user, true, 1)); + assert_ok!(NativeAndAssets::create(token_2.clone(), user, true, 1)); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); + + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 1, user, 20000)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(user), 2, user, 1000)); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()), + 10000, + 10, + 10000, + 10, + user, + )); + + // assert user's balance. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &user), 10000); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &user), 1000 - 10); + assert_eq!(PoolAssets::balance(lp_token, &user), 216); + + // record total issuances before migration. + let total_issuance_token1 = NativeAndAssets::total_issuance(token_1.clone()); + let total_issuance_token2 = NativeAndAssets::total_issuance(token_2.clone()); + let total_issuance_lp_token = PoolAssets::total_issuance(lp_token); + + let pool_account = PoolLocator::address(&pool_id).unwrap(); + let (prior_pool_account, new_pool_account) = + AssetConversionOps::addresses(&pool_id).unwrap(); + assert_eq!(pool_account, prior_pool_account); + + // assert pool's balances before migration. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &prior_pool_account), 10000); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &prior_pool_account), 10); + assert_eq!(PoolAssets::balance(lp_token, &prior_pool_account), 100); + + // migrate. + assert_ok!(AssetConversionOps::migrate_to_new_account( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()), + )); + + // assert user's balance has not changed. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &user), 10000); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &user), 1000 - 10); + assert_eq!(PoolAssets::balance(lp_token, &user), 216); + + // assert pool's balance on new account id is same as on prior account id. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &new_pool_account), 10000); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &new_pool_account), 10); + assert_eq!(PoolAssets::balance(lp_token, &new_pool_account), 100); + + // assert pool's balance on prior account id is zero. + assert_eq!(NativeAndAssets::balance(token_1.clone(), &prior_pool_account), 0); + assert_eq!(NativeAndAssets::balance(token_2.clone(), &prior_pool_account), 0); + assert_eq!(PoolAssets::balance(lp_token, &prior_pool_account), 0); + + // assert total issuance has not changed. + assert_eq!(total_issuance_token1, NativeAndAssets::total_issuance(token_1)); + assert_eq!(total_issuance_token2, NativeAndAssets::total_issuance(token_2)); + assert_eq!(total_issuance_lp_token, PoolAssets::total_issuance(lp_token)); + }); +} + +#[test] +fn migrate_empty_pool_account_id() { + new_test_ext().execute_with(|| { + let user = 1; + let token_1 = NativeOrWithId::Native; + let token_2 = NativeOrWithId::WithId(2); + + // setup pool and provide some liquidity. + assert_ok!(NativeAndAssets::create(token_2.clone(), user, false, 1)); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()) + )); + + // migrate. 
+ assert_noop!( + AssetConversionOps::migrate_to_new_account( + RuntimeOrigin::signed(user), + Box::new(token_1.clone()), + Box::new(token_2.clone()), + ), + Error::::ZeroBalance + ); + }); +} diff --git a/substrate/frame/asset-conversion/ops/src/weights.rs b/substrate/frame/asset-conversion/ops/src/weights.rs new file mode 100644 index 00000000000..9e7379c5015 --- /dev/null +++ b/substrate/frame/asset-conversion/ops/src/weights.rs @@ -0,0 +1,104 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_asset_conversion_ops` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-04-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// target/production/substrate-node +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_asset_conversion_ops +// --chain=dev +// --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/asset-conversion-ops/src/weights.rs +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_asset_conversion_ops`. +pub trait WeightInfo { + fn migrate_to_new_account() -> Weight; +} + +/// Weights for `pallet_asset_conversion_ops` using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:4 w:4) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:2 w:2) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:2 w:2) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn migrate_to_new_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `1762` + // Estimated: `11426` + // Minimum execution time: 223_850_000 picoseconds. + Weight::from_parts(231_676_000, 11426) + .saturating_add(T::DbWeight::get().reads(12_u64)) + .saturating_add(T::DbWeight::get().writes(11_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:4 w:4) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:2 w:2) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:2 w:2) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn migrate_to_new_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `1762` + // Estimated: `11426` + // Minimum execution time: 223_850_000 picoseconds. + Weight::from_parts(231_676_000, 11426) + .saturating_add(RocksDbWeight::get().reads(12_u64)) + .saturating_add(RocksDbWeight::get().writes(11_u64)) + } +} diff --git a/substrate/frame/asset-conversion/src/mock.rs b/substrate/frame/asset-conversion/src/mock.rs index 4591b87c186..477866e0051 100644 --- a/substrate/frame/asset-conversion/src/mock.rs +++ b/substrate/frame/asset-conversion/src/mock.rs @@ -137,8 +137,11 @@ ord_parameter_types! 
{ } pub type NativeAndAssets = UnionOf, u128>; -pub type AscendingLocator = Ascending>; -pub type WithFirstAssetLocator = WithFirstAsset>; +pub type PoolIdToAccountId = + AccountIdConverter, NativeOrWithId)>; +pub type AscendingLocator = Ascending, PoolIdToAccountId>; +pub type WithFirstAssetLocator = + WithFirstAsset, PoolIdToAccountId>; impl Config for Test { type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/asset-conversion/src/types.rs b/substrate/frame/asset-conversion/src/types.rs index 5ee81c2012d..27c0e8e6880 100644 --- a/substrate/frame/asset-conversion/src/types.rs +++ b/substrate/frame/asset-conversion/src/types.rs @@ -19,6 +19,7 @@ use super::*; use codec::{Decode, Encode, MaxEncodedLen}; use core::marker::PhantomData; use scale_info::TypeInfo; +use sp_runtime::traits::TryConvert; /// Represents a swap path with associated asset amounts indicating how much of the asset needs to /// be deposited to get the following asset's amount withdrawn (this is inclusive of fees). @@ -68,49 +69,59 @@ pub trait PoolLocator { /// /// The `PoolId` is represented as a tuple of `AssetKind`s with `FirstAsset` always positioned as /// the first element. -pub struct WithFirstAsset( - PhantomData<(FirstAsset, AccountId, AssetKind)>, +pub struct WithFirstAsset( + PhantomData<(FirstAsset, AccountId, AssetKind, AccountIdConverter)>, ); -impl PoolLocator - for WithFirstAsset +impl + PoolLocator + for WithFirstAsset where AssetKind: Eq + Clone + Encode, AccountId: Decode, FirstAsset: Get, + AccountIdConverter: for<'a> TryConvert<&'a (AssetKind, AssetKind), AccountId>, { fn pool_id(asset1: &AssetKind, asset2: &AssetKind) -> Result<(AssetKind, AssetKind), ()> { + if asset1 == asset2 { + return Err(()); + } let first = FirstAsset::get(); - match true { - _ if asset1 == asset2 => Err(()), - _ if first == *asset1 => Ok((first, asset2.clone())), - _ if first == *asset2 => Ok((first, asset1.clone())), - _ => Err(()), + if first == *asset1 { + Ok((first, asset2.clone())) + } else if first == *asset2 { + Ok((first, asset1.clone())) + } else { + Err(()) } } fn address(id: &(AssetKind, AssetKind)) -> Result { - let encoded = sp_io::hashing::blake2_256(&Encode::encode(id)[..]); - Decode::decode(&mut TrailingZeroInput::new(encoded.as_ref())).map_err(|_| ()) + AccountIdConverter::try_convert(id).map_err(|_| ()) } } /// Pool locator where the `PoolId` is a tuple of `AssetKind`s arranged in ascending order. 
-pub struct Ascending<AccountId, AssetKind>(PhantomData<(AccountId, AssetKind)>); -impl<AccountId, AssetKind> PoolLocator<AccountId, AssetKind, (AssetKind, AssetKind)> - for Ascending<AccountId, AssetKind> +pub struct Ascending<AccountId, AssetKind, AccountIdConverter>( + PhantomData<(AccountId, AssetKind, AccountIdConverter)>, +); +impl<AccountId, AssetKind, AccountIdConverter> + PoolLocator<AccountId, AssetKind, (AssetKind, AssetKind)> + for Ascending<AccountId, AssetKind, AccountIdConverter> where AssetKind: Ord + Clone + Encode, AccountId: Decode, + AccountIdConverter: for<'a> TryConvert<&'a (AssetKind, AssetKind), AccountId>, { fn pool_id(asset1: &AssetKind, asset2: &AssetKind) -> Result<(AssetKind, AssetKind), ()> { - match true { - _ if asset1 > asset2 => Ok((asset2.clone(), asset1.clone())), - _ if asset1 < asset2 => Ok((asset1.clone(), asset2.clone())), - _ => Err(()), + if asset1 > asset2 { + Ok((asset2.clone(), asset1.clone())) + } else if asset1 < asset2 { + Ok((asset1.clone(), asset2.clone())) + } else { + Err(()) } } fn address(id: &(AssetKind, AssetKind)) -> Result<AccountId, ()> { - let encoded = sp_io::hashing::blake2_256(&Encode::encode(id)[..]); - Decode::decode(&mut TrailingZeroInput::new(encoded.as_ref())).map_err(|_| ()) + AccountIdConverter::try_convert(id).map_err(|_| ()) } } @@ -131,3 +142,30 @@ where First::address(id).or(Second::address(id)) } } + +/// `PoolId` to `AccountId` conversion. +pub struct AccountIdConverter<Seed, PoolId>(PhantomData<(Seed, PoolId)>); +impl<Seed, PoolId, AccountId> TryConvert<&PoolId, AccountId> for AccountIdConverter<Seed, PoolId> +where + PoolId: Encode, + AccountId: Decode, + Seed: Get<PalletId>, +{ + fn try_convert(id: &PoolId) -> Result<AccountId, &PoolId> { + sp_io::hashing::blake2_256(&Encode::encode(&(Seed::get(), id))[..]) + .using_encoded(|e| Decode::decode(&mut TrailingZeroInput::new(e)).map_err(|_| id)) + } +} + +/// `PoolId` to `AccountId` conversion without additional arguments to the seed. +pub struct AccountIdConverterNoSeed<PoolId>(PhantomData<PoolId>); +impl<PoolId, AccountId> TryConvert<&PoolId, AccountId> for AccountIdConverterNoSeed<PoolId> +where + PoolId: Encode, + AccountId: Decode, +{ + fn try_convert(id: &PoolId) -> Result<AccountId, &PoolId> { + sp_io::hashing::blake2_256(&Encode::encode(id)[..]) + .using_encoded(|e| Decode::decode(&mut TrailingZeroInput::new(e)).map_err(|_| id)) + } +} diff --git a/substrate/frame/assets/src/functions.rs b/substrate/frame/assets/src/functions.rs index 4a5fb06ee2c..9309d010117 100644 --- a/substrate/frame/assets/src/functions.rs +++ b/substrate/frame/assets/src/functions.rs @@ -368,11 +368,14 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> { Ok(()) } - /// Returns a `DepositFrom` of an account only if balance is zero. + /// Refunds the `DepositFrom` of an account only if its balance is zero. + /// + /// If the `maybe_check_caller` parameter is specified, it must match the account that provided + /// the deposit or must be the admin of the asset. pub(super) fn do_refund_other( id: T::AssetId, who: &T::AccountId, - caller: &T::AccountId, + maybe_check_caller: Option<T::AccountId>, ) -> DispatchResult { let mut account = Account::<T, I>::get(&id, &who).ok_or(Error::<T, I>::NoDeposit)?; let (depositor, deposit) = @@ -380,7 +383,9 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> { let mut details = Asset::<T, I>::get(&id).ok_or(Error::<T, I>::Unknown)?; ensure!(details.status == AssetStatus::Live, Error::<T, I>::AssetNotLive); ensure!(!account.status.is_frozen(), Error::<T, I>::Frozen); - ensure!(caller == &depositor || caller == &details.admin, Error::<T, I>::NoPermission); + if let Some(caller) = maybe_check_caller { + ensure!(caller == depositor || caller == details.admin, Error::<T, I>::NoPermission); + } ensure!(account.balance.is_zero(), Error::<T, I>::WouldBurn); T::Currency::unreserve(&depositor, deposit); @@ -1013,4 +1018,28 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> { }) .collect::<Vec<_>>() } + + /// Reset the team for the asset with the given `id`.
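+ /// + /// Fails with the `Unknown` error if no asset with the given `id` exists.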
+ /// + /// ### Parameters + /// - `id`: The identifier of the asset for which the team is being reset. + /// - `owner`: The new `owner` account for the asset. + /// - `admin`: The new `admin` account for the asset. + /// - `issuer`: The new `issuer` account for the asset. + /// - `freezer`: The new `freezer` account for the asset. + pub(crate) fn do_reset_team( + id: T::AssetId, + owner: T::AccountId, + admin: T::AccountId, + issuer: T::AccountId, + freezer: T::AccountId, + ) -> DispatchResult { + let mut d = Asset::::get(&id).ok_or(Error::::Unknown)?; + d.owner = owner; + d.admin = admin; + d.issuer = issuer; + d.freezer = freezer; + Asset::::insert(&id, d); + Ok(()) + } } diff --git a/substrate/frame/assets/src/impl_fungibles.rs b/substrate/frame/assets/src/impl_fungibles.rs index 123abeba828..9f837a60434 100644 --- a/substrate/frame/assets/src/impl_fungibles.rs +++ b/substrate/frame/assets/src/impl_fungibles.rs @@ -308,3 +308,35 @@ impl, I: 'static> fungibles::InspectEnumerable for Pa Asset::::iter_keys() } } + +impl, I: 'static> fungibles::roles::ResetTeam for Pallet { + fn reset_team( + id: T::AssetId, + owner: T::AccountId, + admin: T::AccountId, + issuer: T::AccountId, + freezer: T::AccountId, + ) -> DispatchResult { + Self::do_reset_team(id, owner, admin, issuer, freezer) + } +} + +impl, I: 'static> fungibles::Refund for Pallet { + type AssetId = T::AssetId; + type Balance = DepositBalanceOf; + fn deposit_held(id: Self::AssetId, who: T::AccountId) -> Option<(T::AccountId, Self::Balance)> { + use ExistenceReason::*; + match Account::::get(&id, &who).ok_or(Error::::NoDeposit).ok()?.reason { + DepositHeld(b) => Some((who, b)), + DepositFrom(d, b) => Some((d, b)), + _ => None, + } + } + fn refund(id: Self::AssetId, who: T::AccountId) -> DispatchResult { + match Self::deposit_held(id.clone(), who.clone()) { + Some((d, _)) if d == who => Self::do_refund(id, who, false), + Some(..) => Self::do_refund_other(id, &who, None), + None => Err(Error::::NoDeposit.into()), + } + } +} diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index c6b379e1d06..37073b28065 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -1644,7 +1644,7 @@ pub mod pallet { let origin = ensure_signed(origin)?; let who = T::Lookup::lookup(who)?; let id: T::AssetId = id.into(); - Self::do_refund_other(id, &who, &origin) + Self::do_refund_other(id, &who, Some(origin)) } /// Disallow further unprivileged transfers of an asset `id` to and from an account `who`. 
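A minimal usage sketch of the `fungibles::Refund` trait implemented above for `pallet_assets` (the helper name and the `A: Clone` bound are illustrative, not part of this patch): the deposit is only returned when one is actually held, and the `pallet_assets` implementation additionally rejects the refund with `WouldBurn` while the account's asset balance is non-zero.

use frame_support::traits::fungibles::Refund;
use sp_runtime::DispatchResult;

/// Release the existence deposit of `who`'s account in asset `id`, if one is held.
fn release_deposit_if_held<A: Clone, F: Refund<A>>(id: F::AssetId, who: A) -> DispatchResult {
    // `deposit_held` reports the depositor and the held amount, if any.
    if F::deposit_held(id.clone(), who.clone()).is_some() {
        // Return the deposit to the depositor.
        F::refund(id, who)?;
    }
    Ok(())
}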
diff --git a/substrate/frame/support/src/traits/tokens/fungible/union_of.rs b/substrate/frame/support/src/traits/tokens/fungible/union_of.rs
index 575b771a614..db44b2f43a4 100644
--- a/substrate/frame/support/src/traits/tokens/fungible/union_of.rs
+++ b/substrate/frame/support/src/traits/tokens/fungible/union_of.rs
@@ -925,3 +925,28 @@ impl<
 		}
 	}
 }
+
+impl<
+		Left: fungible::Inspect<AccountId>,
+		Right: fungibles::Inspect<AccountId> + fungibles::Refund<AccountId>,
+		Criterion: Convert<AssetKind, Either<(), <Right as fungibles::Refund<AccountId>>::AssetId>>,
+		AssetKind: AssetId,
+		AccountId,
+	> fungibles::Refund<AccountId> for UnionOf<Left, Right, Criterion, AssetKind, AccountId>
+{
+	type AssetId = AssetKind;
+	type Balance = <Right as fungibles::Refund<AccountId>>::Balance;
+
+	fn deposit_held(asset: AssetKind, who: AccountId) -> Option<(AccountId, Self::Balance)> {
+		match Criterion::convert(asset) {
+			Left(()) => None,
+			Right(a) => <Right as fungibles::Refund<AccountId>>::deposit_held(a, who),
+		}
+	}
+	fn refund(asset: AssetKind, who: AccountId) -> DispatchResult {
+		match Criterion::convert(asset) {
+			Left(()) => Err(DispatchError::Unavailable),
+			Right(a) => <Right as fungibles::Refund<AccountId>>::refund(a, who),
+		}
+	}
+}
diff --git a/substrate/frame/support/src/traits/tokens/fungibles/lifetime.rs b/substrate/frame/support/src/traits/tokens/fungibles/lifetime.rs
index 49f6c846ccd..7f882c1fc4f 100644
--- a/substrate/frame/support/src/traits/tokens/fungibles/lifetime.rs
+++ b/substrate/frame/support/src/traits/tokens/fungibles/lifetime.rs
@@ -15,13 +15,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//! Traits for creating and destroying assets.
+//! Traits for creating, editing and destroying assets.
 //!
 //! See the [`crate::traits::fungibles`] doc for more information about fungibles traits.
 
-use sp_runtime::{DispatchError, DispatchResult};
-
 use super::Inspect;
+use crate::traits::tokens::{AssetId, Balance};
+use sp_runtime::{DispatchError, DispatchResult};
 
 /// Trait for providing the ability to create new fungible assets.
 pub trait Create<AccountId>: Inspect<AccountId> {
@@ -34,6 +34,22 @@ pub trait Create<AccountId>: Inspect<AccountId> {
 	) -> DispatchResult;
 }
 
+/// Trait for refunding the existence deposit of a target asset account.
+///
+/// The existence deposit might be necessary and present in cases where the asset held by the
+/// account is insufficient for the required storage, or when the system cannot provide a consumer
+/// reference for any reason.
+pub trait Refund<AccountId> {
+	/// Means of identifying one asset class from another.
+	type AssetId: AssetId;
+	/// Scalar type for representing deposit balance of an account.
+	type Balance: Balance;
+	/// Returns the amount of account deposit and depositor address, if any.
+	fn deposit_held(id: Self::AssetId, who: AccountId) -> Option<(AccountId, Self::Balance)>;
+	/// Return the deposit (if any) of a target asset account.
+	fn refund(id: Self::AssetId, who: AccountId) -> DispatchResult;
+}
+
 /// Trait for providing the ability to destroy existing fungible assets.
 pub trait Destroy<AccountId>: Inspect<AccountId> {
 	/// Start the destruction of an existing fungible asset.
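The `deposit_held`/`refund` pair is meant to be driven by generic code that first checks whether anything is refundable at all. A hedged usage sketch follows; the helper name and error strings are placeholders, not part of this patch:

```rust
use frame_support::traits::fungibles::Refund;

/// Release the existence deposit of `who`'s asset account, if one is held.
fn release_deposit<Assets, AccountId>(
	id: Assets::AssetId,
	who: AccountId,
) -> Result<(), &'static str>
where
	Assets: Refund<AccountId>,
	AccountId: Clone,
{
	// `deposit_held` reports the depositor and the amount, if any deposit exists.
	if Assets::deposit_held(id.clone(), who.clone()).is_none() {
		return Err("no deposit to refund");
	}
	// `refund` is expected to fail while the asset account still holds a balance.
	Assets::refund(id, who).map_err(|_| "refund failed")
}
```

Note how the `fungibles::Refund` implementation for the assets pallet above dispatches on the existence reason: a `DepositHeld` is refunded to the account itself via `do_refund`, while a `DepositFrom` goes back to the original depositor via `do_refund_other` with the permission check skipped (`None`).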
diff --git a/substrate/frame/support/src/traits/tokens/fungibles/mod.rs b/substrate/frame/support/src/traits/tokens/fungibles/mod.rs index 2122fdeaed3..8b4ea4d13cf 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/mod.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/mod.rs @@ -44,7 +44,7 @@ pub use hold::{ Unbalanced as UnbalancedHold, }; pub use imbalance::{Credit, Debt, HandleImbalanceDrop, Imbalance}; -pub use lifetime::{Create, Destroy}; +pub use lifetime::{Create, Destroy, Refund}; pub use regular::{ Balanced, DecreaseIssuance, Dust, IncreaseIssuance, Inspect, Mutate, Unbalanced, }; diff --git a/substrate/frame/support/src/traits/tokens/fungibles/roles.rs b/substrate/frame/support/src/traits/tokens/fungibles/roles.rs index 4f95ad8368c..3e68d6b9451 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/roles.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/roles.rs @@ -19,6 +19,8 @@ //! //! See the [`crate::traits::fungibles`] doc for more information about fungibles traits. +use sp_runtime::DispatchResult; + pub trait Inspect: super::Inspect { // Get owner for an AssetId. fn owner(asset: Self::AssetId) -> Option; @@ -29,3 +31,22 @@ pub trait Inspect: super::Inspect { // Get freezer for an AssetId. fn freezer(asset: Self::AssetId) -> Option; } + +/// Trait for resetting the team configuration of an existing fungible asset. +pub trait ResetTeam: super::Inspect { + /// Reset the team for the asset with the given `id`. + /// + /// ### Parameters + /// - `id`: The identifier of the asset for which the team is being reset. + /// - `owner`: The new `owner` account for the asset. + /// - `admin`: The new `admin` account for the asset. + /// - `issuer`: The new `issuer` account for the asset. + /// - `freezer`: The new `freezer` account for the asset. + fn reset_team( + id: Self::AssetId, + owner: AccountId, + admin: AccountId, + issuer: AccountId, + freezer: AccountId, + ) -> DispatchResult; +} diff --git a/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs b/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs index c8a1ec37e78..2c7d4bab7ba 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/union_of.rs @@ -904,3 +904,35 @@ impl< } } } + +impl< + Left: fungibles::Inspect + fungibles::Refund, + Right: fungibles::Inspect + + fungibles::Refund>::Balance>, + Criterion: Convert< + AssetKind, + Either< + >::AssetId, + >::AssetId, + >, + >, + AssetKind: AssetId, + AccountId, + > fungibles::Refund for UnionOf +{ + type AssetId = AssetKind; + type Balance = >::Balance; + + fn deposit_held(asset: AssetKind, who: AccountId) -> Option<(AccountId, Self::Balance)> { + match Criterion::convert(asset) { + Left(a) => >::deposit_held(a, who), + Right(a) => >::deposit_held(a, who), + } + } + fn refund(asset: AssetKind, who: AccountId) -> DispatchResult { + match Criterion::convert(asset) { + Left(a) => >::refund(a, who), + Right(a) => >::refund(a, who), + } + } +} diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs index d61558cf536..9a2b22b8170 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs @@ -244,6 +244,11 @@ ord_parameter_types! 
{ pub const AssetConversionOrigin: u64 = AccountIdConversion::::into_account_truncating(&AssetConversionPalletId::get()); } +pub type PoolIdToAccountId = pallet_asset_conversion::AccountIdConverter< + AssetConversionPalletId, + (NativeOrWithId, NativeOrWithId), +>; + impl pallet_asset_conversion::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; @@ -252,8 +257,8 @@ impl pallet_asset_conversion::Config for Runtime { type Assets = UnionOf, AccountId>; type PoolId = (Self::AssetKind, Self::AssetKind); type PoolLocator = Chain< - WithFirstAsset>, - Ascending>, + WithFirstAsset, PoolIdToAccountId>, + Ascending, PoolIdToAccountId>, >; type PoolAssetId = u32; type PoolAssets = PoolAssets; -- GitLab From ca7c01c8d85f10cf330008686a03ce362202ae8e Mon Sep 17 00:00:00 2001 From: gui Date: Thu, 18 Apr 2024 00:22:46 +0900 Subject: [PATCH 017/107] Improve doc for pallet macro and config macro (#4146) Improve doc: * the pallet macro is actually referring to 2 places, for the module and for the struct placeholder but doesn't really clarify it (I should have named the latter just `pallet_struct` or something but it is a bit late) * The doc of `with_default` is a bit confusing too IMO. CC @kianenigma --------- Co-authored-by: Liam Aharon --- substrate/frame/support/src/lib.rs | 141 +++++++++++++++++++++-------- 1 file changed, 103 insertions(+), 38 deletions(-) diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 895215d364e..984a7f7537f 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -936,6 +936,83 @@ pub mod pallet_prelude { pub use sp_weights::Weight; } +/// The pallet macro has 2 purposes: +/// +/// * [For declaring a pallet as a rust module](#1---pallet-module-declaration) +/// * [For declaring the `struct` placeholder of a +/// pallet](#2---pallet-struct-placeholder-declaration) +/// +/// # 1 - Pallet module declaration +/// +/// The module to declare a pallet is organized as follow: +/// ``` +/// #[frame_support::pallet] // <- the macro +/// mod pallet { +/// #[pallet::pallet] +/// pub struct Pallet(_); +/// +/// #[pallet::config] +/// pub trait Config: frame_system::Config {} +/// +/// #[pallet::call] +/// impl Pallet { +/// } +/// +/// /* ... */ +/// } +/// ``` +/// +/// The documentation for each individual part can be found at [frame_support::pallet_macros] +/// +/// ## Dev Mode (`#[pallet(dev_mode)]`) +/// +/// Syntax: +/// +/// ``` +/// #[frame_support::pallet(dev_mode)] +/// mod pallet { +/// # #[pallet::pallet] +/// # pub struct Pallet(_); +/// # #[pallet::config] +/// # pub trait Config: frame_system::Config {} +/// /* ... */ +/// } +/// ``` +/// +/// Specifying the argument `dev_mode` will allow you to enable dev mode for a pallet. The +/// aim of dev mode is to loosen some of the restrictions and requirements placed on +/// production pallets for easy tinkering and development. Dev mode pallets should not be +/// used in production. Enabling dev mode has the following effects: +/// +/// * Weights no longer need to be specified on every `#[pallet::call]` declaration. By +/// default, dev mode pallets will assume a weight of zero (`0`) if a weight is not +/// specified. This is equivalent to specifying `#[weight(0)]` on all calls that do not +/// specify a weight. +/// * Call indices no longer need to be specified on every `#[pallet::call]` declaration. By +/// default, dev mode pallets will assume a call index based on the order of the call. 
+/// * All storages are marked as unbounded, meaning you do not need to implement +/// [`MaxEncodedLen`](frame_support::pallet_prelude::MaxEncodedLen) on storage types. This is +/// equivalent to specifying `#[pallet::unbounded]` on all storage type definitions. +/// * Storage hashers no longer need to be specified and can be replaced by `_`. In dev mode, +/// these will be replaced by `Blake2_128Concat`. In case of explicit key-binding, `Hasher` +/// can simply be ignored when in `dev_mode`. +/// +/// Note that the `dev_mode` argument can only be supplied to the `#[pallet]` or +/// `#[frame_support::pallet]` attribute macro that encloses your pallet module. This +/// argument cannot be specified anywhere else, including but not limited to the +/// `#[pallet::pallet]` attribute macro. +/// +///
+/// <div class="example-wrap" style="padding:0.75em;"><strong>WARNING:</strong>
+/// You should never deploy or use dev mode pallets in production. Doing so can break your
+/// chain. Once you are done tinkering, you should
+/// remove the 'dev_mode' argument from your #[pallet] declaration and fix any compile
+/// errors before attempting to use your pallet in a production scenario.
+/// </div>
+/// +/// # 2 - Pallet struct placeholder declaration +/// /// The pallet struct placeholder `#[pallet::pallet]` is mandatory and allows you to /// specify pallet information. /// @@ -984,40 +1061,6 @@ pub mod pallet_prelude { /// [`StorageInfoTrait`](frame_support::traits::StorageInfoTrait) for the pallet using the /// [`PartialStorageInfoTrait`](frame_support::traits::PartialStorageInfoTrait) /// implementation of storages. -/// -/// ## Dev Mode (`#[pallet(dev_mode)]`) -/// -/// Specifying the argument `dev_mode` will allow you to enable dev mode for a pallet. The -/// aim of dev mode is to loosen some of the restrictions and requirements placed on -/// production pallets for easy tinkering and development. Dev mode pallets should not be -/// used in production. Enabling dev mode has the following effects: -/// -/// * Weights no longer need to be specified on every `#[pallet::call]` declaration. By -/// default, dev mode pallets will assume a weight of zero (`0`) if a weight is not -/// specified. This is equivalent to specifying `#[weight(0)]` on all calls that do not -/// specify a weight. -/// * Call indices no longer need to be specified on every `#[pallet::call]` declaration. By -/// default, dev mode pallets will assume a call index based on the order of the call. -/// * All storages are marked as unbounded, meaning you do not need to implement -/// [`MaxEncodedLen`](frame_support::pallet_prelude::MaxEncodedLen) on storage types. This is -/// equivalent to specifying `#[pallet::unbounded]` on all storage type definitions. -/// * Storage hashers no longer need to be specified and can be replaced by `_`. In dev mode, -/// these will be replaced by `Blake2_128Concat`. In case of explicit key-binding, `Hasher` -/// can simply be ignored when in `dev_mode`. -/// -/// Note that the `dev_mode` argument can only be supplied to the `#[pallet]` or -/// `#[frame_support::pallet]` attribute macro that encloses your pallet module. This -/// argument cannot be specified anywhere else, including but not limited to the -/// `#[pallet::pallet]` attribute macro. -/// -///
-/// <div class="example-wrap" style="padding:0.75em;"><strong>WARNING:</strong>
-/// You should not deploy or use dev mode pallets in production. Doing so can break your
-/// chain and therefore should never be done. Once you are done tinkering, you should
-/// remove the 'dev_mode' argument from your #[pallet] declaration and fix any compile
-/// errors before attempting to use your pallet in a production scenario.
-/// </div>
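// A minimal sketch of the relaxations the documentation above describes; this pallet is
// purely illustrative and assumes nothing beyond `frame-support`/`frame-system`. Under
// `dev_mode`, calls need no `#[pallet::weight]` or call index, storage is implicitly
// unbounded, and hashers may be written as `_`:
#[frame_support::pallet(dev_mode)]
pub mod pallet {
	use frame_support::pallet_prelude::*;
	use frame_system::pallet_prelude::*;

	#[pallet::pallet]
	pub struct Pallet<T>(_);

	#[pallet::config]
	pub trait Config: frame_system::Config {}

	// In dev mode the hasher may be left as `_` and the map is implicitly `unbounded`.
	#[pallet::storage]
	pub type Value<T> = StorageMap<_, _, u32, u32>;

	#[pallet::call]
	impl<T: Config> Pallet<T> {
		// Neither a weight annotation nor an explicit call index is required here.
		pub fn set(origin: OriginFor<T>, key: u32, value: u32) -> DispatchResult {
			ensure_signed(origin)?;
			Value::<T>::insert(key, value);
			Ok(())
		}
	}
}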
pub use frame_support_procedural::pallet; /// Contains macro stubs for all of the `pallet::` macros @@ -1456,16 +1499,24 @@ pub mod pallet_macros { /// # use core::fmt::Debug; /// # use frame_support::traits::Contains; /// # + /// # pub trait SomeMoreComplexBound {} + /// # /// #[pallet::pallet] /// pub struct Pallet(_); /// /// #[pallet::config(with_default)] // <- with_default is optional /// pub trait Config: frame_system::Config { /// /// The overarching event type. - /// #[pallet::no_default_bounds] // Default is not supported for RuntimeEvent + /// #[pallet::no_default_bounds] // Default with bounds is not supported for RuntimeEvent /// type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// - /// // ...other config items get default + /// /// A more complex type. + /// #[pallet::no_default] // Example of type where no default should be provided + /// type MoreComplexType: SomeMoreComplexBound; + /// + /// /// A simple type. + /// // Default with bounds is supported for simple types + /// type SimpleType: From; /// } /// /// #[pallet::event] @@ -1475,12 +1526,23 @@ pub mod pallet_macros { /// } /// ``` /// - /// As shown above, you may also attach the [`#[pallet::no_default]`](`no_default`) + /// As shown above: + /// * you may attach the [`#[pallet::no_default]`](`no_default`) /// attribute to specify that a particular trait item _cannot_ be used as a default when a /// test `Config` is derived using the [`#[derive_impl(..)]`](`frame_support::derive_impl`) /// attribute macro. This will cause that particular trait item to simply not appear in /// default testing configs based on this config (the trait item will not be included in /// `DefaultConfig`). + /// * you may attach the [`#[pallet::no_default_bounds]`](`no_default_bounds`) + /// attribute to specify that a particular trait item can be used as a default when a + /// test `Config` is derived using the [`#[derive_impl(..)]`](`frame_support::derive_impl`) + /// attribute macro. But its bounds cannot be enforced at this point and should be + /// discarded when generating the default config trait. + /// * you may not specify any attribute to generate a trait item in the default config + /// trait. + /// + /// In case origin of error is not clear it is recommended to disable all default with + /// [`#[pallet::no_default]`](`no_default`) and enable them one by one. /// /// ### `DefaultConfig` Caveats /// @@ -1500,7 +1562,10 @@ pub mod pallet_macros { /// the `DefaultConfig` trait, and therefore any impl of `DefaultConfig` doesn't need to /// implement such items. /// - /// For more information, see [`frame_support::derive_impl`]. + /// For more information, see: + /// * [`frame_support::derive_impl`]. + /// * [`#[pallet::no_default]`](`no_default`) + /// * [`#[pallet::no_default_bounds]`](`no_default_bounds`) pub use frame_support_procedural::config; /// Allows defining an enum that gets composed as an aggregate enum by `construct_runtime`. -- GitLab From bfbf7f5d6f5c491a820bb0a4fb9508ce52192a06 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Wed, 17 Apr 2024 18:29:29 +0300 Subject: [PATCH 018/107] chainHead: Report unique hashes for pruned blocks (#3667) This PR ensures that the reported pruned blocks are unique. While at it, ensure that the best block event is properly generated when the last best block is a fork that will be pruned in the future. 
To achieve this, the chainHead keeps an LRU set of reported pruned blocks to ensure the following are not reported twice:

```bash
finalized -> block 1 -> block 2 -> block 3
                     -> block 2 -> block 4 -> block 5
          -> block 1 -> block 2_f -> block 6 -> block 7 -> block 8
```

When block 7 is finalized, the branch [block 2; block 3] is reported as pruned. When block 8 is finalized, the branch [block 2; block 4; block 5] should be reported as pruned; however, block 2 was already reported as pruned at the previous step.

This is a side-effect of the pruned blocks being reported at level N - 1. For example, if all pruned forks were reported at the first encounter (when block 6 is finalized we know that block 3 and block 5 are stale), we would not need the LRU cache.

cc @paritytech/subxt-team

Closes https://github.com/paritytech/polkadot-sdk/issues/3658

---------

Signed-off-by: Alexandru Vasile
Co-authored-by: Sebastian Kunert
---
 Cargo.lock                                   |   1 +
 substrate/client/rpc-spec-v2/Cargo.toml      |   1 +
 .../rpc-spec-v2/src/chain_head/chain_head.rs |   2 +-
 .../src/chain_head/chain_head_follow.rs      | 108 ++++-----
 .../rpc-spec-v2/src/chain_head/tests.rs      | 213 ++++++++++++++++++
 5 files changed, 273 insertions(+), 52 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index aeb8b2cdb19..7bf5215b6de 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -17465,6 +17465,7 @@ dependencies = [
  "sc-transaction-pool",
  "sc-transaction-pool-api",
  "sc-utils",
+ "schnellru",
  "serde",
  "serde_json",
  "sp-api",
diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml
index e2612d91454..f2fc7bee6e2 100644
--- a/substrate/client/rpc-spec-v2/Cargo.toml
+++ b/substrate/client/rpc-spec-v2/Cargo.toml
@@ -42,6 +42,7 @@ array-bytes = "6.1"
 log = { workspace = true, default-features = true }
 futures-util = { version = "0.3.30", default-features = false }
 rand = "0.8.5"
+schnellru = "0.2.1"
 
 [dev-dependencies]
 jsonrpsee = { version = "0.22", features = ["server", "ws-client"] }
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs
index 86d9a726d7b..6779180a414 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs
@@ -75,7 +75,7 @@ pub struct ChainHeadConfig {
 /// Maximum pinned blocks across all connections.
 /// This number is large enough to consider immediate blocks.
 /// Note: This should never exceed the `PINNING_CACHE_SIZE` from client/db.
-const MAX_PINNED_BLOCKS: usize = 512;
+pub(crate) const MAX_PINNED_BLOCKS: usize = 512;
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs
index 0d87a45c07e..a753896b24c 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs
@@ -19,7 +19,7 @@
 //! Implementation of the `chainHead_follow` method.
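// A minimal standalone sketch of the dedup idea from the commit message, using the same
// `schnellru` crate this commit adds; `u64` stands in for a block hash and all names here
// are illustrative, not the actual `chainHead` implementation.
use schnellru::{ByLength, LruMap};

// Return only the stale hashes that were not reported before, recording them as reported.
fn dedup_pruned(reported: &mut LruMap<u64, ()>, stale: &[u64]) -> Vec<u64> {
	stale
		.iter()
		.copied()
		.filter(|hash| {
			if reported.get(hash).is_some() {
				false // already reported as pruned at an earlier finalization
			} else {
				reported.insert(*hash, ());
				true
			}
		})
		.collect()
}

fn main() {
	// A bounded LRU keeps memory constant, mirroring the `ByLength` limiter used below.
	let mut reported = LruMap::new(ByLength::new(512));
	assert_eq!(dedup_pruned(&mut reported, &[2, 3]), vec![2, 3]); // block 7 finalized
	assert_eq!(dedup_pruned(&mut reported, &[2, 4, 5]), vec![4, 5]); // block 8: 2 is filtered
}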
use crate::chain_head::{ - chain_head::LOG_TARGET, + chain_head::{LOG_TARGET, MAX_PINNED_BLOCKS}, event::{ BestBlockChanged, Finalized, FollowEvent, Initialized, NewBlock, RuntimeEvent, RuntimeVersionEvent, @@ -37,6 +37,7 @@ use sc_client_api::{ Backend, BlockBackend, BlockImportNotification, BlockchainEvents, FinalityNotification, }; use sc_rpc::utils::to_sub_message; +use schnellru::{ByLength, LruMap}; use sp_api::CallApiAt; use sp_blockchain::{ Backend as BlockChainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata, Info, @@ -68,7 +69,9 @@ pub struct ChainHeadFollower, Block: BlockT, Client> { /// Subscription ID. sub_id: String, /// The best reported block by this subscription. - best_block_cache: Option, + current_best_block: Option, + /// LRU cache of pruned blocks. + pruned_blocks: LruMap, /// Stop all subscriptions if the distance between the leaves and the current finalized /// block is larger than this value. max_lagging_distance: usize, @@ -90,7 +93,10 @@ impl, Block: BlockT, Client> ChainHeadFollower, - ) -> Result<(Vec>, HashSet), SubscriptionManagementError> - { + ) -> Result>, SubscriptionManagementError> { let init = self.get_init_blocks_with_forks(startup_point.finalized_hash)?; // The initialized event is the first one sent. let initial_blocks = init.finalized_block_descendants; let finalized_block_hashes = init.finalized_block_hashes; + // These are the pruned blocks that we should not report again. + for pruned in init.pruned_forks { + self.pruned_blocks.insert(pruned, ()); + } let finalized_block_hash = startup_point.finalized_hash; let finalized_block_runtime = self.generate_runtime_event(finalized_block_hash, None); @@ -345,11 +353,11 @@ where let best_block_hash = startup_point.best_hash; if best_block_hash != finalized_block_hash { let best_block = FollowEvent::BestBlockChanged(BestBlockChanged { best_block_hash }); - self.best_block_cache = Some(best_block_hash); + self.current_best_block = Some(best_block_hash); finalized_block_descendants.push(best_block); }; - Ok((finalized_block_descendants, init.pruned_forks)) + Ok(finalized_block_descendants) } /// Generate the "NewBlock" event and potentially the "BestBlockChanged" event for the @@ -377,19 +385,19 @@ where let best_block_event = FollowEvent::BestBlockChanged(BestBlockChanged { best_block_hash: block_hash }); - match self.best_block_cache { + match self.current_best_block { Some(block_cache) => { // The RPC layer has not reported this block as best before. // Note: This handles the race with the finalized branch. if block_cache != block_hash { - self.best_block_cache = Some(block_hash); + self.current_best_block = Some(block_hash); vec![new_block, best_block_event] } else { vec![new_block] } }, None => { - self.best_block_cache = Some(block_hash); + self.current_best_block = Some(block_hash); vec![new_block, best_block_event] }, } @@ -458,7 +466,7 @@ where // When the node falls out of sync and then syncs up to the tip of the chain, it can // happen that we skip notifications. Then it is better to terminate the connection // instead of trying to send notifications for all missed blocks. - if let Some(best_block_hash) = self.best_block_cache { + if let Some(best_block_hash) = self.current_best_block { let ancestor = sp_blockchain::lowest_common_ancestor( &*self.client, *hash, @@ -481,13 +489,10 @@ where } /// Get all pruned block hashes from the provided stale heads. - /// - /// The result does not include hashes from `to_ignore`. 
fn get_pruned_hashes( - &self, + &mut self, stale_heads: &[Block::Hash], last_finalized: Block::Hash, - to_ignore: &mut HashSet, ) -> Result, SubscriptionManagementError> { let blockchain = self.backend.blockchain(); let mut pruned = Vec::new(); @@ -497,11 +502,13 @@ where // Collect only blocks that are not part of the canonical chain. pruned.extend(tree_route.enacted().iter().filter_map(|block| { - if !to_ignore.remove(&block.hash) { - Some(block.hash) - } else { - None + if self.pruned_blocks.get(&block.hash).is_some() { + // The block was already reported as pruned. + return None } + + self.pruned_blocks.insert(block.hash, ()); + Some(block.hash) })) } @@ -515,7 +522,6 @@ where fn handle_finalized_blocks( &mut self, notification: FinalityNotification, - to_ignore: &mut HashSet, startup_point: &StartupPoint, ) -> Result>, SubscriptionManagementError> { let last_finalized = notification.hash; @@ -536,25 +542,32 @@ where // Report all pruned blocks from the notification that are not // part of the fork we need to ignore. let pruned_block_hashes = - self.get_pruned_hashes(¬ification.stale_heads, last_finalized, to_ignore)?; + self.get_pruned_hashes(¬ification.stale_heads, last_finalized)?; let finalized_event = FollowEvent::Finalized(Finalized { finalized_block_hashes, pruned_block_hashes: pruned_block_hashes.clone(), }); - match self.best_block_cache { - Some(block_cache) => { - // If the best block wasn't pruned, we are done here. - if !pruned_block_hashes.iter().any(|hash| *hash == block_cache) { - events.push(finalized_event); - return Ok(events) - } - - // The best block is reported as pruned. Therefore, we need to signal a new - // best block event before submitting the finalized event. + if let Some(current_best_block) = self.current_best_block { + // The best reported block is in the pruned list. Report a new best block. + let is_in_pruned_list = + pruned_block_hashes.iter().any(|hash| *hash == current_best_block); + // The block is not the last finalized block. + // + // It can be either: + // - a descendant of the last finalized block + // - a block on a fork that will be pruned in the future. + // + // In those cases, we emit a new best block. + let is_not_last_finalized = current_best_block != last_finalized; + + if is_in_pruned_list || is_not_last_finalized { + // We need to generate a best block event. let best_block_hash = self.client.info().best_hash; - if best_block_hash == block_cache { + + // Defensive check against state missmatch. + if best_block_hash == current_best_block { // The client doest not have any new information about the best block. // The information from `.info()` is updated from the DB as the last // step of the finalization and it should be up to date. @@ -564,23 +577,18 @@ where "[follow][id={:?}] Client does not contain different best block", self.sub_id, ); - events.push(finalized_event); - Ok(events) } else { // The RPC needs to also submit a new best block changed before the // finalized event. 
- self.best_block_cache = Some(best_block_hash); - let best_block_event = - FollowEvent::BestBlockChanged(BestBlockChanged { best_block_hash }); - events.extend([best_block_event, finalized_event]); - Ok(events) + self.current_best_block = Some(best_block_hash); + events + .push(FollowEvent::BestBlockChanged(BestBlockChanged { best_block_hash })); } - }, - None => { - events.push(finalized_event); - Ok(events) - }, + } } + + events.push(finalized_event); + Ok(events) } /// Submit the events from the provided stream to the RPC client @@ -589,7 +597,6 @@ where &mut self, startup_point: &StartupPoint, mut stream: EventStream, - mut to_ignore: HashSet, sink: SubscriptionSink, rx_stop: oneshot::Receiver<()>, ) -> Result<(), SubscriptionManagementError> @@ -612,7 +619,7 @@ where NotificationType::NewBlock(notification) => self.handle_import_blocks(notification, &startup_point), NotificationType::Finalized(notification) => - self.handle_finalized_blocks(notification, &mut to_ignore, &startup_point), + self.handle_finalized_blocks(notification, &startup_point), NotificationType::MethodResponse(notification) => Ok(vec![notification]), }; @@ -682,7 +689,7 @@ where .map(|response| NotificationType::MethodResponse(response)); let startup_point = StartupPoint::from(self.client.info()); - let (initial_events, pruned_forks) = match self.generate_init_events(&startup_point) { + let initial_events = match self.generate_init_events(&startup_point) { Ok(blocks) => blocks, Err(err) => { debug!( @@ -702,7 +709,6 @@ where let merged = tokio_stream::StreamExt::merge(merged, stream_responses); let stream = stream::once(futures::future::ready(initial)).chain(merged); - self.submit_events(&startup_point, stream.boxed(), pruned_forks, sink, sub_data.rx_stop) - .await + self.submit_events(&startup_point, stream.boxed(), sink, sub_data.rx_stop).await } } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index c2bff7c50d5..14f664858a0 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -187,6 +187,62 @@ async fn setup_api() -> ( (client, api, sub, sub_id, block) } +async fn import_block( + mut client: Arc>, + parent_hash: ::Hash, + parent_number: u64, +) -> Block { + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(parent_hash) + .with_parent_block_number(parent_number) + .build() + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + block +} + +async fn import_best_block_with_tx( + mut client: Arc>, + parent_hash: ::Hash, + parent_number: u64, + tx: Transfer, +) -> Block { + let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(parent_hash) + .with_parent_block_number(parent_number) + .build() + .unwrap(); + block_builder.push_transfer(tx).unwrap(); + let block = block_builder.build().unwrap().block; + client.import_as_best(BlockOrigin::Own, block.clone()).await.unwrap(); + block +} + +/// Check the subscription produces a new block and a best block event. +/// +/// The macro is used instead of a fn to preserve the lines of code in case of panics. +macro_rules! 
check_new_and_best_block_events { + ($sub:expr, $block_hash:expr, $parent_hash:expr) => { + let event: FollowEvent = get_next_event($sub).await; + let expected = FollowEvent::NewBlock(NewBlock { + block_hash: format!("{:?}", $block_hash), + parent_block_hash: format!("{:?}", $parent_hash), + new_runtime: None, + with_runtime: false, + }); + assert_eq!(event, expected); + + let event: FollowEvent = get_next_event($sub).await; + let expected = FollowEvent::BestBlockChanged(BestBlockChanged { + best_block_hash: format!("{:?}", $block_hash), + }); + assert_eq!(event, expected); + }; +} + #[tokio::test] async fn follow_subscription_produces_blocks() { let builder = TestClientBuilder::new(); @@ -3644,3 +3700,160 @@ async fn chain_head_limit_reached() { // Initialized must always be reported first. let _event: FollowEvent = get_next_event(&mut sub).await; } + +#[tokio::test] +async fn follow_unique_pruned_blocks() { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let client = Arc::new(builder.build()); + + let api = ChainHead::new( + client.clone(), + backend, + Arc::new(TaskExecutor::default()), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + max_lagging_distance: MAX_LAGGING_DISTANCE, + }, + ) + .into_rpc(); + + let finalized_hash = client.info().finalized_hash; + let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + + // Initialized must always be reported first. + let event: FollowEvent = get_next_event(&mut sub).await; + let expected = FollowEvent::Initialized(Initialized { + finalized_block_hashes: vec![format!("{:?}", finalized_hash)], + finalized_block_runtime: None, + with_runtime: false, + }); + assert_eq!(event, expected); + + // Block tree: + // + // finalized -> block 1 -> block 2 -> block 3 + // + // -> block 2 -> block 4 -> block 5 + // + // -> block 1 -> block 2_f -> block 6 + // ^^^ finalized + // -> block 7 + // ^^^ finalized + // -> block 8 + // ^^^ finalized + // The chainHead will see block 5 as the best block. However, the + // client will finalize the block 6, which is on another fork. + // + // When the block 6 is finalized, block 2 block 3 block 4 and block 5 are placed on an invalid + // fork. However, pruning of blocks happens on level N - 1. + // Therefore, no pruned blocks are reported yet. + // + // When the block 7 is finalized, block 3 is detected as stale. At this step, block 2 and 3 + // are reported as pruned. + // + // When the block 8 is finalized, block 5 block 4 and block 2 are detected as stale. However, + // only blocks 5 and 4 are reported as pruned. This is because the block 2 was previously + // reported. + + // Initial setup steps: + let block_1_hash = + import_block(client.clone(), client.chain_info().genesis_hash, 0).await.hash(); + let block_2_f_hash = import_block(client.clone(), block_1_hash, 1).await.hash(); + let block_6_hash = import_block(client.clone(), block_2_f_hash, 2).await.hash(); + // Import block 2 as best on the fork. 
+ let mut tx_alice_ferdie = Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }; + let block_2_hash = + import_best_block_with_tx(client.clone(), block_1_hash, 1, tx_alice_ferdie.clone()) + .await + .hash(); + + let block_3_hash = import_block(client.clone(), block_2_hash, 2).await.hash(); + // Fork block 4. + tx_alice_ferdie.nonce = 1; + let block_4_hash = import_best_block_with_tx(client.clone(), block_2_hash, 2, tx_alice_ferdie) + .await + .hash(); + let block_5_hash = import_block(client.clone(), block_4_hash, 3).await.hash(); + + // Check expected events generated by the setup. + { + // Check block 1 -> block 2f -> block 6. + check_new_and_best_block_events!(&mut sub, block_1_hash, finalized_hash); + check_new_and_best_block_events!(&mut sub, block_2_f_hash, block_1_hash); + check_new_and_best_block_events!(&mut sub, block_6_hash, block_2_f_hash); + + // Check (block 1 ->) block 2 -> block 3. + check_new_and_best_block_events!(&mut sub, block_2_hash, block_1_hash); + check_new_and_best_block_events!(&mut sub, block_3_hash, block_2_hash); + + // Check (block 1 -> block 2 ->) block 4 -> block 5. + check_new_and_best_block_events!(&mut sub, block_4_hash, block_2_hash); + check_new_and_best_block_events!(&mut sub, block_5_hash, block_4_hash); + } + + // Finalize the block 6 from the fork. + client.finalize_block(block_6_hash, None).unwrap(); + + // Expect to report the best block changed before the finalized event. + let event: FollowEvent = get_next_event(&mut sub).await; + let expected = FollowEvent::BestBlockChanged(BestBlockChanged { + best_block_hash: format!("{:?}", block_6_hash), + }); + assert_eq!(event, expected); + + // Block 2 must be reported as pruned, even if it was the previous best. + let event: FollowEvent = get_next_event(&mut sub).await; + let expected = FollowEvent::Finalized(Finalized { + finalized_block_hashes: vec![ + format!("{:?}", block_1_hash), + format!("{:?}", block_2_f_hash), + format!("{:?}", block_6_hash), + ], + pruned_block_hashes: vec![], + }); + assert_eq!(event, expected); + + // Pruned hash can be unpinned. + let sub_id = sub.subscription_id(); + let sub_id = serde_json::to_string(&sub_id).unwrap(); + let hash = format!("{:?}", block_2_hash); + let _res: () = api.call("chainHead_unstable_unpin", rpc_params![&sub_id, &hash]).await.unwrap(); + + // Import block 7 and check it. + let block_7_hash = import_block(client.clone(), block_6_hash, 3).await.hash(); + check_new_and_best_block_events!(&mut sub, block_7_hash, block_6_hash); + + // Finalize the block 7. + client.finalize_block(block_7_hash, None).unwrap(); + + let event: FollowEvent = get_next_event(&mut sub).await; + let expected = FollowEvent::Finalized(Finalized { + finalized_block_hashes: vec![format!("{:?}", block_7_hash)], + pruned_block_hashes: vec![format!("{:?}", block_2_hash), format!("{:?}", block_3_hash)], + }); + assert_eq!(event, expected); + + // Check block 8. + let block_8_hash = import_block(client.clone(), block_7_hash, 4).await.hash(); + check_new_and_best_block_events!(&mut sub, block_8_hash, block_7_hash); + + // Finalize the block 8. 
+ client.finalize_block(block_8_hash, None).unwrap(); + + let event: FollowEvent = get_next_event(&mut sub).await; + let expected = FollowEvent::Finalized(Finalized { + finalized_block_hashes: vec![format!("{:?}", block_8_hash)], + pruned_block_hashes: vec![format!("{:?}", block_4_hash), format!("{:?}", block_5_hash)], + }); + assert_eq!(event, expected); +} -- GitLab From 7a2c9d4a9ae495e9165975da234ff4b67f5bfeb4 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Wed, 17 Apr 2024 18:52:00 +0300 Subject: [PATCH 019/107] Fix nostd build of several crates (#4060) Preparation for https://github.com/paritytech/polkadot-sdk/pull/3935 Changes: - Add some `default-features = false` for the case that a crate and that dependency both support nostd builds. - Shuffle files around of some benchmarking-only crates. These conditionally disabled the `cfg_attr` for nostd and pulled in libstd. Example [here](https://github.com/ggwpez/zepter/pull/95). The actual logic is moved into a `inner.rs` to preserve nostd capability of the crate in case the benchmarking feature is disabled. - Add some `use sp_std::vec` where needed. - Remove some `optional = true` in cases where it was not optional. - Removed one superfluous `cfg_attr(not(feature = "std"), no_std..`. All in all this should be logical no-op. --------- Signed-off-by: Oliver Tale-Yazdi --- .../pallets/session-benchmarking/src/inner.rs | 42 + .../pallets/session-benchmarking/src/lib.rs | 34 +- .../assets/asset-hub-rococo/Cargo.toml | 10 +- .../assets/asset-hub-rococo/src/lib.rs | 19 - .../glutton/glutton-westend/Cargo.toml | 4 +- .../primitives/parachain-inherent/Cargo.toml | 6 +- polkadot/primitives/Cargo.toml | 3 +- polkadot/runtime/parachains/Cargo.toml | 3 +- .../xcm-executor/integration-tests/src/lib.rs | 1 - prdoc/pr_4060.prdoc | 54 ++ substrate/frame/Cargo.toml | 2 +- substrate/frame/atomic-swap/src/lib.rs | 1 + .../benchmarking/src/inner.rs | 89 ++ .../benchmarking/src/lib.rs | 74 +- substrate/frame/examples/dev-mode/src/lib.rs | 1 + .../frame/examples/offchain-worker/Cargo.toml | 2 +- substrate/frame/indices/Cargo.toml | 3 +- substrate/frame/nomination-pools/Cargo.toml | 4 +- .../benchmarking/src/inner.rs | 846 ++++++++++++++++++ .../nomination-pools/benchmarking/src/lib.rs | 835 +---------------- .../frame/offences/benchmarking/src/inner.rs | 250 ++++++ .../frame/offences/benchmarking/src/lib.rs | 238 +---- .../frame/offences/benchmarking/src/mock.rs | 5 +- substrate/frame/root-offences/Cargo.toml | 5 +- substrate/frame/root-offences/src/lib.rs | 5 +- .../frame/session/benchmarking/src/inner.rs | 162 ++++ .../frame/session/benchmarking/src/lib.rs | 152 +--- substrate/frame/src/lib.rs | 4 +- .../frame/system/benchmarking/src/inner.rs | 230 +++++ .../frame/system/benchmarking/src/lib.rs | 220 +---- substrate/frame/try-runtime/src/inner.rs | 50 ++ substrate/frame/try-runtime/src/lib.rs | 35 +- .../primitives/consensus/babe/Cargo.toml | 2 +- substrate/primitives/core/Cargo.toml | 2 +- substrate/primitives/session/Cargo.toml | 4 +- .../transaction-storage-proof/Cargo.toml | 4 +- 36 files changed, 1800 insertions(+), 1601 deletions(-) create mode 100644 cumulus/pallets/session-benchmarking/src/inner.rs create mode 100644 prdoc/pr_4060.prdoc create mode 100644 substrate/frame/election-provider-support/benchmarking/src/inner.rs create mode 100644 substrate/frame/nomination-pools/benchmarking/src/inner.rs create mode 100644 substrate/frame/offences/benchmarking/src/inner.rs create mode 100644 substrate/frame/session/benchmarking/src/inner.rs create 
mode 100644 substrate/frame/system/benchmarking/src/inner.rs create mode 100644 substrate/frame/try-runtime/src/inner.rs diff --git a/cumulus/pallets/session-benchmarking/src/inner.rs b/cumulus/pallets/session-benchmarking/src/inner.rs new file mode 100644 index 00000000000..cffd0776f3d --- /dev/null +++ b/cumulus/pallets/session-benchmarking/src/inner.rs @@ -0,0 +1,42 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarking setup for pallet-session. + +use sp_std::{prelude::*, vec}; + +use frame_benchmarking::{benchmarks, whitelisted_caller}; +use frame_system::RawOrigin; +use pallet_session::*; +use parity_scale_codec::Decode; +pub struct Pallet(pallet_session::Pallet); +pub trait Config: pallet_session::Config {} + +benchmarks! { + set_keys { + let caller: T::AccountId = whitelisted_caller(); + frame_system::Pallet::::inc_providers(&caller); + let keys = T::Keys::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()).unwrap(); + let proof: Vec = vec![0,1,2,3]; + }: _(RawOrigin::Signed(caller), keys, proof) + + purge_keys { + let caller: T::AccountId = whitelisted_caller(); + frame_system::Pallet::::inc_providers(&caller); + let keys = T::Keys::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()).unwrap(); + let proof: Vec = vec![0,1,2,3]; + let _t = pallet_session::Pallet::::set_keys(RawOrigin::Signed(caller.clone()).into(), keys, proof); + }: _(RawOrigin::Signed(caller)) +} diff --git a/cumulus/pallets/session-benchmarking/src/lib.rs b/cumulus/pallets/session-benchmarking/src/lib.rs index f474def6b13..a95d6fb7d59 100644 --- a/cumulus/pallets/session-benchmarking/src/lib.rs +++ b/cumulus/pallets/session-benchmarking/src/lib.rs @@ -1,3 +1,5 @@ +// This file is part of Substrate. + // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 @@ -13,31 +15,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Benchmarking setup for pallet-session -#![cfg_attr(not(feature = "std"), no_std)] -#![cfg(feature = "runtime-benchmarks")] -use sp_std::{prelude::*, vec}; +//! Benchmarks for the Session Pallet. +// This is separated into its own crate due to cyclic dependency issues. -use frame_benchmarking::{benchmarks, whitelisted_caller}; -use frame_system::RawOrigin; -use pallet_session::*; -use parity_scale_codec::Decode; -pub struct Pallet(pallet_session::Pallet); -pub trait Config: pallet_session::Config {} +#![cfg_attr(not(feature = "std"), no_std)] -benchmarks! 
{ - set_keys { - let caller: T::AccountId = whitelisted_caller(); - frame_system::Pallet::::inc_providers(&caller); - let keys = T::Keys::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()).unwrap(); - let proof: Vec = vec![0,1,2,3]; - }: _(RawOrigin::Signed(caller), keys, proof) +#[cfg(feature = "runtime-benchmarks")] +pub mod inner; - purge_keys { - let caller: T::AccountId = whitelisted_caller(); - frame_system::Pallet::::inc_providers(&caller); - let keys = T::Keys::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()).unwrap(); - let proof: Vec = vec![0,1,2,3]; - let _t = pallet_session::Pallet::::set_keys(RawOrigin::Signed(caller.clone()).into(), keys, proof); - }: _(RawOrigin::Signed(caller)) -} +#[cfg(feature = "runtime-benchmarks")] +pub use inner::*; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index e8be734214f..47574783810 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -37,7 +37,7 @@ pallet-nfts = { path = "../../../../../substrate/frame/nfts", default-features = pallet-nfts-runtime-api = { path = "../../../../../substrate/frame/nfts/runtime-api", default-features = false } pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false } pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-state-trie-migration = { path = "../../../../../substrate/frame/state-trie-migration", default-features = false, optional = true } +pallet-state-trie-migration = { path = "../../../../../substrate/frame/state-trie-migration", default-features = false } pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } @@ -102,14 +102,6 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", [features] default = ["std"] -# When enabled the `state_version` is set to `1`. -# This means that the chain will start using the new state format. The migration is lazy, so -# it requires to write a storage value to use the new state format. To migrate all the other -# storage values that aren't touched the state migration pallet is added as well. -# This pallet will migrate the entire state, controlled through some account. -# -# This feature should be removed when the main-net will be migrated. -state-trie-version-1 = ["pallet-state-trie-migration"] runtime-benchmarks = [ "assets-common/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 30e211a8f1d..5cb29343a1c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -107,7 +107,6 @@ impl_opaque_keys! 
{ } } -#[cfg(feature = "state-trie-version-1")] #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), @@ -120,19 +119,6 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { state_version: 1, }; -#[cfg(not(feature = "state-trie-version-1"))] -#[sp_version::runtime_version] -pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("statemine"), - impl_name: create_runtime_str!("statemine"), - authoring_version: 1, - spec_version: 1_010_000, - impl_version: 0, - apis: RUNTIME_API_VERSIONS, - transaction_version: 14, - state_version: 0, -}; - /// The version information used to identify this runtime when compiled natively. #[cfg(feature = "std")] pub fn native_version() -> NativeVersion { @@ -953,7 +939,6 @@ construct_runtime!( PoolAssets: pallet_assets:: = 55, AssetConversion: pallet_asset_conversion = 56, - #[cfg(feature = "state-trie-version-1")] StateTrieMigration: pallet_state_trie_migration = 70, // TODO: the pallet instance should be removed once all pools have migrated @@ -1695,7 +1680,6 @@ cumulus_pallet_parachain_system::register_validate_block! { BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, } -#[cfg(feature = "state-trie-version-1")] parameter_types! { // The deposit configuration for the singed migration. Specially if you want to allow any signed account to do the migration (see `SignedFilter`, these deposits should be high) pub const MigrationSignedDepositPerItem: Balance = CENTS; @@ -1703,7 +1687,6 @@ parameter_types! { pub const MigrationMaxKeyLen: u32 = 512; } -#[cfg(feature = "state-trie-version-1")] impl pallet_state_trie_migration::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; @@ -1721,13 +1704,11 @@ impl pallet_state_trie_migration::Config for Runtime { type MaxKeyLen = MigrationMaxKeyLen; } -#[cfg(feature = "state-trie-version-1")] frame_support::ord_parameter_types! 
{ pub const MigController: AccountId = AccountId::from(hex_literal::hex!("8458ed39dc4b6f6c7255f7bc42be50c2967db126357c999d44e12ca7ac80dc52")); pub const RootMigController: AccountId = AccountId::from(hex_literal::hex!("8458ed39dc4b6f6c7255f7bc42be50c2967db126357c999d44e12ca7ac80dc52")); } -#[cfg(feature = "state-trie-version-1")] #[test] fn ensure_key_ss58() { use frame_support::traits::SortedMembers; diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml index fe9cd25841b..808bed38732 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml @@ -22,8 +22,8 @@ frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/r frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-glutton = { path = "../../../../../substrate/frame/glutton", default-features = false, optional = true } -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false, optional = true } +pallet-glutton = { path = "../../../../../substrate/frame/glutton", default-features = false } +pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } diff --git a/cumulus/primitives/parachain-inherent/Cargo.toml b/cumulus/primitives/parachain-inherent/Cargo.toml index fcf4c93bc2f..4da561661b6 100644 --- a/cumulus/primitives/parachain-inherent/Cargo.toml +++ b/cumulus/primitives/parachain-inherent/Cargo.toml @@ -17,8 +17,8 @@ scale-info = { version = "2.11.1", default-features = false, features = ["derive # Substrate sp-core = { path = "../../../substrate/primitives/core", default-features = false } sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", optional = true } -sp-state-machine = { path = "../../../substrate/primitives/state-machine", optional = true } +sp-runtime = { path = "../../../substrate/primitives/runtime", optional = true, default-features = false } +sp-state-machine = { path = "../../../substrate/primitives/state-machine", optional = true, default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } @@ -34,6 +34,8 @@ std = [ "scale-info/std", "sp-core/std", "sp-inherents/std", + "sp-runtime?/std", + "sp-state-machine?/std", "sp-std/std", "sp-trie/std", ] diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index 004fa62acf3..99800afc37f 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -26,7 +26,7 @@ sp-arithmetic = { path = "../../substrate/primitives/arithmetic", default-featur sp-authority-discovery = { path = "../../substrate/primitives/authority-discovery", default-features = false, features = 
["serde"] } sp-consensus-slots = { path = "../../substrate/primitives/consensus/slots", default-features = false, features = ["serde"] } sp-io = { path = "../../substrate/primitives/io", default-features = false } -sp-keystore = { path = "../../substrate/primitives/keystore", optional = true } +sp-keystore = { path = "../../substrate/primitives/keystore", optional = true, default-features = false } sp-staking = { path = "../../substrate/primitives/staking", default-features = false, features = ["serde"] } sp-std = { package = "sp-std", path = "../../substrate/primitives/std", default-features = false } @@ -53,6 +53,7 @@ std = [ "sp-consensus-slots/std", "sp-io/std", "sp-keystore", + "sp-keystore?/std", "sp-staking/std", "sp-std/std", ] diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index dff8549f29f..402c6e487a1 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -28,7 +28,7 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-session = { path = "../../../substrate/primitives/session", default-features = false } sp-staking = { path = "../../../substrate/primitives/staking", default-features = false, features = ["serde"] } sp-core = { path = "../../../substrate/primitives/core", default-features = false, features = ["serde"] } -sp-keystore = { path = "../../../substrate/primitives/keystore", optional = true } +sp-keystore = { path = "../../../substrate/primitives/keystore", optional = true, default-features = false } sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false, optional = true } sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false, optional = true } sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } @@ -108,6 +108,7 @@ std = [ "sp-core/std", "sp-io/std", "sp-keystore", + "sp-keystore?/std", "sp-runtime/std", "sp-session/std", "sp-staking/std", diff --git a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs index 1d1ee40d092..279d7118f8c 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs +++ b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -#![cfg_attr(not(feature = "std"), no_std)] #![cfg(test)] use codec::Encode; diff --git a/prdoc/pr_4060.prdoc b/prdoc/pr_4060.prdoc new file mode 100644 index 00000000000..621620a4489 --- /dev/null +++ b/prdoc/pr_4060.prdoc @@ -0,0 +1,54 @@ +title: "Fix nostd build of several crates" + +doc: + - audience: Runtime Dev + description: | + Fixes feature and dependency configuration of several crate. This should allow for better no-std build capabilities. 
+ +crates: + - name: cumulus-pallet-session-benchmarking + bump: patch + - name: asset-hub-rococo-runtime + bump: patch + - name: glutton-westend-runtime + bump: patch + - name: cumulus-primitives-parachain-inherent + bump: patch + - name: polkadot-primitives + bump: patch + - name: polkadot-runtime-parachains + bump: patch + - name: xcm-executor-integration-tests + bump: patch + - name: pallet-atomic-swap + bump: patch + - name: pallet-election-provider-support-benchmarking + bump: patch + - name: pallet-dev-mode + bump: patch + - name: pallet-example-offchain-worker + bump: patch + - name: pallet-indices + bump: patch + - name: pallet-nomination-pools + bump: patch + - name: pallet-nomination-pools-benchmarking + bump: patch + - name: pallet-offences-benchmarking + bump: patch + - name: pallet-root-offences + bump: patch + - name: pallet-session-benchmarking + bump: patch + - name: frame-system-benchmarking + bump: patch + - name: sp-consensus-babe + bump: patch + - name: sp-consensus-babe + bump: patch + - name: sp-core + bump: patch + - name: sp-session + bump: patch + - name: sp-transaction-storage-proof + bump: patch diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index 919d6d17ce8..84bab86581c 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" license = "Apache-2.0" homepage = "paritytech.github.io" repository.workspace = true -description = "The single package to get you started with building frame pallets and runtimes" +description = "Experimental: The single package to get you started with building frame pallets and runtimes" publish = false [lints] diff --git a/substrate/frame/atomic-swap/src/lib.rs b/substrate/frame/atomic-swap/src/lib.rs index 609903e67e3..dc0300dc1a5 100644 --- a/substrate/frame/atomic-swap/src/lib.rs +++ b/substrate/frame/atomic-swap/src/lib.rs @@ -58,6 +58,7 @@ use frame_system::pallet_prelude::BlockNumberFor; use scale_info::TypeInfo; use sp_io::hashing::blake2_256; use sp_runtime::RuntimeDebug; +use sp_std::vec::Vec; /// Pending atomic swap operation. #[derive(Clone, Eq, PartialEq, RuntimeDebugNoBound, Encode, Decode, TypeInfo, MaxEncodedLen)] diff --git a/substrate/frame/election-provider-support/benchmarking/src/inner.rs b/substrate/frame/election-provider-support/benchmarking/src/inner.rs new file mode 100644 index 00000000000..4722680cfcc --- /dev/null +++ b/substrate/frame/election-provider-support/benchmarking/src/inner.rs @@ -0,0 +1,89 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Election provider support pallet benchmarking. +//! This is separated into its own crate to avoid bloating the size of the runtime. 
+
+use codec::Decode;
+use frame_benchmarking::v1::benchmarks;
+use frame_election_provider_support::{NposSolver, PhragMMS, SequentialPhragmen};
+use sp_std::vec::Vec;
+
+pub struct Pallet<T: Config>(frame_system::Pallet<T>);
+pub trait Config: frame_system::Config {}
+
+const VOTERS: [u32; 2] = [1_000, 2_000];
+const TARGETS: [u32; 2] = [500, 1_000];
+const VOTES_PER_VOTER: [u32; 2] = [5, 16];
+
+const SEED: u32 = 999;
+fn set_up_voters_targets<AccountId: Decode + Clone>(
+	voters_len: u32,
+	targets_len: u32,
+	degree: usize,
+) -> (Vec<(AccountId, u64, impl IntoIterator<Item = AccountId>)>, Vec<AccountId>) {
+	// fill targets.
+	let mut targets = (0..targets_len)
+		.map(|i| frame_benchmarking::account::<AccountId>("Target", i, SEED))
+		.collect::<Vec<_>>();
+	assert!(targets.len() > degree, "we should always have enough voters to fill");
+	targets.truncate(degree);
+
+	// fill voters.
+	let voters = (0..voters_len)
+		.map(|i| {
+			let voter = frame_benchmarking::account::<AccountId>("Voter", i, SEED);
+			(voter, 1_000, targets.clone())
+		})
+		.collect::<Vec<_>>();
+
+	(voters, targets)
+}
+
+benchmarks! {
+	phragmen {
+		// number of votes in snapshot.
+		let v in (VOTERS[0]) .. VOTERS[1];
+		// number of targets in snapshot.
+		let t in (TARGETS[0]) .. TARGETS[1];
+		// number of votes per voter (ie the degree).
+		let d in (VOTES_PER_VOTER[0]) .. VOTES_PER_VOTER[1];
+
+		let (voters, targets) = set_up_voters_targets::<T::AccountId>(v, t, d as usize);
+	}: {
+		assert!(
+			SequentialPhragmen::<T::AccountId, sp_runtime::Perbill>
+				::solve(d as usize, targets, voters).is_ok()
+		);
+	}
+
+	phragmms {
+		// number of votes in snapshot.
+		let v in (VOTERS[0]) .. VOTERS[1];
+		// number of targets in snapshot.
+		let t in (TARGETS[0]) .. TARGETS[1];
+		// number of votes per voter (ie the degree).
+		let d in (VOTES_PER_VOTER[0]) .. VOTES_PER_VOTER[1];
+
+		let (voters, targets) = set_up_voters_targets::<T::AccountId>(v, t, d as usize);
+	}: {
+		assert!(
+			PhragMMS::<T::AccountId, sp_runtime::Perbill>
+				::solve(d as usize, targets, voters).is_ok()
+		);
+	}
+}
diff --git a/substrate/frame/election-provider-support/benchmarking/src/lib.rs b/substrate/frame/election-provider-support/benchmarking/src/lib.rs
index 6c75aed0a91..78b226e52af 100644
--- a/substrate/frame/election-provider-support/benchmarking/src/lib.rs
+++ b/substrate/frame/election-provider-support/benchmarking/src/lib.rs
@@ -16,77 +16,11 @@
// limitations under the License.

//! Election provider support pallet benchmarking.
-//! This is separated into its own crate to avoid bloating the size of the runtime.

-#![cfg(feature = "runtime-benchmarks")]
#![cfg_attr(not(feature = "std"), no_std)]

-use codec::Decode;
-use frame_benchmarking::v1::benchmarks;
-use frame_election_provider_support::{NposSolver, PhragMMS, SequentialPhragmen};
-use sp_std::vec::Vec;
+#[cfg(feature = "runtime-benchmarks")]
+pub mod inner;

-pub struct Pallet<T: Config>(frame_system::Pallet<T>);
-pub trait Config: frame_system::Config {}
-
-const VOTERS: [u32; 2] = [1_000, 2_000];
-const TARGETS: [u32; 2] = [500, 1_000];
-const VOTES_PER_VOTER: [u32; 2] = [5, 16];
-
-const SEED: u32 = 999;
-fn set_up_voters_targets<AccountId: Decode + Clone>(
-	voters_len: u32,
-	targets_len: u32,
-	degree: usize,
-) -> (Vec<(AccountId, u64, impl IntoIterator<Item = AccountId>)>, Vec<AccountId>) {
-	// fill targets.
-	let mut targets = (0..targets_len)
-		.map(|i| frame_benchmarking::account::<AccountId>("Target", i, SEED))
-		.collect::<Vec<_>>();
-	assert!(targets.len() > degree, "we should always have enough voters to fill");
-	targets.truncate(degree);
-
-	// fill voters.
-	let voters = (0..voters_len)
-		.map(|i| {
-			let voter = frame_benchmarking::account::<AccountId>("Voter", i, SEED);
-			(voter, 1_000, targets.clone())
-		})
-		.collect::<Vec<_>>();
-
-	(voters, targets)
-}
-
-benchmarks! {
-	phragmen {
-		// number of votes in snapshot.
-		let v in (VOTERS[0]) .. VOTERS[1];
-		// number of targets in snapshot.
-		let t in (TARGETS[0]) .. TARGETS[1];
-		// number of votes per voter (ie the degree).
-		let d in (VOTES_PER_VOTER[0]) .. VOTES_PER_VOTER[1];
-
-		let (voters, targets) = set_up_voters_targets::<T::AccountId>(v, t, d as usize);
-	}: {
-		assert!(
-			SequentialPhragmen::<T::AccountId, sp_runtime::Perbill>
-				::solve(d as usize, targets, voters).is_ok()
-		);
-	}
-
-	phragmms {
-		// number of votes in snapshot.
-		let v in (VOTERS[0]) .. VOTERS[1];
-		// number of targets in snapshot.
-		let t in (TARGETS[0]) .. TARGETS[1];
-		// number of votes per voter (ie the degree).
-		let d in (VOTES_PER_VOTER[0]) .. VOTES_PER_VOTER[1];
-
-		let (voters, targets) = set_up_voters_targets::<T::AccountId>(v, t, d as usize);
-	}: {
-		assert!(
-			PhragMMS::<T::AccountId, sp_runtime::Perbill>
-				::solve(d as usize, targets, voters).is_ok()
-		);
-	}
-}
+#[cfg(feature = "runtime-benchmarks")]
+pub use inner::*;
diff --git a/substrate/frame/examples/dev-mode/src/lib.rs b/substrate/frame/examples/dev-mode/src/lib.rs
index d57e7a5b76b..15f1a4b5d61 100644
--- a/substrate/frame/examples/dev-mode/src/lib.rs
+++ b/substrate/frame/examples/dev-mode/src/lib.rs
@@ -30,6 +30,7 @@
use frame_support::dispatch::DispatchResult;
use frame_system::ensure_signed;
+use sp_std::{vec, vec::Vec};

// Re-export pallet items so that they can be accessed from the crate namespace.
pub use pallet::*;
diff --git a/substrate/frame/examples/offchain-worker/Cargo.toml b/substrate/frame/examples/offchain-worker/Cargo.toml
index 468af0345ca..9363f753352 100644
--- a/substrate/frame/examples/offchain-worker/Cargo.toml
+++ b/substrate/frame/examples/offchain-worker/Cargo.toml
@@ -24,7 +24,7 @@ frame-support = { path = "../../support", default-features = false }
frame-system = { path = "../../system", default-features = false }
sp-core = { path = "../../../primitives/core", default-features = false }
sp-io = { path = "../../../primitives/io", default-features = false }
-sp-keystore = { path = "../../../primitives/keystore", optional = true }
+sp-keystore = { path = "../../../primitives/keystore", optional = true, default-features = false }
sp-runtime = { path = "../../../primitives/runtime", default-features = false }
sp-std = { path = "../../../primitives/std", default-features = false }
diff --git a/substrate/frame/indices/Cargo.toml b/substrate/frame/indices/Cargo.toml
index 7b14bf358f1..8684f347270 100644
--- a/substrate/frame/indices/Cargo.toml
+++ b/substrate/frame/indices/Cargo.toml
@@ -23,7 +23,7 @@ frame-support = { path = "../support", default-features = false }
frame-system = { path = "../system", default-features = false }
sp-core = { path = "../../primitives/core", default-features = false }
sp-io = { path = "../../primitives/io", default-features = false }
-sp-keyring = { path = "../../primitives/keyring", optional = true }
+sp-keyring = { path = "../../primitives/keyring", optional = true, default-features = false }
sp-runtime = { path = "../../primitives/runtime", default-features = false }
sp-std = { path = "../../primitives/std", default-features = false }
@@ -42,6 +42,7 @@ std = [
 "sp-core/std",
 "sp-io/std",
 "sp-keyring",
+ "sp-keyring?/std",
 "sp-runtime/std",
 "sp-std/std",
]
diff --git a/substrate/frame/nomination-pools/Cargo.toml b/substrate/frame/nomination-pools/Cargo.toml
index 55e9ef6fbd3..eddcc8e4e1d 100644
--- a/substrate/frame/nomination-pools/Cargo.toml
+++ b/substrate/frame/nomination-pools/Cargo.toml
@@ -34,8 +34,8 @@ sp-io = { path = "../../primitives/io", default-features = false }
log = { workspace =
true } # Optional: use for testing and/or fuzzing -pallet-balances = { path = "../balances", optional = true } -sp-tracing = { path = "../../primitives/tracing", optional = true } +pallet-balances = { path = "../balances", optional = true, default-features = false } +sp-tracing = { path = "../../primitives/tracing", optional = true, default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } diff --git a/substrate/frame/nomination-pools/benchmarking/src/inner.rs b/substrate/frame/nomination-pools/benchmarking/src/inner.rs new file mode 100644 index 00000000000..277060e7f64 --- /dev/null +++ b/substrate/frame/nomination-pools/benchmarking/src/inner.rs @@ -0,0 +1,846 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for the nomination pools coupled with the staking and bags list pallets. + +use frame_benchmarking::v1::{account, whitelist_account}; +use frame_election_provider_support::SortedListProvider; +use frame_support::{ + assert_ok, ensure, + traits::{ + fungible::{Inspect, Mutate, Unbalanced}, + Get, + }, +}; +use frame_system::RawOrigin as RuntimeOrigin; +use pallet_nomination_pools::{ + BalanceOf, BondExtra, BondedPoolInner, BondedPools, ClaimPermission, ClaimPermissions, + Commission, CommissionChangeRate, CommissionClaimPermission, ConfigOp, GlobalMaxCommission, + MaxPoolMembers, MaxPoolMembersPerPool, MaxPools, Metadata, MinCreateBond, MinJoinBond, + Pallet as Pools, PoolMembers, PoolRoles, PoolState, RewardPools, SubPoolsStorage, +}; +use pallet_staking::MaxNominationsOf; +use sp_runtime::{ + traits::{Bounded, StaticLookup, Zero}, + Perbill, +}; +use sp_staking::{EraIndex, StakingInterface}; +use sp_std::{vec, vec::Vec}; +// `frame_benchmarking::benchmarks!` macro needs this +use pallet_nomination_pools::Call; + +type CurrencyOf = ::Currency; + +const USER_SEED: u32 = 0; +const MAX_SPANS: u32 = 100; + +pub(crate) type VoterBagsListInstance = pallet_bags_list::Instance1; +pub trait Config: + pallet_nomination_pools::Config + + pallet_staking::Config + + pallet_bags_list::Config +{ +} + +pub struct Pallet(Pools); + +fn create_funded_user_with_balance( + string: &'static str, + n: u32, + balance: BalanceOf, +) -> T::AccountId { + let user = account(string, n, USER_SEED); + T::Currency::set_balance(&user, balance); + user +} + +// Create a bonded pool account, bonding `balance` and giving the account `balance * 2` free +// balance. 
+fn create_pool_account( + n: u32, + balance: BalanceOf, + commission: Option, +) -> (T::AccountId, T::AccountId) { + let ed = CurrencyOf::::minimum_balance(); + let pool_creator: T::AccountId = + create_funded_user_with_balance::("pool_creator", n, ed + balance * 2u32.into()); + let pool_creator_lookup = T::Lookup::unlookup(pool_creator.clone()); + + Pools::::create( + RuntimeOrigin::Signed(pool_creator.clone()).into(), + balance, + pool_creator_lookup.clone(), + pool_creator_lookup.clone(), + pool_creator_lookup, + ) + .unwrap(); + + if let Some(c) = commission { + let pool_id = pallet_nomination_pools::LastPoolId::::get(); + Pools::::set_commission( + RuntimeOrigin::Signed(pool_creator.clone()).into(), + pool_id, + Some((c, pool_creator.clone())), + ) + .expect("pool just created, commission can be set by root; qed"); + } + + let pool_account = pallet_nomination_pools::BondedPools::::iter() + .find(|(_, bonded_pool)| bonded_pool.roles.depositor == pool_creator) + .map(|(pool_id, _)| Pools::::create_bonded_account(pool_id)) + .expect("pool_creator created a pool above"); + + (pool_creator, pool_account) +} + +fn vote_to_balance( + vote: u64, +) -> Result, &'static str> { + vote.try_into().map_err(|_| "could not convert u64 to Balance") +} + +#[allow(unused)] +struct ListScenario { + /// Stash/Controller that is expected to be moved. + origin1: T::AccountId, + creator1: T::AccountId, + dest_weight: BalanceOf, + origin1_member: Option, +} + +impl ListScenario { + /// An expensive scenario for bags-list implementation: + /// + /// - the node to be updated (r) is the head of a bag that has at least one other node. The bag + /// itself will need to be read and written to update its head. The node pointed to by r.next + /// will need to be read and written as it will need to have its prev pointer updated. Note + /// that there are two other worst case scenarios for bag removal: 1) the node is a tail and + /// 2) the node is a middle node with prev and next; all scenarios end up with the same number + /// of storage reads and writes. + /// + /// - the destination bag has at least one node, which will need its next pointer updated. + pub(crate) fn new( + origin_weight: BalanceOf, + is_increase: bool, + ) -> Result { + ensure!(!origin_weight.is_zero(), "origin weight must be greater than 0"); + + ensure!( + pallet_nomination_pools::MaxPools::::get().unwrap_or(0) >= 3, + "must allow at least three pools for benchmarks" + ); + + // Burn the entire issuance. + CurrencyOf::::set_total_issuance(Zero::zero()); + + // Create accounts with the origin weight + let (pool_creator1, pool_origin1) = + create_pool_account::(USER_SEED + 1, origin_weight, Some(Perbill::from_percent(50))); + + T::Staking::nominate( + &pool_origin1, + // NOTE: these don't really need to be validators. 
+ vec![account("random_validator", 0, USER_SEED)], + )?; + + let (_, pool_origin2) = + create_pool_account::(USER_SEED + 2, origin_weight, Some(Perbill::from_percent(50))); + + T::Staking::nominate( + &pool_origin2, + vec![account("random_validator", 0, USER_SEED)].clone(), + )?; + + // Find a destination weight that will trigger the worst case scenario + let dest_weight_as_vote = ::VoterList::score_update_worst_case( + &pool_origin1, + is_increase, + ); + + let dest_weight: BalanceOf = + dest_weight_as_vote.try_into().map_err(|_| "could not convert u64 to Balance")?; + + // Create an account with the worst case destination weight + let (_, pool_dest1) = + create_pool_account::(USER_SEED + 3, dest_weight, Some(Perbill::from_percent(50))); + + T::Staking::nominate(&pool_dest1, vec![account("random_validator", 0, USER_SEED)])?; + + let weight_of = pallet_staking::Pallet::::weight_of_fn(); + assert_eq!(vote_to_balance::(weight_of(&pool_origin1)).unwrap(), origin_weight); + assert_eq!(vote_to_balance::(weight_of(&pool_origin2)).unwrap(), origin_weight); + assert_eq!(vote_to_balance::(weight_of(&pool_dest1)).unwrap(), dest_weight); + + Ok(ListScenario { + origin1: pool_origin1, + creator1: pool_creator1, + dest_weight, + origin1_member: None, + }) + } + + fn add_joiner(mut self, amount: BalanceOf) -> Self { + let amount = MinJoinBond::::get() + .max(CurrencyOf::::minimum_balance()) + // Max `amount` with minimum thresholds for account balance and joining a pool + // to ensure 1) the user can be created and 2) can join the pool + .max(amount); + + let joiner: T::AccountId = account("joiner", USER_SEED, 0); + self.origin1_member = Some(joiner.clone()); + CurrencyOf::::set_balance(&joiner, amount * 2u32.into()); + + let original_bonded = T::Staking::active_stake(&self.origin1).unwrap(); + + // Unbond `amount` from the underlying pool account so when the member joins + // we will maintain `current_bonded`. + T::Staking::unbond(&self.origin1, amount).expect("the pool was created in `Self::new`."); + + // Account pool points for the unbonded balance. + BondedPools::::mutate(&1, |maybe_pool| { + maybe_pool.as_mut().map(|pool| pool.points -= amount) + }); + + Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), amount, 1).unwrap(); + + // check that the vote weight is still the same as the original bonded + let weight_of = pallet_staking::Pallet::::weight_of_fn(); + assert_eq!(vote_to_balance::(weight_of(&self.origin1)).unwrap(), original_bonded); + + // check the member was added correctly + let member = PoolMembers::::get(&joiner).unwrap(); + assert_eq!(member.points, amount); + assert_eq!(member.pool_id, 1); + + self + } +} + +frame_benchmarking::benchmarks! { + join { + let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); + + // setup the worst case list scenario. 
+ let scenario = ListScenario::::new(origin_weight, true)?; + assert_eq!( + T::Staking::active_stake(&scenario.origin1).unwrap(), + origin_weight + ); + + let max_additional = scenario.dest_weight - origin_weight; + let joiner_free = CurrencyOf::::minimum_balance() + max_additional; + + let joiner: T::AccountId + = create_funded_user_with_balance::("joiner", 0, joiner_free); + + whitelist_account!(joiner); + }: _(RuntimeOrigin::Signed(joiner.clone()), max_additional, 1) + verify { + assert_eq!(CurrencyOf::::balance(&joiner), joiner_free - max_additional); + assert_eq!( + T::Staking::active_stake(&scenario.origin1).unwrap(), + scenario.dest_weight + ); + } + + bond_extra_transfer { + let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); + let scenario = ListScenario::::new(origin_weight, true)?; + let extra = scenario.dest_weight - origin_weight; + + // creator of the src pool will bond-extra, bumping itself to dest bag. + + }: bond_extra(RuntimeOrigin::Signed(scenario.creator1.clone()), BondExtra::FreeBalance(extra)) + verify { + assert!( + T::Staking::active_stake(&scenario.origin1).unwrap() >= + scenario.dest_weight + ); + } + + bond_extra_other { + let claimer: T::AccountId = account("claimer", USER_SEED + 4, 0); + + let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); + let scenario = ListScenario::::new(origin_weight, true)?; + let extra = (scenario.dest_weight - origin_weight).max(CurrencyOf::::minimum_balance()); + + // set claim preferences to `PermissionlessAll` to any account to bond extra on member's behalf. + let _ = Pools::::set_claim_permission(RuntimeOrigin::Signed(scenario.creator1.clone()).into(), ClaimPermission::PermissionlessAll); + + // transfer exactly `extra` to the depositor of the src pool (1), + let reward_account1 = Pools::::create_reward_account(1); + assert!(extra >= CurrencyOf::::minimum_balance()); + let _ = CurrencyOf::::mint_into(&reward_account1, extra); + + }: _(RuntimeOrigin::Signed(claimer), T::Lookup::unlookup(scenario.creator1.clone()), BondExtra::Rewards) + verify { + // commission of 50% deducted here. + assert!( + T::Staking::active_stake(&scenario.origin1).unwrap() >= + scenario.dest_weight / 2u32.into() + ); + } + + claim_payout { + let claimer: T::AccountId = account("claimer", USER_SEED + 4, 0); + let commission = Perbill::from_percent(50); + let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); + let ed = CurrencyOf::::minimum_balance(); + let (depositor, pool_account) = create_pool_account::(0, origin_weight, Some(commission)); + let reward_account = Pools::::create_reward_account(1); + + // Send funds to the reward account of the pool + CurrencyOf::::set_balance(&reward_account, ed + origin_weight); + + // set claim preferences to `PermissionlessAll` so any account can claim rewards on member's + // behalf. + let _ = Pools::::set_claim_permission(RuntimeOrigin::Signed(depositor.clone()).into(), ClaimPermission::PermissionlessAll); + + // Sanity check + assert_eq!( + CurrencyOf::::balance(&depositor), + origin_weight + ); + + whitelist_account!(depositor); + }:claim_payout_other(RuntimeOrigin::Signed(claimer), depositor.clone()) + verify { + assert_eq!( + CurrencyOf::::balance(&depositor), + origin_weight + commission * origin_weight + ); + assert_eq!( + CurrencyOf::::balance(&reward_account), + ed + commission * origin_weight + ); + } + + + unbond { + // The weight the nominator will start at. The value used here is expected to be + // significantly higher than the first position in a list (e.g. 
the first bag threshold). + let origin_weight = Pools::::depositor_min_bond() * 200u32.into(); + let scenario = ListScenario::::new(origin_weight, false)?; + let amount = origin_weight - scenario.dest_weight; + + let scenario = scenario.add_joiner(amount); + let member_id = scenario.origin1_member.unwrap().clone(); + let member_id_lookup = T::Lookup::unlookup(member_id.clone()); + let all_points = PoolMembers::::get(&member_id).unwrap().points; + whitelist_account!(member_id); + }: _(RuntimeOrigin::Signed(member_id.clone()), member_id_lookup, all_points) + verify { + let bonded_after = T::Staking::active_stake(&scenario.origin1).unwrap(); + // We at least went down to the destination bag + assert!(bonded_after <= scenario.dest_weight); + let member = PoolMembers::::get( + &member_id + ) + .unwrap(); + assert_eq!( + member.unbonding_eras.keys().cloned().collect::>(), + vec![0 + T::Staking::bonding_duration()] + ); + assert_eq!( + member.unbonding_eras.values().cloned().collect::>(), + vec![all_points] + ); + } + + pool_withdraw_unbonded { + let s in 0 .. MAX_SPANS; + + let min_create_bond = Pools::::depositor_min_bond(); + let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); + + // Add a new member + let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); + let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 2u32.into()); + Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) + .unwrap(); + + // Sanity check join worked + assert_eq!( + T::Staking::active_stake(&pool_account).unwrap(), + min_create_bond + min_join_bond + ); + assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); + + // Unbond the new member + Pools::::fully_unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner.clone()).unwrap(); + + // Sanity check that unbond worked + assert_eq!( + T::Staking::active_stake(&pool_account).unwrap(), + min_create_bond + ); + assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); + // Set the current era + pallet_staking::CurrentEra::::put(EraIndex::max_value()); + + // Add `s` count of slashing spans to storage. + pallet_staking::benchmarking::add_slashing_spans::(&pool_account, s); + whitelist_account!(pool_account); + }: _(RuntimeOrigin::Signed(pool_account.clone()), 1, s) + verify { + // The joiners funds didn't change + assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); + // The unlocking chunk was removed + assert_eq!(pallet_staking::Ledger::::get(pool_account).unwrap().unlocking.len(), 0); + } + + withdraw_unbonded_update { + let s in 0 .. 
MAX_SPANS; + + let min_create_bond = Pools::::depositor_min_bond(); + let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); + + // Add a new member + let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); + let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 2u32.into()); + let joiner_lookup = T::Lookup::unlookup(joiner.clone()); + Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) + .unwrap(); + + // Sanity check join worked + assert_eq!( + T::Staking::active_stake(&pool_account).unwrap(), + min_create_bond + min_join_bond + ); + assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); + + // Unbond the new member + pallet_staking::CurrentEra::::put(0); + Pools::::fully_unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner.clone()).unwrap(); + + // Sanity check that unbond worked + assert_eq!( + T::Staking::active_stake(&pool_account).unwrap(), + min_create_bond + ); + assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); + + // Set the current era to ensure we can withdraw unbonded funds + pallet_staking::CurrentEra::::put(EraIndex::max_value()); + + pallet_staking::benchmarking::add_slashing_spans::(&pool_account, s); + whitelist_account!(joiner); + }: withdraw_unbonded(RuntimeOrigin::Signed(joiner.clone()), joiner_lookup, s) + verify { + assert_eq!( + CurrencyOf::::balance(&joiner), min_join_bond * 2u32.into() + ); + // The unlocking chunk was removed + assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 0); + } + + withdraw_unbonded_kill { + let s in 0 .. MAX_SPANS; + + let min_create_bond = Pools::::depositor_min_bond(); + let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); + let depositor_lookup = T::Lookup::unlookup(depositor.clone()); + + // We set the pool to the destroying state so the depositor can leave + BondedPools::::try_mutate(&1, |maybe_bonded_pool| { + maybe_bonded_pool.as_mut().ok_or(()).map(|bonded_pool| { + bonded_pool.state = PoolState::Destroying; + }) + }) + .unwrap(); + + // Unbond the creator + pallet_staking::CurrentEra::::put(0); + // Simulate some rewards so we can check if the rewards storage is cleaned up. We check this + // here to ensure the complete flow for destroying a pool works - the reward pool account + // should never exist by time the depositor withdraws so we test that it gets cleaned + // up when unbonding. 
+ let reward_account = Pools::::create_reward_account(1); + assert!(frame_system::Account::::contains_key(&reward_account)); + Pools::::fully_unbond(RuntimeOrigin::Signed(depositor.clone()).into(), depositor.clone()).unwrap(); + + // Sanity check that unbond worked + assert_eq!( + T::Staking::active_stake(&pool_account).unwrap(), + Zero::zero() + ); + assert_eq!( + CurrencyOf::::balance(&pool_account), + min_create_bond + ); + assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); + + // Set the current era to ensure we can withdraw unbonded funds + pallet_staking::CurrentEra::::put(EraIndex::max_value()); + + // Some last checks that storage items we expect to get cleaned up are present + assert!(pallet_staking::Ledger::::contains_key(&pool_account)); + assert!(BondedPools::::contains_key(&1)); + assert!(SubPoolsStorage::::contains_key(&1)); + assert!(RewardPools::::contains_key(&1)); + assert!(PoolMembers::::contains_key(&depositor)); + assert!(frame_system::Account::::contains_key(&reward_account)); + + whitelist_account!(depositor); + }: withdraw_unbonded(RuntimeOrigin::Signed(depositor.clone()), depositor_lookup, s) + verify { + // Pool removal worked + assert!(!pallet_staking::Ledger::::contains_key(&pool_account)); + assert!(!BondedPools::::contains_key(&1)); + assert!(!SubPoolsStorage::::contains_key(&1)); + assert!(!RewardPools::::contains_key(&1)); + assert!(!PoolMembers::::contains_key(&depositor)); + assert!(!frame_system::Account::::contains_key(&pool_account)); + assert!(!frame_system::Account::::contains_key(&reward_account)); + + // Funds where transferred back correctly + assert_eq!( + CurrencyOf::::balance(&depositor), + // gets bond back + rewards collecting when unbonding + min_create_bond * 2u32.into() + CurrencyOf::::minimum_balance() + ); + } + + create { + let min_create_bond = Pools::::depositor_min_bond(); + let depositor: T::AccountId = account("depositor", USER_SEED, 0); + let depositor_lookup = T::Lookup::unlookup(depositor.clone()); + + // Give the depositor some balance to bond + CurrencyOf::::set_balance(&depositor, min_create_bond * 2u32.into()); + + // Make sure no Pools exist at a pre-condition for our verify checks + assert_eq!(RewardPools::::count(), 0); + assert_eq!(BondedPools::::count(), 0); + + whitelist_account!(depositor); + }: _( + RuntimeOrigin::Signed(depositor.clone()), + min_create_bond, + depositor_lookup.clone(), + depositor_lookup.clone(), + depositor_lookup + ) + verify { + assert_eq!(RewardPools::::count(), 1); + assert_eq!(BondedPools::::count(), 1); + let (_, new_pool) = BondedPools::::iter().next().unwrap(); + assert_eq!( + new_pool, + BondedPoolInner { + commission: Commission::default(), + member_counter: 1, + points: min_create_bond, + roles: PoolRoles { + depositor: depositor.clone(), + root: Some(depositor.clone()), + nominator: Some(depositor.clone()), + bouncer: Some(depositor.clone()), + }, + state: PoolState::Open, + } + ); + assert_eq!( + T::Staking::active_stake(&Pools::::create_bonded_account(1)), + Ok(min_create_bond) + ); + } + + nominate { + let n in 1 .. MaxNominationsOf::::get(); + + // Create a pool + let min_create_bond = Pools::::depositor_min_bond() * 2u32.into(); + let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); + + // Create some accounts to nominate. 
For the sake of benchmarking they don't need to be + // actual validators + let validators: Vec<_> = (0..n) + .map(|i| account("stash", USER_SEED, i)) + .collect(); + + whitelist_account!(depositor); + }:_(RuntimeOrigin::Signed(depositor.clone()), 1, validators) + verify { + assert_eq!(RewardPools::::count(), 1); + assert_eq!(BondedPools::::count(), 1); + let (_, new_pool) = BondedPools::::iter().next().unwrap(); + assert_eq!( + new_pool, + BondedPoolInner { + commission: Commission::default(), + member_counter: 1, + points: min_create_bond, + roles: PoolRoles { + depositor: depositor.clone(), + root: Some(depositor.clone()), + nominator: Some(depositor.clone()), + bouncer: Some(depositor.clone()), + }, + state: PoolState::Open, + } + ); + assert_eq!( + T::Staking::active_stake(&Pools::::create_bonded_account(1)), + Ok(min_create_bond) + ); + } + + set_state { + // Create a pool + let min_create_bond = Pools::::depositor_min_bond(); + let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); + BondedPools::::mutate(&1, |maybe_pool| { + // Force the pool into an invalid state + maybe_pool.as_mut().map(|pool| pool.points = min_create_bond * 10u32.into()); + }); + + let caller = account("caller", 0, USER_SEED); + whitelist_account!(caller); + }:_(RuntimeOrigin::Signed(caller), 1, PoolState::Destroying) + verify { + assert_eq!(BondedPools::::get(1).unwrap().state, PoolState::Destroying); + } + + set_metadata { + let n in 1 .. ::MaxMetadataLen::get(); + + // Create a pool + let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + + // Create metadata of the max possible size + let metadata: Vec = (0..n).map(|_| 42).collect(); + + whitelist_account!(depositor); + }:_(RuntimeOrigin::Signed(depositor), 1, metadata.clone()) + verify { + assert_eq!(Metadata::::get(&1), metadata); + } + + set_configs { + }:_( + RuntimeOrigin::Root, + ConfigOp::Set(BalanceOf::::max_value()), + ConfigOp::Set(BalanceOf::::max_value()), + ConfigOp::Set(u32::MAX), + ConfigOp::Set(u32::MAX), + ConfigOp::Set(u32::MAX), + ConfigOp::Set(Perbill::max_value()) + ) verify { + assert_eq!(MinJoinBond::::get(), BalanceOf::::max_value()); + assert_eq!(MinCreateBond::::get(), BalanceOf::::max_value()); + assert_eq!(MaxPools::::get(), Some(u32::MAX)); + assert_eq!(MaxPoolMembers::::get(), Some(u32::MAX)); + assert_eq!(MaxPoolMembersPerPool::::get(), Some(u32::MAX)); + assert_eq!(GlobalMaxCommission::::get(), Some(Perbill::max_value())); + } + + update_roles { + let first_id = pallet_nomination_pools::LastPoolId::::get() + 1; + let (root, _) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + let random: T::AccountId = account("but is anything really random in computers..?", 0, USER_SEED); + }:_( + RuntimeOrigin::Signed(root.clone()), + first_id, + ConfigOp::Set(random.clone()), + ConfigOp::Set(random.clone()), + ConfigOp::Set(random.clone()) + ) verify { + assert_eq!( + pallet_nomination_pools::BondedPools::::get(first_id).unwrap().roles, + pallet_nomination_pools::PoolRoles { + depositor: root, + nominator: Some(random.clone()), + bouncer: Some(random.clone()), + root: Some(random), + }, + ) + } + + chill { + // Create a pool + let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + + // Nominate with the pool. 
+ let validators: Vec<_> = (0..MaxNominationsOf::::get()) + .map(|i| account("stash", USER_SEED, i)) + .collect(); + + assert_ok!(T::Staking::nominate(&pool_account, validators)); + assert!(T::Staking::nominations(&Pools::::create_bonded_account(1)).is_some()); + + whitelist_account!(depositor); + }:_(RuntimeOrigin::Signed(depositor.clone()), 1) + verify { + assert!(T::Staking::nominations(&Pools::::create_bonded_account(1)).is_none()); + } + + set_commission { + // Create a pool - do not set a commission yet. + let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + // set a max commission + Pools::::set_commission_max(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into(), Perbill::from_percent(50)).unwrap(); + // set a change rate + Pools::::set_commission_change_rate(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into(), CommissionChangeRate { + max_increase: Perbill::from_percent(20), + min_delay: 0u32.into(), + }).unwrap(); + // set a claim permission to an account. + Pools::::set_commission_claim_permission( + RuntimeOrigin::Signed(depositor.clone()).into(), + 1u32.into(), + Some(CommissionClaimPermission::Account(depositor.clone())) + ).unwrap(); + + }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Some((Perbill::from_percent(20), depositor.clone()))) + verify { + assert_eq!(BondedPools::::get(1).unwrap().commission, Commission { + current: Some((Perbill::from_percent(20), depositor.clone())), + max: Some(Perbill::from_percent(50)), + change_rate: Some(CommissionChangeRate { + max_increase: Perbill::from_percent(20), + min_delay: 0u32.into() + }), + throttle_from: Some(1u32.into()), + claim_permission: Some(CommissionClaimPermission::Account(depositor)), + }); + } + + set_commission_max { + // Create a pool, setting a commission that will update when max commission is set. + let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), Some(Perbill::from_percent(50))); + }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Perbill::from_percent(50)) + verify { + assert_eq!( + BondedPools::::get(1).unwrap().commission, Commission { + current: Some((Perbill::from_percent(50), depositor)), + max: Some(Perbill::from_percent(50)), + change_rate: None, + throttle_from: Some(0u32.into()), + claim_permission: None, + }); + } + + set_commission_change_rate { + // Create a pool + let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), CommissionChangeRate { + max_increase: Perbill::from_percent(50), + min_delay: 1000u32.into(), + }) + verify { + assert_eq!( + BondedPools::::get(1).unwrap().commission, Commission { + current: None, + max: None, + change_rate: Some(CommissionChangeRate { + max_increase: Perbill::from_percent(50), + min_delay: 1000u32.into(), + }), + throttle_from: Some(1_u32.into()), + claim_permission: None, + }); + } + + set_commission_claim_permission { + // Create a pool. 
+ let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Some(CommissionClaimPermission::Account(depositor.clone()))) + verify { + assert_eq!( + BondedPools::::get(1).unwrap().commission, Commission { + current: None, + max: None, + change_rate: None, + throttle_from: None, + claim_permission: Some(CommissionClaimPermission::Account(depositor)), + }); + } + + set_claim_permission { + // Create a pool + let min_create_bond = Pools::::depositor_min_bond(); + let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); + + // Join pool + let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); + let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 4u32.into()); + let joiner_lookup = T::Lookup::unlookup(joiner.clone()); + Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) + .unwrap(); + + // Sanity check join worked + assert_eq!( + T::Staking::active_stake(&pool_account).unwrap(), + min_create_bond + min_join_bond + ); + }:_(RuntimeOrigin::Signed(joiner.clone()), ClaimPermission::Permissioned) + verify { + assert_eq!(ClaimPermissions::::get(joiner), ClaimPermission::Permissioned); + } + + claim_commission { + let claimer: T::AccountId = account("claimer_member", USER_SEED + 4, 0); + let commission = Perbill::from_percent(50); + let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); + let ed = CurrencyOf::::minimum_balance(); + let (depositor, pool_account) = create_pool_account::(0, origin_weight, Some(commission)); + let reward_account = Pools::::create_reward_account(1); + CurrencyOf::::set_balance(&reward_account, ed + origin_weight); + + // member claims a payout to make some commission available. + let _ = Pools::::claim_payout(RuntimeOrigin::Signed(claimer.clone()).into()); + // set a claim permission to an account. + let _ = Pools::::set_commission_claim_permission( + RuntimeOrigin::Signed(depositor.clone()).into(), + 1u32.into(), + Some(CommissionClaimPermission::Account(claimer)) + ); + whitelist_account!(depositor); + }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into()) + verify { + assert_eq!( + CurrencyOf::::balance(&depositor), + origin_weight + commission * origin_weight + ); + assert_eq!( + CurrencyOf::::balance(&reward_account), + ed + commission * origin_weight + ); + } + + adjust_pool_deposit { + // Create a pool + let (depositor, _) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + + // Remove ed freeze to create a scenario where the ed deposit needs to be adjusted. + let _ = Pools::::unfreeze_pool_deposit(&Pools::::create_reward_account(1)); + assert!(&Pools::::check_ed_imbalance().is_err()); + + whitelist_account!(depositor); + }:_(RuntimeOrigin::Signed(depositor), 1) + verify { + assert!(&Pools::::check_ed_imbalance().is_ok()); + } + + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(), + crate::mock::Runtime + ); +} diff --git a/substrate/frame/nomination-pools/benchmarking/src/lib.rs b/substrate/frame/nomination-pools/benchmarking/src/lib.rs index f7df173ec04..45e8f1f27e9 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/lib.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/lib.rs @@ -17,836 +17,13 @@ //! Benchmarks for the nomination pools coupled with the staking and bags list pallets. 
-#![cfg(feature = "runtime-benchmarks")] #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(test)] -mod mock; +#[cfg(feature = "runtime-benchmarks")] +pub mod inner; -use frame_benchmarking::v1::{account, whitelist_account}; -use frame_election_provider_support::SortedListProvider; -use frame_support::{ - assert_ok, ensure, - traits::{ - fungible::{Inspect, Mutate, Unbalanced}, - Get, - }, -}; -use frame_system::RawOrigin as RuntimeOrigin; -use pallet_nomination_pools::{ - BalanceOf, BondExtra, BondedPoolInner, BondedPools, ClaimPermission, ClaimPermissions, - Commission, CommissionChangeRate, CommissionClaimPermission, ConfigOp, GlobalMaxCommission, - MaxPoolMembers, MaxPoolMembersPerPool, MaxPools, Metadata, MinCreateBond, MinJoinBond, - Pallet as Pools, PoolMembers, PoolRoles, PoolState, RewardPools, SubPoolsStorage, -}; -use pallet_staking::MaxNominationsOf; -use sp_runtime::{ - traits::{Bounded, StaticLookup, Zero}, - Perbill, -}; -use sp_staking::{EraIndex, StakingInterface}; -use sp_std::{vec, vec::Vec}; -// `frame_benchmarking::benchmarks!` macro needs this -use pallet_nomination_pools::Call; +#[cfg(feature = "runtime-benchmarks")] +pub use inner::*; -type CurrencyOf = ::Currency; - -const USER_SEED: u32 = 0; -const MAX_SPANS: u32 = 100; - -type VoterBagsListInstance = pallet_bags_list::Instance1; -pub trait Config: - pallet_nomination_pools::Config - + pallet_staking::Config - + pallet_bags_list::Config -{ -} - -pub struct Pallet(Pools); - -fn create_funded_user_with_balance( - string: &'static str, - n: u32, - balance: BalanceOf, -) -> T::AccountId { - let user = account(string, n, USER_SEED); - T::Currency::set_balance(&user, balance); - user -} - -// Create a bonded pool account, bonding `balance` and giving the account `balance * 2` free -// balance. -fn create_pool_account( - n: u32, - balance: BalanceOf, - commission: Option, -) -> (T::AccountId, T::AccountId) { - let ed = CurrencyOf::::minimum_balance(); - let pool_creator: T::AccountId = - create_funded_user_with_balance::("pool_creator", n, ed + balance * 2u32.into()); - let pool_creator_lookup = T::Lookup::unlookup(pool_creator.clone()); - - Pools::::create( - RuntimeOrigin::Signed(pool_creator.clone()).into(), - balance, - pool_creator_lookup.clone(), - pool_creator_lookup.clone(), - pool_creator_lookup, - ) - .unwrap(); - - if let Some(c) = commission { - let pool_id = pallet_nomination_pools::LastPoolId::::get(); - Pools::::set_commission( - RuntimeOrigin::Signed(pool_creator.clone()).into(), - pool_id, - Some((c, pool_creator.clone())), - ) - .expect("pool just created, commission can be set by root; qed"); - } - - let pool_account = pallet_nomination_pools::BondedPools::::iter() - .find(|(_, bonded_pool)| bonded_pool.roles.depositor == pool_creator) - .map(|(pool_id, _)| Pools::::create_bonded_account(pool_id)) - .expect("pool_creator created a pool above"); - - (pool_creator, pool_account) -} - -fn vote_to_balance( - vote: u64, -) -> Result, &'static str> { - vote.try_into().map_err(|_| "could not convert u64 to Balance") -} - -#[allow(unused)] -struct ListScenario { - /// Stash/Controller that is expected to be moved. - origin1: T::AccountId, - creator1: T::AccountId, - dest_weight: BalanceOf, - origin1_member: Option, -} - -impl ListScenario { - /// An expensive scenario for bags-list implementation: - /// - /// - the node to be updated (r) is the head of a bag that has at least one other node. The bag - /// itself will need to be read and written to update its head. 
The node pointed to by r.next - /// will need to be read and written as it will need to have its prev pointer updated. Note - /// that there are two other worst case scenarios for bag removal: 1) the node is a tail and - /// 2) the node is a middle node with prev and next; all scenarios end up with the same number - /// of storage reads and writes. - /// - /// - the destination bag has at least one node, which will need its next pointer updated. - pub(crate) fn new( - origin_weight: BalanceOf, - is_increase: bool, - ) -> Result { - ensure!(!origin_weight.is_zero(), "origin weight must be greater than 0"); - - ensure!( - pallet_nomination_pools::MaxPools::::get().unwrap_or(0) >= 3, - "must allow at least three pools for benchmarks" - ); - - // Burn the entire issuance. - CurrencyOf::::set_total_issuance(Zero::zero()); - - // Create accounts with the origin weight - let (pool_creator1, pool_origin1) = - create_pool_account::(USER_SEED + 1, origin_weight, Some(Perbill::from_percent(50))); - - T::Staking::nominate( - &pool_origin1, - // NOTE: these don't really need to be validators. - vec![account("random_validator", 0, USER_SEED)], - )?; - - let (_, pool_origin2) = - create_pool_account::(USER_SEED + 2, origin_weight, Some(Perbill::from_percent(50))); - - T::Staking::nominate( - &pool_origin2, - vec![account("random_validator", 0, USER_SEED)].clone(), - )?; - - // Find a destination weight that will trigger the worst case scenario - let dest_weight_as_vote = ::VoterList::score_update_worst_case( - &pool_origin1, - is_increase, - ); - - let dest_weight: BalanceOf = - dest_weight_as_vote.try_into().map_err(|_| "could not convert u64 to Balance")?; - - // Create an account with the worst case destination weight - let (_, pool_dest1) = - create_pool_account::(USER_SEED + 3, dest_weight, Some(Perbill::from_percent(50))); - - T::Staking::nominate(&pool_dest1, vec![account("random_validator", 0, USER_SEED)])?; - - let weight_of = pallet_staking::Pallet::::weight_of_fn(); - assert_eq!(vote_to_balance::(weight_of(&pool_origin1)).unwrap(), origin_weight); - assert_eq!(vote_to_balance::(weight_of(&pool_origin2)).unwrap(), origin_weight); - assert_eq!(vote_to_balance::(weight_of(&pool_dest1)).unwrap(), dest_weight); - - Ok(ListScenario { - origin1: pool_origin1, - creator1: pool_creator1, - dest_weight, - origin1_member: None, - }) - } - - fn add_joiner(mut self, amount: BalanceOf) -> Self { - let amount = MinJoinBond::::get() - .max(CurrencyOf::::minimum_balance()) - // Max `amount` with minimum thresholds for account balance and joining a pool - // to ensure 1) the user can be created and 2) can join the pool - .max(amount); - - let joiner: T::AccountId = account("joiner", USER_SEED, 0); - self.origin1_member = Some(joiner.clone()); - CurrencyOf::::set_balance(&joiner, amount * 2u32.into()); - - let original_bonded = T::Staking::active_stake(&self.origin1).unwrap(); - - // Unbond `amount` from the underlying pool account so when the member joins - // we will maintain `current_bonded`. - T::Staking::unbond(&self.origin1, amount).expect("the pool was created in `Self::new`."); - - // Account pool points for the unbonded balance. 
- BondedPools::::mutate(&1, |maybe_pool| { - maybe_pool.as_mut().map(|pool| pool.points -= amount) - }); - - Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), amount, 1).unwrap(); - - // check that the vote weight is still the same as the original bonded - let weight_of = pallet_staking::Pallet::::weight_of_fn(); - assert_eq!(vote_to_balance::(weight_of(&self.origin1)).unwrap(), original_bonded); - - // check the member was added correctly - let member = PoolMembers::::get(&joiner).unwrap(); - assert_eq!(member.points, amount); - assert_eq!(member.pool_id, 1); - - self - } -} - -frame_benchmarking::benchmarks! { - join { - let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); - - // setup the worst case list scenario. - let scenario = ListScenario::::new(origin_weight, true)?; - assert_eq!( - T::Staking::active_stake(&scenario.origin1).unwrap(), - origin_weight - ); - - let max_additional = scenario.dest_weight - origin_weight; - let joiner_free = CurrencyOf::::minimum_balance() + max_additional; - - let joiner: T::AccountId - = create_funded_user_with_balance::("joiner", 0, joiner_free); - - whitelist_account!(joiner); - }: _(RuntimeOrigin::Signed(joiner.clone()), max_additional, 1) - verify { - assert_eq!(CurrencyOf::::balance(&joiner), joiner_free - max_additional); - assert_eq!( - T::Staking::active_stake(&scenario.origin1).unwrap(), - scenario.dest_weight - ); - } - - bond_extra_transfer { - let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); - let scenario = ListScenario::::new(origin_weight, true)?; - let extra = scenario.dest_weight - origin_weight; - - // creator of the src pool will bond-extra, bumping itself to dest bag. - - }: bond_extra(RuntimeOrigin::Signed(scenario.creator1.clone()), BondExtra::FreeBalance(extra)) - verify { - assert!( - T::Staking::active_stake(&scenario.origin1).unwrap() >= - scenario.dest_weight - ); - } - - bond_extra_other { - let claimer: T::AccountId = account("claimer", USER_SEED + 4, 0); - - let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); - let scenario = ListScenario::::new(origin_weight, true)?; - let extra = (scenario.dest_weight - origin_weight).max(CurrencyOf::::minimum_balance()); - - // set claim preferences to `PermissionlessAll` to any account to bond extra on member's behalf. - let _ = Pools::::set_claim_permission(RuntimeOrigin::Signed(scenario.creator1.clone()).into(), ClaimPermission::PermissionlessAll); - - // transfer exactly `extra` to the depositor of the src pool (1), - let reward_account1 = Pools::::create_reward_account(1); - assert!(extra >= CurrencyOf::::minimum_balance()); - let _ = CurrencyOf::::mint_into(&reward_account1, extra); - - }: _(RuntimeOrigin::Signed(claimer), T::Lookup::unlookup(scenario.creator1.clone()), BondExtra::Rewards) - verify { - // commission of 50% deducted here. 
- assert!( - T::Staking::active_stake(&scenario.origin1).unwrap() >= - scenario.dest_weight / 2u32.into() - ); - } - - claim_payout { - let claimer: T::AccountId = account("claimer", USER_SEED + 4, 0); - let commission = Perbill::from_percent(50); - let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); - let ed = CurrencyOf::::minimum_balance(); - let (depositor, pool_account) = create_pool_account::(0, origin_weight, Some(commission)); - let reward_account = Pools::::create_reward_account(1); - - // Send funds to the reward account of the pool - CurrencyOf::::set_balance(&reward_account, ed + origin_weight); - - // set claim preferences to `PermissionlessAll` so any account can claim rewards on member's - // behalf. - let _ = Pools::::set_claim_permission(RuntimeOrigin::Signed(depositor.clone()).into(), ClaimPermission::PermissionlessAll); - - // Sanity check - assert_eq!( - CurrencyOf::::balance(&depositor), - origin_weight - ); - - whitelist_account!(depositor); - }:claim_payout_other(RuntimeOrigin::Signed(claimer), depositor.clone()) - verify { - assert_eq!( - CurrencyOf::::balance(&depositor), - origin_weight + commission * origin_weight - ); - assert_eq!( - CurrencyOf::::balance(&reward_account), - ed + commission * origin_weight - ); - } - - - unbond { - // The weight the nominator will start at. The value used here is expected to be - // significantly higher than the first position in a list (e.g. the first bag threshold). - let origin_weight = Pools::::depositor_min_bond() * 200u32.into(); - let scenario = ListScenario::::new(origin_weight, false)?; - let amount = origin_weight - scenario.dest_weight; - - let scenario = scenario.add_joiner(amount); - let member_id = scenario.origin1_member.unwrap().clone(); - let member_id_lookup = T::Lookup::unlookup(member_id.clone()); - let all_points = PoolMembers::::get(&member_id).unwrap().points; - whitelist_account!(member_id); - }: _(RuntimeOrigin::Signed(member_id.clone()), member_id_lookup, all_points) - verify { - let bonded_after = T::Staking::active_stake(&scenario.origin1).unwrap(); - // We at least went down to the destination bag - assert!(bonded_after <= scenario.dest_weight); - let member = PoolMembers::::get( - &member_id - ) - .unwrap(); - assert_eq!( - member.unbonding_eras.keys().cloned().collect::>(), - vec![0 + T::Staking::bonding_duration()] - ); - assert_eq!( - member.unbonding_eras.values().cloned().collect::>(), - vec![all_points] - ); - } - - pool_withdraw_unbonded { - let s in 0 .. 
MAX_SPANS; - - let min_create_bond = Pools::::depositor_min_bond(); - let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); - - // Add a new member - let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); - let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 2u32.into()); - Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) - .unwrap(); - - // Sanity check join worked - assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), - min_create_bond + min_join_bond - ); - assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); - - // Unbond the new member - Pools::::fully_unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner.clone()).unwrap(); - - // Sanity check that unbond worked - assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), - min_create_bond - ); - assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); - // Set the current era - pallet_staking::CurrentEra::::put(EraIndex::max_value()); - - // Add `s` count of slashing spans to storage. - pallet_staking::benchmarking::add_slashing_spans::(&pool_account, s); - whitelist_account!(pool_account); - }: _(RuntimeOrigin::Signed(pool_account.clone()), 1, s) - verify { - // The joiners funds didn't change - assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); - // The unlocking chunk was removed - assert_eq!(pallet_staking::Ledger::::get(pool_account).unwrap().unlocking.len(), 0); - } - - withdraw_unbonded_update { - let s in 0 .. MAX_SPANS; - - let min_create_bond = Pools::::depositor_min_bond(); - let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); - - // Add a new member - let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); - let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 2u32.into()); - let joiner_lookup = T::Lookup::unlookup(joiner.clone()); - Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) - .unwrap(); - - // Sanity check join worked - assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), - min_create_bond + min_join_bond - ); - assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); - - // Unbond the new member - pallet_staking::CurrentEra::::put(0); - Pools::::fully_unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner.clone()).unwrap(); - - // Sanity check that unbond worked - assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), - min_create_bond - ); - assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); - - // Set the current era to ensure we can withdraw unbonded funds - pallet_staking::CurrentEra::::put(EraIndex::max_value()); - - pallet_staking::benchmarking::add_slashing_spans::(&pool_account, s); - whitelist_account!(joiner); - }: withdraw_unbonded(RuntimeOrigin::Signed(joiner.clone()), joiner_lookup, s) - verify { - assert_eq!( - CurrencyOf::::balance(&joiner), min_join_bond * 2u32.into() - ); - // The unlocking chunk was removed - assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 0); - } - - withdraw_unbonded_kill { - let s in 0 .. 
MAX_SPANS; - - let min_create_bond = Pools::::depositor_min_bond(); - let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); - let depositor_lookup = T::Lookup::unlookup(depositor.clone()); - - // We set the pool to the destroying state so the depositor can leave - BondedPools::::try_mutate(&1, |maybe_bonded_pool| { - maybe_bonded_pool.as_mut().ok_or(()).map(|bonded_pool| { - bonded_pool.state = PoolState::Destroying; - }) - }) - .unwrap(); - - // Unbond the creator - pallet_staking::CurrentEra::::put(0); - // Simulate some rewards so we can check if the rewards storage is cleaned up. We check this - // here to ensure the complete flow for destroying a pool works - the reward pool account - // should never exist by time the depositor withdraws so we test that it gets cleaned - // up when unbonding. - let reward_account = Pools::::create_reward_account(1); - assert!(frame_system::Account::::contains_key(&reward_account)); - Pools::::fully_unbond(RuntimeOrigin::Signed(depositor.clone()).into(), depositor.clone()).unwrap(); - - // Sanity check that unbond worked - assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), - Zero::zero() - ); - assert_eq!( - CurrencyOf::::balance(&pool_account), - min_create_bond - ); - assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); - - // Set the current era to ensure we can withdraw unbonded funds - pallet_staking::CurrentEra::::put(EraIndex::max_value()); - - // Some last checks that storage items we expect to get cleaned up are present - assert!(pallet_staking::Ledger::::contains_key(&pool_account)); - assert!(BondedPools::::contains_key(&1)); - assert!(SubPoolsStorage::::contains_key(&1)); - assert!(RewardPools::::contains_key(&1)); - assert!(PoolMembers::::contains_key(&depositor)); - assert!(frame_system::Account::::contains_key(&reward_account)); - - whitelist_account!(depositor); - }: withdraw_unbonded(RuntimeOrigin::Signed(depositor.clone()), depositor_lookup, s) - verify { - // Pool removal worked - assert!(!pallet_staking::Ledger::::contains_key(&pool_account)); - assert!(!BondedPools::::contains_key(&1)); - assert!(!SubPoolsStorage::::contains_key(&1)); - assert!(!RewardPools::::contains_key(&1)); - assert!(!PoolMembers::::contains_key(&depositor)); - assert!(!frame_system::Account::::contains_key(&pool_account)); - assert!(!frame_system::Account::::contains_key(&reward_account)); - - // Funds where transferred back correctly - assert_eq!( - CurrencyOf::::balance(&depositor), - // gets bond back + rewards collecting when unbonding - min_create_bond * 2u32.into() + CurrencyOf::::minimum_balance() - ); - } - - create { - let min_create_bond = Pools::::depositor_min_bond(); - let depositor: T::AccountId = account("depositor", USER_SEED, 0); - let depositor_lookup = T::Lookup::unlookup(depositor.clone()); - - // Give the depositor some balance to bond - CurrencyOf::::set_balance(&depositor, min_create_bond * 2u32.into()); - - // Make sure no Pools exist at a pre-condition for our verify checks - assert_eq!(RewardPools::::count(), 0); - assert_eq!(BondedPools::::count(), 0); - - whitelist_account!(depositor); - }: _( - RuntimeOrigin::Signed(depositor.clone()), - min_create_bond, - depositor_lookup.clone(), - depositor_lookup.clone(), - depositor_lookup - ) - verify { - assert_eq!(RewardPools::::count(), 1); - assert_eq!(BondedPools::::count(), 1); - let (_, new_pool) = BondedPools::::iter().next().unwrap(); - assert_eq!( - new_pool, - BondedPoolInner { - commission: 
Commission::default(), - member_counter: 1, - points: min_create_bond, - roles: PoolRoles { - depositor: depositor.clone(), - root: Some(depositor.clone()), - nominator: Some(depositor.clone()), - bouncer: Some(depositor.clone()), - }, - state: PoolState::Open, - } - ); - assert_eq!( - T::Staking::active_stake(&Pools::::create_bonded_account(1)), - Ok(min_create_bond) - ); - } - - nominate { - let n in 1 .. MaxNominationsOf::::get(); - - // Create a pool - let min_create_bond = Pools::::depositor_min_bond() * 2u32.into(); - let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); - - // Create some accounts to nominate. For the sake of benchmarking they don't need to be - // actual validators - let validators: Vec<_> = (0..n) - .map(|i| account("stash", USER_SEED, i)) - .collect(); - - whitelist_account!(depositor); - }:_(RuntimeOrigin::Signed(depositor.clone()), 1, validators) - verify { - assert_eq!(RewardPools::::count(), 1); - assert_eq!(BondedPools::::count(), 1); - let (_, new_pool) = BondedPools::::iter().next().unwrap(); - assert_eq!( - new_pool, - BondedPoolInner { - commission: Commission::default(), - member_counter: 1, - points: min_create_bond, - roles: PoolRoles { - depositor: depositor.clone(), - root: Some(depositor.clone()), - nominator: Some(depositor.clone()), - bouncer: Some(depositor.clone()), - }, - state: PoolState::Open, - } - ); - assert_eq!( - T::Staking::active_stake(&Pools::::create_bonded_account(1)), - Ok(min_create_bond) - ); - } - - set_state { - // Create a pool - let min_create_bond = Pools::::depositor_min_bond(); - let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); - BondedPools::::mutate(&1, |maybe_pool| { - // Force the pool into an invalid state - maybe_pool.as_mut().map(|pool| pool.points = min_create_bond * 10u32.into()); - }); - - let caller = account("caller", 0, USER_SEED); - whitelist_account!(caller); - }:_(RuntimeOrigin::Signed(caller), 1, PoolState::Destroying) - verify { - assert_eq!(BondedPools::::get(1).unwrap().state, PoolState::Destroying); - } - - set_metadata { - let n in 1 .. 
::MaxMetadataLen::get(); - - // Create a pool - let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); - - // Create metadata of the max possible size - let metadata: Vec = (0..n).map(|_| 42).collect(); - - whitelist_account!(depositor); - }:_(RuntimeOrigin::Signed(depositor), 1, metadata.clone()) - verify { - assert_eq!(Metadata::::get(&1), metadata); - } - - set_configs { - }:_( - RuntimeOrigin::Root, - ConfigOp::Set(BalanceOf::::max_value()), - ConfigOp::Set(BalanceOf::::max_value()), - ConfigOp::Set(u32::MAX), - ConfigOp::Set(u32::MAX), - ConfigOp::Set(u32::MAX), - ConfigOp::Set(Perbill::max_value()) - ) verify { - assert_eq!(MinJoinBond::::get(), BalanceOf::::max_value()); - assert_eq!(MinCreateBond::::get(), BalanceOf::::max_value()); - assert_eq!(MaxPools::::get(), Some(u32::MAX)); - assert_eq!(MaxPoolMembers::::get(), Some(u32::MAX)); - assert_eq!(MaxPoolMembersPerPool::::get(), Some(u32::MAX)); - assert_eq!(GlobalMaxCommission::::get(), Some(Perbill::max_value())); - } - - update_roles { - let first_id = pallet_nomination_pools::LastPoolId::::get() + 1; - let (root, _) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); - let random: T::AccountId = account("but is anything really random in computers..?", 0, USER_SEED); - }:_( - RuntimeOrigin::Signed(root.clone()), - first_id, - ConfigOp::Set(random.clone()), - ConfigOp::Set(random.clone()), - ConfigOp::Set(random.clone()) - ) verify { - assert_eq!( - pallet_nomination_pools::BondedPools::::get(first_id).unwrap().roles, - pallet_nomination_pools::PoolRoles { - depositor: root, - nominator: Some(random.clone()), - bouncer: Some(random.clone()), - root: Some(random), - }, - ) - } - - chill { - // Create a pool - let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); - - // Nominate with the pool. - let validators: Vec<_> = (0..MaxNominationsOf::::get()) - .map(|i| account("stash", USER_SEED, i)) - .collect(); - - assert_ok!(T::Staking::nominate(&pool_account, validators)); - assert!(T::Staking::nominations(&Pools::::create_bonded_account(1)).is_some()); - - whitelist_account!(depositor); - }:_(RuntimeOrigin::Signed(depositor.clone()), 1) - verify { - assert!(T::Staking::nominations(&Pools::::create_bonded_account(1)).is_none()); - } - - set_commission { - // Create a pool - do not set a commission yet. - let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); - // set a max commission - Pools::::set_commission_max(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into(), Perbill::from_percent(50)).unwrap(); - // set a change rate - Pools::::set_commission_change_rate(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into(), CommissionChangeRate { - max_increase: Perbill::from_percent(20), - min_delay: 0u32.into(), - }).unwrap(); - // set a claim permission to an account. 
- Pools::::set_commission_claim_permission( - RuntimeOrigin::Signed(depositor.clone()).into(), - 1u32.into(), - Some(CommissionClaimPermission::Account(depositor.clone())) - ).unwrap(); - - }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Some((Perbill::from_percent(20), depositor.clone()))) - verify { - assert_eq!(BondedPools::::get(1).unwrap().commission, Commission { - current: Some((Perbill::from_percent(20), depositor.clone())), - max: Some(Perbill::from_percent(50)), - change_rate: Some(CommissionChangeRate { - max_increase: Perbill::from_percent(20), - min_delay: 0u32.into() - }), - throttle_from: Some(1u32.into()), - claim_permission: Some(CommissionClaimPermission::Account(depositor)), - }); - } - - set_commission_max { - // Create a pool, setting a commission that will update when max commission is set. - let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), Some(Perbill::from_percent(50))); - }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Perbill::from_percent(50)) - verify { - assert_eq!( - BondedPools::::get(1).unwrap().commission, Commission { - current: Some((Perbill::from_percent(50), depositor)), - max: Some(Perbill::from_percent(50)), - change_rate: None, - throttle_from: Some(0u32.into()), - claim_permission: None, - }); - } - - set_commission_change_rate { - // Create a pool - let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); - }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), CommissionChangeRate { - max_increase: Perbill::from_percent(50), - min_delay: 1000u32.into(), - }) - verify { - assert_eq!( - BondedPools::::get(1).unwrap().commission, Commission { - current: None, - max: None, - change_rate: Some(CommissionChangeRate { - max_increase: Perbill::from_percent(50), - min_delay: 1000u32.into(), - }), - throttle_from: Some(1_u32.into()), - claim_permission: None, - }); - } - - set_commission_claim_permission { - // Create a pool. 
- let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); - }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Some(CommissionClaimPermission::Account(depositor.clone()))) - verify { - assert_eq!( - BondedPools::::get(1).unwrap().commission, Commission { - current: None, - max: None, - change_rate: None, - throttle_from: None, - claim_permission: Some(CommissionClaimPermission::Account(depositor)), - }); - } - - set_claim_permission { - // Create a pool - let min_create_bond = Pools::::depositor_min_bond(); - let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); - - // Join pool - let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); - let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 4u32.into()); - let joiner_lookup = T::Lookup::unlookup(joiner.clone()); - Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) - .unwrap(); - - // Sanity check join worked - assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), - min_create_bond + min_join_bond - ); - }:_(RuntimeOrigin::Signed(joiner.clone()), ClaimPermission::Permissioned) - verify { - assert_eq!(ClaimPermissions::::get(joiner), ClaimPermission::Permissioned); - } - - claim_commission { - let claimer: T::AccountId = account("claimer_member", USER_SEED + 4, 0); - let commission = Perbill::from_percent(50); - let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); - let ed = CurrencyOf::::minimum_balance(); - let (depositor, pool_account) = create_pool_account::(0, origin_weight, Some(commission)); - let reward_account = Pools::::create_reward_account(1); - CurrencyOf::::set_balance(&reward_account, ed + origin_weight); - - // member claims a payout to make some commission available. - let _ = Pools::::claim_payout(RuntimeOrigin::Signed(claimer.clone()).into()); - // set a claim permission to an account. - let _ = Pools::::set_commission_claim_permission( - RuntimeOrigin::Signed(depositor.clone()).into(), - 1u32.into(), - Some(CommissionClaimPermission::Account(claimer)) - ); - whitelist_account!(depositor); - }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into()) - verify { - assert_eq!( - CurrencyOf::::balance(&depositor), - origin_weight + commission * origin_weight - ); - assert_eq!( - CurrencyOf::::balance(&reward_account), - ed + commission * origin_weight - ); - } - - adjust_pool_deposit { - // Create a pool - let (depositor, _) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); - - // Remove ed freeze to create a scenario where the ed deposit needs to be adjusted. - let _ = Pools::::unfreeze_pool_deposit(&Pools::::create_reward_account(1)); - assert!(&Pools::::check_ed_imbalance().is_err()); - - whitelist_account!(depositor); - }:_(RuntimeOrigin::Signed(depositor), 1) - verify { - assert!(&Pools::::check_ed_imbalance().is_ok()); - } - - impl_benchmark_test_suite!( - Pallet, - crate::mock::new_test_ext(), - crate::mock::Runtime - ); -} +#[cfg(all(feature = "runtime-benchmarks", test))] +pub(crate) mod mock; diff --git a/substrate/frame/offences/benchmarking/src/inner.rs b/substrate/frame/offences/benchmarking/src/inner.rs new file mode 100644 index 00000000000..9aa88f7a0d6 --- /dev/null +++ b/substrate/frame/offences/benchmarking/src/inner.rs @@ -0,0 +1,250 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Offences pallet benchmarking. + +use sp_std::{prelude::*, vec}; + +use frame_benchmarking::v1::{account, benchmarks}; +use frame_support::traits::{Currency, Get}; +use frame_system::{Config as SystemConfig, Pallet as System, RawOrigin}; + +use sp_runtime::{ + traits::{Convert, Saturating, StaticLookup}, + Perbill, +}; +use sp_staking::offence::ReportOffence; + +use pallet_babe::EquivocationOffence as BabeEquivocationOffence; +use pallet_balances::Config as BalancesConfig; +use pallet_grandpa::{ + EquivocationOffence as GrandpaEquivocationOffence, TimeSlot as GrandpaTimeSlot, +}; +use pallet_offences::{Config as OffencesConfig, Pallet as Offences}; +use pallet_session::{ + historical::{Config as HistoricalConfig, IdentificationTuple}, + Config as SessionConfig, Pallet as Session, SessionManager, +}; +use pallet_staking::{ + Config as StakingConfig, Exposure, IndividualExposure, MaxNominationsOf, Pallet as Staking, + RewardDestination, ValidatorPrefs, +}; + +const SEED: u32 = 0; + +const MAX_NOMINATORS: u32 = 100; + +pub struct Pallet(Offences); + +pub trait Config: + SessionConfig + + StakingConfig + + OffencesConfig + + HistoricalConfig + + BalancesConfig + + IdTupleConvert +{ +} + +/// A helper trait to make sure we can convert `IdentificationTuple` coming from historical +/// and the one required by offences. +pub trait IdTupleConvert { + /// Convert identification tuple from `historical` trait to the one expected by `offences`. + fn convert(id: IdentificationTuple) -> ::IdentificationTuple; +} + +impl IdTupleConvert for T +where + ::IdentificationTuple: From>, +{ + fn convert(id: IdentificationTuple) -> ::IdentificationTuple { + id.into() + } +} + +type LookupSourceOf = <::Lookup as StaticLookup>::Source; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + +struct Offender { + pub controller: T::AccountId, + #[allow(dead_code)] + pub stash: T::AccountId, + #[allow(dead_code)] + pub nominator_stashes: Vec, +} + +fn bond_amount() -> BalanceOf { + T::Currency::minimum_balance().saturating_mul(10_000u32.into()) +} + +fn create_offender(n: u32, nominators: u32) -> Result, &'static str> { + let stash: T::AccountId = account("stash", n, SEED); + let stash_lookup: LookupSourceOf = T::Lookup::unlookup(stash.clone()); + let reward_destination = RewardDestination::Staked; + let amount = bond_amount::(); + // add twice as much balance to prevent the account from being killed. 
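+ // (A sketch of the arithmetic, assuming the defaults above: `bond_amount()` is
+ // 10_000 × `minimum_balance()`, so funding the stash with twice that keeps it above
+ // the existential deposit even if the whole bond is later slashed by the reported
+ // offence.)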
+ let free_amount = amount.saturating_mul(2u32.into()); + T::Currency::make_free_balance_be(&stash, free_amount); + Staking::::bond( + RawOrigin::Signed(stash.clone()).into(), + amount, + reward_destination.clone(), + )?; + + let validator_prefs = + ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; + Staking::::validate(RawOrigin::Signed(stash.clone()).into(), validator_prefs)?; + + let mut individual_exposures = vec![]; + let mut nominator_stashes = vec![]; + // Create n nominators + for i in 0..nominators { + let nominator_stash: T::AccountId = + account("nominator stash", n * MAX_NOMINATORS + i, SEED); + T::Currency::make_free_balance_be(&nominator_stash, free_amount); + + Staking::::bond( + RawOrigin::Signed(nominator_stash.clone()).into(), + amount, + reward_destination.clone(), + )?; + + let selected_validators: Vec> = vec![stash_lookup.clone()]; + Staking::::nominate( + RawOrigin::Signed(nominator_stash.clone()).into(), + selected_validators, + )?; + + individual_exposures + .push(IndividualExposure { who: nominator_stash.clone(), value: amount }); + nominator_stashes.push(nominator_stash.clone()); + } + + let exposure = Exposure { total: amount * n.into(), own: amount, others: individual_exposures }; + let current_era = 0u32; + Staking::::add_era_stakers(current_era, stash.clone(), exposure); + + Ok(Offender { controller: stash.clone(), stash, nominator_stashes }) +} + +fn make_offenders( + num_offenders: u32, + num_nominators: u32, +) -> Result<(Vec>, Vec>), &'static str> { + Staking::::new_session(0); + + let mut offenders = vec![]; + for i in 0..num_offenders { + let offender = create_offender::(i + 1, num_nominators)?; + offenders.push(offender); + } + + Staking::::start_session(0); + + let id_tuples = offenders + .iter() + .map(|offender| { + ::ValidatorIdOf::convert(offender.controller.clone()) + .expect("failed to get validator id from account id") + }) + .map(|validator_id| { + ::FullIdentificationOf::convert(validator_id.clone()) + .map(|full_id| (validator_id, full_id)) + .expect("failed to convert validator id to full identification") + }) + .collect::>>(); + Ok((id_tuples, offenders)) +} + +benchmarks! { + report_offence_grandpa { + let n in 0 .. MAX_NOMINATORS.min(MaxNominationsOf::::get()); + + // for grandpa equivocation reports the number of reporters + // and offenders is always 1 + let reporters = vec![account("reporter", 1, SEED)]; + + // make sure reporters actually get rewarded + Staking::::set_slash_reward_fraction(Perbill::one()); + + let (mut offenders, raw_offenders) = make_offenders::(1, n)?; + let validator_set_count = Session::::validators().len() as u32; + + let offence = GrandpaEquivocationOffence { + time_slot: GrandpaTimeSlot { set_id: 0, round: 0 }, + session_index: 0, + validator_set_count, + offender: T::convert(offenders.pop().unwrap()), + }; + assert_eq!(System::::event_count(), 0); + }: { + let _ = Offences::::report_offence(reporters, offence); + } + verify { + // make sure that all slashes have been applied + #[cfg(test)] + assert_eq!( + System::::event_count(), 0 + + 1 // offence + + 3 // reporter (reward + endowment) + + 1 // offenders reported + + 3 // offenders slashed + + 1 // offenders chilled + + 3 * n // nominators slashed + ); + } + + report_offence_babe { + let n in 0 .. 
MAX_NOMINATORS.min(MaxNominationsOf::::get()); + + // for babe equivocation reports the number of reporters + // and offenders is always 1 + let reporters = vec![account("reporter", 1, SEED)]; + + // make sure reporters actually get rewarded + Staking::::set_slash_reward_fraction(Perbill::one()); + + let (mut offenders, raw_offenders) = make_offenders::(1, n)?; + let validator_set_count = Session::::validators().len() as u32; + + let offence = BabeEquivocationOffence { + slot: 0u64.into(), + session_index: 0, + validator_set_count, + offender: T::convert(offenders.pop().unwrap()), + }; + assert_eq!(System::::event_count(), 0); + }: { + let _ = Offences::::report_offence(reporters, offence); + } + verify { + // make sure that all slashes have been applied + #[cfg(test)] + assert_eq!( + System::::event_count(), 0 + + 1 // offence + + 3 // reporter (reward + endowment) + + 1 // offenders reported + + 3 // offenders slashed + + 1 // offenders chilled + + 3 * n // nominators slashed + ); + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/substrate/frame/offences/benchmarking/src/lib.rs b/substrate/frame/offences/benchmarking/src/lib.rs index 563aa4755ce..b08955a1332 100644 --- a/substrate/frame/offences/benchmarking/src/lib.rs +++ b/substrate/frame/offences/benchmarking/src/lib.rs @@ -17,239 +17,13 @@ //! Offences pallet benchmarking. -#![cfg(feature = "runtime-benchmarks")] #![cfg_attr(not(feature = "std"), no_std)] -mod mock; +#[cfg(feature = "runtime-benchmarks")] +pub mod inner; -use sp_std::{prelude::*, vec}; +#[cfg(feature = "runtime-benchmarks")] +pub use inner::*; -use frame_benchmarking::v1::{account, benchmarks}; -use frame_support::traits::{Currency, Get}; -use frame_system::{Config as SystemConfig, Pallet as System, RawOrigin}; - -use sp_runtime::{ - traits::{Convert, Saturating, StaticLookup}, - Perbill, -}; -use sp_staking::offence::ReportOffence; - -use pallet_babe::EquivocationOffence as BabeEquivocationOffence; -use pallet_balances::Config as BalancesConfig; -use pallet_grandpa::{ - EquivocationOffence as GrandpaEquivocationOffence, TimeSlot as GrandpaTimeSlot, -}; -use pallet_offences::{Config as OffencesConfig, Pallet as Offences}; -use pallet_session::{ - historical::{Config as HistoricalConfig, IdentificationTuple}, - Config as SessionConfig, Pallet as Session, SessionManager, -}; -use pallet_staking::{ - Config as StakingConfig, Exposure, IndividualExposure, MaxNominationsOf, Pallet as Staking, - RewardDestination, ValidatorPrefs, -}; - -const SEED: u32 = 0; - -const MAX_NOMINATORS: u32 = 100; - -pub struct Pallet(Offences); - -pub trait Config: - SessionConfig - + StakingConfig - + OffencesConfig - + HistoricalConfig - + BalancesConfig - + IdTupleConvert -{ -} - -/// A helper trait to make sure we can convert `IdentificationTuple` coming from historical -/// and the one required by offences. -pub trait IdTupleConvert { - /// Convert identification tuple from `historical` trait to the one expected by `offences`. 
- fn convert(id: IdentificationTuple) -> ::IdentificationTuple; -} - -impl IdTupleConvert for T -where - ::IdentificationTuple: From>, -{ - fn convert(id: IdentificationTuple) -> ::IdentificationTuple { - id.into() - } -} - -type LookupSourceOf = <::Lookup as StaticLookup>::Source; -type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; - -struct Offender { - pub controller: T::AccountId, - #[allow(dead_code)] - pub stash: T::AccountId, - #[allow(dead_code)] - pub nominator_stashes: Vec, -} - -fn bond_amount() -> BalanceOf { - T::Currency::minimum_balance().saturating_mul(10_000u32.into()) -} - -fn create_offender(n: u32, nominators: u32) -> Result, &'static str> { - let stash: T::AccountId = account("stash", n, SEED); - let stash_lookup: LookupSourceOf = T::Lookup::unlookup(stash.clone()); - let reward_destination = RewardDestination::Staked; - let amount = bond_amount::(); - // add twice as much balance to prevent the account from being killed. - let free_amount = amount.saturating_mul(2u32.into()); - T::Currency::make_free_balance_be(&stash, free_amount); - Staking::::bond( - RawOrigin::Signed(stash.clone()).into(), - amount, - reward_destination.clone(), - )?; - - let validator_prefs = - ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; - Staking::::validate(RawOrigin::Signed(stash.clone()).into(), validator_prefs)?; - - let mut individual_exposures = vec![]; - let mut nominator_stashes = vec![]; - // Create n nominators - for i in 0..nominators { - let nominator_stash: T::AccountId = - account("nominator stash", n * MAX_NOMINATORS + i, SEED); - T::Currency::make_free_balance_be(&nominator_stash, free_amount); - - Staking::::bond( - RawOrigin::Signed(nominator_stash.clone()).into(), - amount, - reward_destination.clone(), - )?; - - let selected_validators: Vec> = vec![stash_lookup.clone()]; - Staking::::nominate( - RawOrigin::Signed(nominator_stash.clone()).into(), - selected_validators, - )?; - - individual_exposures - .push(IndividualExposure { who: nominator_stash.clone(), value: amount }); - nominator_stashes.push(nominator_stash.clone()); - } - - let exposure = Exposure { total: amount * n.into(), own: amount, others: individual_exposures }; - let current_era = 0u32; - Staking::::add_era_stakers(current_era, stash.clone(), exposure); - - Ok(Offender { controller: stash.clone(), stash, nominator_stashes }) -} - -fn make_offenders( - num_offenders: u32, - num_nominators: u32, -) -> Result<(Vec>, Vec>), &'static str> { - Staking::::new_session(0); - - let mut offenders = vec![]; - for i in 0..num_offenders { - let offender = create_offender::(i + 1, num_nominators)?; - offenders.push(offender); - } - - Staking::::start_session(0); - - let id_tuples = offenders - .iter() - .map(|offender| { - ::ValidatorIdOf::convert(offender.controller.clone()) - .expect("failed to get validator id from account id") - }) - .map(|validator_id| { - ::FullIdentificationOf::convert(validator_id.clone()) - .map(|full_id| (validator_id, full_id)) - .expect("failed to convert validator id to full identification") - }) - .collect::>>(); - Ok((id_tuples, offenders)) -} - -benchmarks! { - report_offence_grandpa { - let n in 0 .. 
MAX_NOMINATORS.min(MaxNominationsOf::::get()); - - // for grandpa equivocation reports the number of reporters - // and offenders is always 1 - let reporters = vec![account("reporter", 1, SEED)]; - - // make sure reporters actually get rewarded - Staking::::set_slash_reward_fraction(Perbill::one()); - - let (mut offenders, raw_offenders) = make_offenders::(1, n)?; - let validator_set_count = Session::::validators().len() as u32; - - let offence = GrandpaEquivocationOffence { - time_slot: GrandpaTimeSlot { set_id: 0, round: 0 }, - session_index: 0, - validator_set_count, - offender: T::convert(offenders.pop().unwrap()), - }; - assert_eq!(System::::event_count(), 0); - }: { - let _ = Offences::::report_offence(reporters, offence); - } - verify { - // make sure that all slashes have been applied - #[cfg(test)] - assert_eq!( - System::::event_count(), 0 - + 1 // offence - + 3 // reporter (reward + endowment) - + 1 // offenders reported - + 3 // offenders slashed - + 1 // offenders chilled - + 3 * n // nominators slashed - ); - } - - report_offence_babe { - let n in 0 .. MAX_NOMINATORS.min(MaxNominationsOf::::get()); - - // for babe equivocation reports the number of reporters - // and offenders is always 1 - let reporters = vec![account("reporter", 1, SEED)]; - - // make sure reporters actually get rewarded - Staking::::set_slash_reward_fraction(Perbill::one()); - - let (mut offenders, raw_offenders) = make_offenders::(1, n)?; - let validator_set_count = Session::::validators().len() as u32; - - let offence = BabeEquivocationOffence { - slot: 0u64.into(), - session_index: 0, - validator_set_count, - offender: T::convert(offenders.pop().unwrap()), - }; - assert_eq!(System::::event_count(), 0); - }: { - let _ = Offences::::report_offence(reporters, offence); - } - verify { - // make sure that all slashes have been applied - #[cfg(test)] - assert_eq!( - System::::event_count(), 0 - + 1 // offence - + 3 // reporter (reward + endowment) - + 1 // offenders reported - + 3 // offenders slashed - + 1 // offenders chilled - + 3 * n // nominators slashed - ); - } - - impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); -} +#[cfg(all(feature = "runtime-benchmarks", test))] +pub(crate) mod mock; diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index ea2e9e93ed6..27129e73c71 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -17,9 +17,6 @@ //! Mock file for offences benchmarking. 
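 // Note (sketch): the parent `lib.rs` now declares this module as
 //
 //     #[cfg(all(feature = "runtime-benchmarks", test))]
 //     pub(crate) mod mock;
 //
 // so the file-level `#![cfg(test)]` attribute and the `use super::*;` glob import
 // removed below become redundant.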
-#![cfg(test)] - -use super::*; use frame_election_provider_support::{ bounds::{ElectionBounds, ElectionBoundsBuilder}, onchain, SequentialPhragmen, @@ -33,7 +30,7 @@ use pallet_session::historical as pallet_session_historical; use sp_runtime::{ testing::{Header, UintAuthorityId}, traits::IdentityLookup, - BuildStorage, + BuildStorage, Perbill, }; type AccountId = u64; diff --git a/substrate/frame/root-offences/Cargo.toml b/substrate/frame/root-offences/Cargo.toml index ad3dcf1f90e..f4d83c237b9 100644 --- a/substrate/frame/root-offences/Cargo.toml +++ b/substrate/frame/root-offences/Cargo.toml @@ -24,7 +24,7 @@ pallet-staking = { path = "../staking", default-features = false } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime" } +sp-runtime = { path = "../../primitives/runtime", default-features = false } sp-staking = { path = "../../primitives/staking", default-features = false } [dev-dependencies] @@ -34,7 +34,7 @@ pallet-staking-reward-curve = { path = "../staking/reward-curve" } sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +sp-std = { path = "../../primitives/std" } frame-election-provider-support = { path = "../election-provider-support" } @@ -74,5 +74,4 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-staking/std", - "sp-std/std", ] diff --git a/substrate/frame/root-offences/src/lib.rs b/substrate/frame/root-offences/src/lib.rs index e6bb5bb1881..24d259ed1d4 100644 --- a/substrate/frame/root-offences/src/lib.rs +++ b/substrate/frame/root-offences/src/lib.rs @@ -27,6 +27,9 @@ mod mock; #[cfg(test)] mod tests; +extern crate alloc; + +use alloc::vec::Vec; use pallet_session::historical::IdentificationTuple; use pallet_staking::{BalanceOf, Exposure, ExposureOf, Pallet as Staking}; use sp_runtime::Perbill; @@ -112,7 +115,7 @@ pub mod pallet { .into_iter() .map(|(o, _)| OffenceDetails:: { offender: (o.clone(), Staking::::eras_stakers(now, &o)), - reporters: vec![], + reporters: Default::default(), }) .collect()) } diff --git a/substrate/frame/session/benchmarking/src/inner.rs b/substrate/frame/session/benchmarking/src/inner.rs new file mode 100644 index 00000000000..d86c5d9ad27 --- /dev/null +++ b/substrate/frame/session/benchmarking/src/inner.rs @@ -0,0 +1,162 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for the Session Pallet. +// This is separated into its own crate due to cyclic dependency issues. 
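+// (Concretely: the benchmarks below pull helpers such as `create_validator_with_nominators`
+// from `pallet-staking`, which itself depends on `pallet-session`; hosting them in a
+// separate crate lets the benchmarks depend on both without a dependency cycle.)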
+ +use sp_runtime::traits::{One, StaticLookup, TrailingZeroInput}; +use sp_std::{prelude::*, vec}; + +use codec::Decode; +use frame_benchmarking::v1::benchmarks; +use frame_support::traits::{Get, KeyOwnerProofSystem, OnInitialize}; +use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; +use pallet_session::{historical::Pallet as Historical, Pallet as Session, *}; +use pallet_staking::{ + benchmarking::create_validator_with_nominators, testing_utils::create_validators, + MaxNominationsOf, RewardDestination, +}; + +const MAX_VALIDATORS: u32 = 1000; + +pub struct Pallet(pallet_session::Pallet); +pub trait Config: + pallet_session::Config + pallet_session::historical::Config + pallet_staking::Config +{ +} + +impl OnInitialize> for Pallet { + fn on_initialize(n: BlockNumberFor) -> frame_support::weights::Weight { + pallet_session::Pallet::::on_initialize(n) + } +} + +benchmarks! { + set_keys { + let n = MaxNominationsOf::::get(); + let (v_stash, _) = create_validator_with_nominators::( + n, + MaxNominationsOf::::get(), + false, + true, + RewardDestination::Staked, + )?; + let v_controller = pallet_staking::Pallet::::bonded(&v_stash).ok_or("not stash")?; + + let keys = T::Keys::decode(&mut TrailingZeroInput::zeroes()).unwrap(); + let proof: Vec = vec![0,1,2,3]; + // Whitelist controller account from further DB operations. + let v_controller_key = frame_system::Account::::hashed_key_for(&v_controller); + frame_benchmarking::benchmarking::add_to_whitelist(v_controller_key.into()); + }: _(RawOrigin::Signed(v_controller), keys, proof) + + purge_keys { + let n = MaxNominationsOf::::get(); + let (v_stash, _) = create_validator_with_nominators::( + n, + MaxNominationsOf::::get(), + false, + true, + RewardDestination::Staked, + )?; + let v_controller = pallet_staking::Pallet::::bonded(&v_stash).ok_or("not stash")?; + let keys = T::Keys::decode(&mut TrailingZeroInput::zeroes()).unwrap(); + let proof: Vec = vec![0,1,2,3]; + Session::::set_keys(RawOrigin::Signed(v_controller.clone()).into(), keys, proof)?; + // Whitelist controller account from further DB operations. + let v_controller_key = frame_system::Account::::hashed_key_for(&v_controller); + frame_benchmarking::benchmarking::add_to_whitelist(v_controller_key.into()); + }: _(RawOrigin::Signed(v_controller)) + + #[extra] + check_membership_proof_current_session { + let n in 2 .. MAX_VALIDATORS as u32; + + let (key, key_owner_proof1) = check_membership_proof_setup::(n); + let key_owner_proof2 = key_owner_proof1.clone(); + }: { + Historical::::check_proof(key, key_owner_proof1); + } + verify { + assert!(Historical::::check_proof(key, key_owner_proof2).is_some()); + } + + #[extra] + check_membership_proof_historical_session { + let n in 2 .. MAX_VALIDATORS as u32; + + let (key, key_owner_proof1) = check_membership_proof_setup::(n); + + // skip to the next session so that the session is historical + // and the membership merkle proof must be checked. + Session::::rotate_session(); + + let key_owner_proof2 = key_owner_proof1.clone(); + }: { + Historical::::check_proof(key, key_owner_proof1); + } + verify { + assert!(Historical::::check_proof(key, key_owner_proof2).is_some()); + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test, extra = false); +} + +/// Sets up the benchmark for checking a membership proof. It creates the given +/// number of validators, sets random session keys and then creates a membership +/// proof for the first authority and returns its key and the proof. 
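+// Usage sketch: the returned `(key, proof)` pair is exactly what the two
+// `check_membership_proof_*` benchmarks above feed into `Historical::<T>::check_proof`.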
+fn check_membership_proof_setup( + n: u32, +) -> ((sp_runtime::KeyTypeId, &'static [u8; 32]), sp_session::MembershipProof) { + pallet_staking::ValidatorCount::::put(n); + + // create validators and set random session keys + for (n, who) in create_validators::(n, 1000).unwrap().into_iter().enumerate() { + use rand::{RngCore, SeedableRng}; + + let validator = T::Lookup::lookup(who).unwrap(); + let controller = pallet_staking::Pallet::::bonded(&validator).unwrap(); + + let keys = { + let mut keys = [0u8; 128]; + + // we keep the keys for the first validator as 0x00000... + if n > 0 { + let mut rng = rand::rngs::StdRng::seed_from_u64(n as u64); + rng.fill_bytes(&mut keys); + } + + keys + }; + + let keys: T::Keys = Decode::decode(&mut &keys[..]).unwrap(); + let proof: Vec = vec![]; + + Session::::set_keys(RawOrigin::Signed(controller).into(), keys, proof).unwrap(); + } + + Pallet::::on_initialize(frame_system::pallet_prelude::BlockNumberFor::::one()); + + // skip sessions until the new validator set is enacted + while Session::::validators().len() < n as usize { + Session::::rotate_session(); + } + + let key = (sp_runtime::KeyTypeId(*b"babe"), &[0u8; 32]); + + (key, Historical::::prove(key).unwrap()) +} diff --git a/substrate/frame/session/benchmarking/src/lib.rs b/substrate/frame/session/benchmarking/src/lib.rs index 84258d84994..b08955a1332 100644 --- a/substrate/frame/session/benchmarking/src/lib.rs +++ b/substrate/frame/session/benchmarking/src/lib.rs @@ -15,153 +15,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Benchmarks for the Session Pallet. -// This is separated into its own crate due to cyclic dependency issues. +//! Offences pallet benchmarking. -#![cfg(feature = "runtime-benchmarks")] #![cfg_attr(not(feature = "std"), no_std)] -mod mock; +#[cfg(feature = "runtime-benchmarks")] +pub mod inner; -use sp_runtime::traits::{One, StaticLookup, TrailingZeroInput}; -use sp_std::{prelude::*, vec}; +#[cfg(feature = "runtime-benchmarks")] +pub use inner::*; -use codec::Decode; -use frame_benchmarking::v1::benchmarks; -use frame_support::traits::{Get, KeyOwnerProofSystem, OnInitialize}; -use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; -use pallet_session::{historical::Pallet as Historical, Pallet as Session, *}; -use pallet_staking::{ - benchmarking::create_validator_with_nominators, testing_utils::create_validators, - MaxNominationsOf, RewardDestination, -}; - -const MAX_VALIDATORS: u32 = 1000; - -pub struct Pallet(pallet_session::Pallet); -pub trait Config: - pallet_session::Config + pallet_session::historical::Config + pallet_staking::Config -{ -} - -impl OnInitialize> for Pallet { - fn on_initialize(n: BlockNumberFor) -> frame_support::weights::Weight { - pallet_session::Pallet::::on_initialize(n) - } -} - -benchmarks! { - set_keys { - let n = MaxNominationsOf::::get(); - let (v_stash, _) = create_validator_with_nominators::( - n, - MaxNominationsOf::::get(), - false, - true, - RewardDestination::Staked, - )?; - let v_controller = pallet_staking::Pallet::::bonded(&v_stash).ok_or("not stash")?; - - let keys = T::Keys::decode(&mut TrailingZeroInput::zeroes()).unwrap(); - let proof: Vec = vec![0,1,2,3]; - // Whitelist controller account from further DB operations. 
- let v_controller_key = frame_system::Account::::hashed_key_for(&v_controller); - frame_benchmarking::benchmarking::add_to_whitelist(v_controller_key.into()); - }: _(RawOrigin::Signed(v_controller), keys, proof) - - purge_keys { - let n = MaxNominationsOf::::get(); - let (v_stash, _) = create_validator_with_nominators::( - n, - MaxNominationsOf::::get(), - false, - true, - RewardDestination::Staked, - )?; - let v_controller = pallet_staking::Pallet::::bonded(&v_stash).ok_or("not stash")?; - let keys = T::Keys::decode(&mut TrailingZeroInput::zeroes()).unwrap(); - let proof: Vec = vec![0,1,2,3]; - Session::::set_keys(RawOrigin::Signed(v_controller.clone()).into(), keys, proof)?; - // Whitelist controller account from further DB operations. - let v_controller_key = frame_system::Account::::hashed_key_for(&v_controller); - frame_benchmarking::benchmarking::add_to_whitelist(v_controller_key.into()); - }: _(RawOrigin::Signed(v_controller)) - - #[extra] - check_membership_proof_current_session { - let n in 2 .. MAX_VALIDATORS as u32; - - let (key, key_owner_proof1) = check_membership_proof_setup::(n); - let key_owner_proof2 = key_owner_proof1.clone(); - }: { - Historical::::check_proof(key, key_owner_proof1); - } - verify { - assert!(Historical::::check_proof(key, key_owner_proof2).is_some()); - } - - #[extra] - check_membership_proof_historical_session { - let n in 2 .. MAX_VALIDATORS as u32; - - let (key, key_owner_proof1) = check_membership_proof_setup::(n); - - // skip to the next session so that the session is historical - // and the membership merkle proof must be checked. - Session::::rotate_session(); - - let key_owner_proof2 = key_owner_proof1.clone(); - }: { - Historical::::check_proof(key, key_owner_proof1); - } - verify { - assert!(Historical::::check_proof(key, key_owner_proof2).is_some()); - } - - impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test, extra = false); -} - -/// Sets up the benchmark for checking a membership proof. It creates the given -/// number of validators, sets random session keys and then creates a membership -/// proof for the first authority and returns its key and the proof. -fn check_membership_proof_setup( - n: u32, -) -> ((sp_runtime::KeyTypeId, &'static [u8; 32]), sp_session::MembershipProof) { - pallet_staking::ValidatorCount::::put(n); - - // create validators and set random session keys - for (n, who) in create_validators::(n, 1000).unwrap().into_iter().enumerate() { - use rand::{RngCore, SeedableRng}; - - let validator = T::Lookup::lookup(who).unwrap(); - let controller = pallet_staking::Pallet::::bonded(&validator).unwrap(); - - let keys = { - let mut keys = [0u8; 128]; - - // we keep the keys for the first validator as 0x00000... 
- if n > 0 { - let mut rng = rand::rngs::StdRng::seed_from_u64(n as u64); - rng.fill_bytes(&mut keys); - } - - keys - }; - - let keys: T::Keys = Decode::decode(&mut &keys[..]).unwrap(); - let proof: Vec = vec![]; - - Session::::set_keys(RawOrigin::Signed(controller).into(), keys, proof).unwrap(); - } - - Pallet::::on_initialize(frame_system::pallet_prelude::BlockNumberFor::::one()); - - // skip sessions until the new validator set is enacted - while Session::::validators().len() < n as usize { - Session::::rotate_session(); - } - - let key = (sp_runtime::KeyTypeId(*b"babe"), &[0u8; 32]); - - (key, Historical::::prove(key).unwrap()) -} +#[cfg(all(feature = "runtime-benchmarks", test))] +pub(crate) mod mock; diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs index f93f4d31e77..90c446808da 100644 --- a/substrate/frame/src/lib.rs +++ b/substrate/frame/src/lib.rs @@ -34,9 +34,9 @@ //! //! See [`polkadot_sdk::frame`](../polkadot_sdk_docs/polkadot_sdk/frame_runtime/index.html). //! -//! ## Warning: Experimental +//! ## WARNING: Experimental //! -//! This crate and all of its content is experimental, and should not yet be used in production. +//! **This crate and all of its content is experimental, and should not yet be used in production.** //! //! ## Underlying dependencies //! diff --git a/substrate/frame/system/benchmarking/src/inner.rs b/substrate/frame/system/benchmarking/src/inner.rs new file mode 100644 index 00000000000..c1631b0a2e3 --- /dev/null +++ b/substrate/frame/system/benchmarking/src/inner.rs @@ -0,0 +1,230 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Frame System benchmarks. + +use codec::Encode; +use frame_benchmarking::v2::*; +use frame_support::{dispatch::DispatchClass, storage, traits::Get}; +use frame_system::{Call, Pallet as System, RawOrigin}; +use sp_core::storage::well_known_keys; +use sp_runtime::traits::Hash; +use sp_std::{prelude::*, vec}; + +pub struct Pallet(System); +pub trait Config: frame_system::Config { + /// Adds ability to the Runtime to test against their sample code. + /// + /// Default is `../res/kitchensink_runtime.compact.compressed.wasm`. + fn prepare_set_code_data() -> Vec { + include_bytes!("../res/kitchensink_runtime.compact.compressed.wasm").to_vec() + } + + /// Adds ability to the Runtime to prepare/initialize before running benchmark `set_code`. + fn setup_set_code_requirements(_code: &Vec) -> Result<(), BenchmarkError> { + Ok(()) + } + + /// Adds ability to the Runtime to do custom validation after benchmark. + /// + /// Default is checking for `CodeUpdated` event . 
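+ /// (A runtime whose upgrade path emits a different event can override this hook;
+ /// the default body below simply asserts that `CodeUpdated` was the last event.)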
+ fn verify_set_code() { + System::::assert_last_event(frame_system::Event::::CodeUpdated.into()); + } +} + +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn remark( + b: Linear<0, { *T::BlockLength::get().max.get(DispatchClass::Normal) as u32 }>, + ) -> Result<(), BenchmarkError> { + let remark_message = vec![1; b as usize]; + let caller = whitelisted_caller(); + + #[extrinsic_call] + remark(RawOrigin::Signed(caller), remark_message); + + Ok(()) + } + + #[benchmark] + fn remark_with_event( + b: Linear<0, { *T::BlockLength::get().max.get(DispatchClass::Normal) as u32 }>, + ) -> Result<(), BenchmarkError> { + let remark_message = vec![1; b as usize]; + let caller: T::AccountId = whitelisted_caller(); + let hash = T::Hashing::hash(&remark_message[..]); + + #[extrinsic_call] + remark_with_event(RawOrigin::Signed(caller.clone()), remark_message); + + System::::assert_last_event( + frame_system::Event::::Remarked { sender: caller, hash }.into(), + ); + Ok(()) + } + + #[benchmark] + fn set_heap_pages() -> Result<(), BenchmarkError> { + #[extrinsic_call] + set_heap_pages(RawOrigin::Root, Default::default()); + + Ok(()) + } + + #[benchmark] + fn set_code() -> Result<(), BenchmarkError> { + let runtime_blob = T::prepare_set_code_data(); + T::setup_set_code_requirements(&runtime_blob)?; + + #[extrinsic_call] + set_code(RawOrigin::Root, runtime_blob); + + T::verify_set_code(); + Ok(()) + } + + #[benchmark(extra)] + fn set_code_without_checks() -> Result<(), BenchmarkError> { + // Assume Wasm ~4MB + let code = vec![1; 4_000_000 as usize]; + T::setup_set_code_requirements(&code)?; + + #[block] + { + System::::set_code_without_checks(RawOrigin::Root.into(), code)?; + } + + let current_code = + storage::unhashed::get_raw(well_known_keys::CODE).ok_or("Code not stored.")?; + assert_eq!(current_code.len(), 4_000_000 as usize); + Ok(()) + } + + #[benchmark(skip_meta)] + fn set_storage(i: Linear<0, { 1_000 }>) -> Result<(), BenchmarkError> { + // Set up i items to add + let mut items = Vec::new(); + for j in 0..i { + let hash = (i, j).using_encoded(T::Hashing::hash).as_ref().to_vec(); + items.push((hash.clone(), hash.clone())); + } + + let items_to_verify = items.clone(); + + #[extrinsic_call] + set_storage(RawOrigin::Root, items); + + // Verify that they're actually in the storage. + for (item, _) in items_to_verify { + let value = storage::unhashed::get_raw(&item).ok_or("No value stored")?; + assert_eq!(value, *item); + } + Ok(()) + } + + #[benchmark(skip_meta)] + fn kill_storage(i: Linear<0, { 1_000 }>) -> Result<(), BenchmarkError> { + // Add i items to storage + let mut items = Vec::with_capacity(i as usize); + for j in 0..i { + let hash = (i, j).using_encoded(T::Hashing::hash).as_ref().to_vec(); + storage::unhashed::put_raw(&hash, &hash); + items.push(hash); + } + + // Verify that they're actually in the storage. + for item in &items { + let value = storage::unhashed::get_raw(item).ok_or("No value stored")?; + assert_eq!(value, *item); + } + + let items_to_verify = items.clone(); + + #[extrinsic_call] + kill_storage(RawOrigin::Root, items); + + // Verify that they're not in the storage anymore. 
+ for item in items_to_verify { + assert!(storage::unhashed::get_raw(&item).is_none()); + } + Ok(()) + } + + #[benchmark(skip_meta)] + fn kill_prefix(p: Linear<0, { 1_000 }>) -> Result<(), BenchmarkError> { + let prefix = p.using_encoded(T::Hashing::hash).as_ref().to_vec(); + let mut items = Vec::with_capacity(p as usize); + // add p items that share a prefix + for i in 0..p { + let hash = (p, i).using_encoded(T::Hashing::hash).as_ref().to_vec(); + let key = [&prefix[..], &hash[..]].concat(); + storage::unhashed::put_raw(&key, &key); + items.push(key); + } + + // Verify that they're actually in the storage. + for item in &items { + let value = storage::unhashed::get_raw(item).ok_or("No value stored")?; + assert_eq!(value, *item); + } + + #[extrinsic_call] + kill_prefix(RawOrigin::Root, prefix, p); + + // Verify that they're not in the storage anymore. + for item in items { + assert!(storage::unhashed::get_raw(&item).is_none()); + } + Ok(()) + } + + #[benchmark] + fn authorize_upgrade() -> Result<(), BenchmarkError> { + let runtime_blob = T::prepare_set_code_data(); + T::setup_set_code_requirements(&runtime_blob)?; + let hash = T::Hashing::hash(&runtime_blob); + + #[extrinsic_call] + authorize_upgrade(RawOrigin::Root, hash); + + assert!(System::::authorized_upgrade().is_some()); + Ok(()) + } + + #[benchmark] + fn apply_authorized_upgrade() -> Result<(), BenchmarkError> { + let runtime_blob = T::prepare_set_code_data(); + T::setup_set_code_requirements(&runtime_blob)?; + let hash = T::Hashing::hash(&runtime_blob); + // Will be heavier when it needs to do verification (i.e. don't use `...without_checks`). + System::::authorize_upgrade(RawOrigin::Root.into(), hash)?; + + #[extrinsic_call] + apply_authorized_upgrade(RawOrigin::Root, runtime_blob); + + // Can't check for `CodeUpdated` in parachain upgrades. Just check that the authorization is + // gone. + assert!(System::::authorized_upgrade().is_none()); + Ok(()) + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/substrate/frame/system/benchmarking/src/lib.rs b/substrate/frame/system/benchmarking/src/lib.rs index 29100faa751..e55038aeb95 100644 --- a/substrate/frame/system/benchmarking/src/lib.rs +++ b/substrate/frame/system/benchmarking/src/lib.rs @@ -15,221 +15,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Benchmarks for Utility Pallet +//! Frame System benchmarks. #![cfg_attr(not(feature = "std"), no_std)] -#![cfg(feature = "runtime-benchmarks")] -use codec::Encode; -use frame_benchmarking::v2::*; -use frame_support::{dispatch::DispatchClass, storage, traits::Get}; -use frame_system::{Call, Pallet as System, RawOrigin}; -use sp_core::storage::well_known_keys; -use sp_runtime::traits::Hash; -use sp_std::{prelude::*, vec}; +#[cfg(feature = "runtime-benchmarks")] +pub mod inner; -mod mock; +#[cfg(feature = "runtime-benchmarks")] +pub use inner::*; -pub struct Pallet(System); -pub trait Config: frame_system::Config { - /// Adds ability to the Runtime to test against their sample code. - /// - /// Default is `../res/kitchensink_runtime.compact.compressed.wasm`. - fn prepare_set_code_data() -> Vec { - include_bytes!("../res/kitchensink_runtime.compact.compressed.wasm").to_vec() - } - - /// Adds ability to the Runtime to prepare/initialize before running benchmark `set_code`. 
- fn setup_set_code_requirements(_code: &Vec) -> Result<(), BenchmarkError> { - Ok(()) - } - - /// Adds ability to the Runtime to do custom validation after benchmark. - /// - /// Default is checking for `CodeUpdated` event . - fn verify_set_code() { - System::::assert_last_event(frame_system::Event::::CodeUpdated.into()); - } -} - -#[benchmarks] -mod benchmarks { - use super::*; - - #[benchmark] - fn remark( - b: Linear<0, { *T::BlockLength::get().max.get(DispatchClass::Normal) as u32 }>, - ) -> Result<(), BenchmarkError> { - let remark_message = vec![1; b as usize]; - let caller = whitelisted_caller(); - - #[extrinsic_call] - remark(RawOrigin::Signed(caller), remark_message); - - Ok(()) - } - - #[benchmark] - fn remark_with_event( - b: Linear<0, { *T::BlockLength::get().max.get(DispatchClass::Normal) as u32 }>, - ) -> Result<(), BenchmarkError> { - let remark_message = vec![1; b as usize]; - let caller: T::AccountId = whitelisted_caller(); - let hash = T::Hashing::hash(&remark_message[..]); - - #[extrinsic_call] - remark_with_event(RawOrigin::Signed(caller.clone()), remark_message); - - System::::assert_last_event( - frame_system::Event::::Remarked { sender: caller, hash }.into(), - ); - Ok(()) - } - - #[benchmark] - fn set_heap_pages() -> Result<(), BenchmarkError> { - #[extrinsic_call] - set_heap_pages(RawOrigin::Root, Default::default()); - - Ok(()) - } - - #[benchmark] - fn set_code() -> Result<(), BenchmarkError> { - let runtime_blob = T::prepare_set_code_data(); - T::setup_set_code_requirements(&runtime_blob)?; - - #[extrinsic_call] - set_code(RawOrigin::Root, runtime_blob); - - T::verify_set_code(); - Ok(()) - } - - #[benchmark(extra)] - fn set_code_without_checks() -> Result<(), BenchmarkError> { - // Assume Wasm ~4MB - let code = vec![1; 4_000_000 as usize]; - T::setup_set_code_requirements(&code)?; - - #[block] - { - System::::set_code_without_checks(RawOrigin::Root.into(), code)?; - } - - let current_code = - storage::unhashed::get_raw(well_known_keys::CODE).ok_or("Code not stored.")?; - assert_eq!(current_code.len(), 4_000_000 as usize); - Ok(()) - } - - #[benchmark(skip_meta)] - fn set_storage(i: Linear<0, { 1_000 }>) -> Result<(), BenchmarkError> { - // Set up i items to add - let mut items = Vec::new(); - for j in 0..i { - let hash = (i, j).using_encoded(T::Hashing::hash).as_ref().to_vec(); - items.push((hash.clone(), hash.clone())); - } - - let items_to_verify = items.clone(); - - #[extrinsic_call] - set_storage(RawOrigin::Root, items); - - // Verify that they're actually in the storage. - for (item, _) in items_to_verify { - let value = storage::unhashed::get_raw(&item).ok_or("No value stored")?; - assert_eq!(value, *item); - } - Ok(()) - } - - #[benchmark(skip_meta)] - fn kill_storage(i: Linear<0, { 1_000 }>) -> Result<(), BenchmarkError> { - // Add i items to storage - let mut items = Vec::with_capacity(i as usize); - for j in 0..i { - let hash = (i, j).using_encoded(T::Hashing::hash).as_ref().to_vec(); - storage::unhashed::put_raw(&hash, &hash); - items.push(hash); - } - - // Verify that they're actually in the storage. - for item in &items { - let value = storage::unhashed::get_raw(item).ok_or("No value stored")?; - assert_eq!(value, *item); - } - - let items_to_verify = items.clone(); - - #[extrinsic_call] - kill_storage(RawOrigin::Root, items); - - // Verify that they're not in the storage anymore. 
- for item in items_to_verify { - assert!(storage::unhashed::get_raw(&item).is_none()); - } - Ok(()) - } - - #[benchmark(skip_meta)] - fn kill_prefix(p: Linear<0, { 1_000 }>) -> Result<(), BenchmarkError> { - let prefix = p.using_encoded(T::Hashing::hash).as_ref().to_vec(); - let mut items = Vec::with_capacity(p as usize); - // add p items that share a prefix - for i in 0..p { - let hash = (p, i).using_encoded(T::Hashing::hash).as_ref().to_vec(); - let key = [&prefix[..], &hash[..]].concat(); - storage::unhashed::put_raw(&key, &key); - items.push(key); - } - - // Verify that they're actually in the storage. - for item in &items { - let value = storage::unhashed::get_raw(item).ok_or("No value stored")?; - assert_eq!(value, *item); - } - - #[extrinsic_call] - kill_prefix(RawOrigin::Root, prefix, p); - - // Verify that they're not in the storage anymore. - for item in items { - assert!(storage::unhashed::get_raw(&item).is_none()); - } - Ok(()) - } - - #[benchmark] - fn authorize_upgrade() -> Result<(), BenchmarkError> { - let runtime_blob = T::prepare_set_code_data(); - T::setup_set_code_requirements(&runtime_blob)?; - let hash = T::Hashing::hash(&runtime_blob); - - #[extrinsic_call] - authorize_upgrade(RawOrigin::Root, hash); - - assert!(System::::authorized_upgrade().is_some()); - Ok(()) - } - - #[benchmark] - fn apply_authorized_upgrade() -> Result<(), BenchmarkError> { - let runtime_blob = T::prepare_set_code_data(); - T::setup_set_code_requirements(&runtime_blob)?; - let hash = T::Hashing::hash(&runtime_blob); - // Will be heavier when it needs to do verification (i.e. don't use `...without_checks`). - System::::authorize_upgrade(RawOrigin::Root.into(), hash)?; - - #[extrinsic_call] - apply_authorized_upgrade(RawOrigin::Root, runtime_blob); - - // Can't check for `CodeUpdated` in parachain upgrades. Just check that the authorization is - // gone. - assert!(System::::authorized_upgrade().is_none()); - Ok(()) - } - - impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); -} +#[cfg(all(feature = "runtime-benchmarks", test))] +pub(crate) mod mock; diff --git a/substrate/frame/try-runtime/src/inner.rs b/substrate/frame/try-runtime/src/inner.rs new file mode 100644 index 00000000000..591124e2ad9 --- /dev/null +++ b/substrate/frame/try-runtime/src/inner.rs @@ -0,0 +1,50 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Supporting types for try-runtime, testing and dry-running commands. + +pub use frame_support::traits::{TryStateSelect, UpgradeCheckSelect}; +use frame_support::weights::Weight; + +sp_api::decl_runtime_apis! { + /// Runtime api for testing the execution of a runtime upgrade. + pub trait TryRuntime { + /// dry-run runtime upgrades, returning the total weight consumed. + /// + /// This should do EXACTLY the same operations as the runtime would have done in the case of + /// a runtime upgrade (e.g. 
pallet ordering must be the same) + /// + /// Returns the consumed weight of the migration in case of a successful one, combined with + /// the total allowed block weight of the runtime. + /// + /// If `checks` is `true`, `pre_migrate` and `post_migrate` of each migration and + /// `try_state` of all pallets will be executed. Else, no. If checks are executed, the PoV + /// tracking is likely inaccurate. + fn on_runtime_upgrade(checks: UpgradeCheckSelect) -> (Weight, Weight); + + /// Execute the given block, but optionally disable state-root and signature checks. + /// + /// Optionally, a number of `try_state` hooks can also be executed after the block + /// execution. + fn execute_block( + block: Block, + state_root_check: bool, + signature_check: bool, + try_state: TryStateSelect, + ) -> Weight; + } +} diff --git a/substrate/frame/try-runtime/src/lib.rs b/substrate/frame/try-runtime/src/lib.rs index 43292efe210..9da2dd18ca2 100644 --- a/substrate/frame/try-runtime/src/lib.rs +++ b/substrate/frame/try-runtime/src/lib.rs @@ -18,36 +18,9 @@ //! Supporting types for try-runtime, testing and dry-running commands. #![cfg_attr(not(feature = "std"), no_std)] -#![cfg(feature = "try-runtime")] -pub use frame_support::traits::{TryStateSelect, UpgradeCheckSelect}; -use frame_support::weights::Weight; +#[cfg(feature = "try-runtime")] +pub mod inner; -sp_api::decl_runtime_apis! { - /// Runtime api for testing the execution of a runtime upgrade. - pub trait TryRuntime { - /// dry-run runtime upgrades, returning the total weight consumed. - /// - /// This should do EXACTLY the same operations as the runtime would have done in the case of - /// a runtime upgrade (e.g. pallet ordering must be the same) - /// - /// Returns the consumed weight of the migration in case of a successful one, combined with - /// the total allowed block weight of the runtime. - /// - /// If `checks` is `true`, `pre_migrate` and `post_migrate` of each migration and - /// `try_state` of all pallets will be executed. Else, no. If checks are executed, the PoV - /// tracking is likely inaccurate. - fn on_runtime_upgrade(checks: UpgradeCheckSelect) -> (Weight, Weight); - - /// Execute the given block, but optionally disable state-root and signature checks. - /// - /// Optionally, a number of `try_state` hooks can also be executed after the block - /// execution. 
- fn execute_block( - block: Block, - state_root_check: bool, - signature_check: bool, - try_state: TryStateSelect, - ) -> Weight; - } -} +#[cfg(feature = "try-runtime")] +pub use inner::*; diff --git a/substrate/primitives/consensus/babe/Cargo.toml b/substrate/primitives/consensus/babe/Cargo.toml index 2420f48b1f4..799d474aebe 100644 --- a/substrate/primitives/consensus/babe/Cargo.toml +++ b/substrate/primitives/consensus/babe/Cargo.toml @@ -26,7 +26,7 @@ sp-consensus-slots = { path = "../slots", default-features = false } sp-core = { path = "../../core", default-features = false } sp-inherents = { path = "../../inherents", default-features = false } sp-runtime = { path = "../../runtime", default-features = false } -sp-timestamp = { path = "../../timestamp", optional = true } +sp-timestamp = { path = "../../timestamp", optional = true, default-features = false } [features] default = ["std"] diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 833b2af95cd..8437497b02b 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -37,7 +37,7 @@ ss58-registry = { version = "1.34.0", default-features = false } sp-std = { path = "../std", default-features = false } sp-debug-derive = { path = "../debug-derive", default-features = false } sp-storage = { path = "../storage", default-features = false } -sp-externalities = { path = "../externalities", optional = true } +sp-externalities = { path = "../externalities", optional = true, default-features = false } futures = { version = "0.3.30", optional = true } dyn-clonable = { version = "0.9.0", optional = true } thiserror = { optional = true, workspace = true } diff --git a/substrate/primitives/session/Cargo.toml b/substrate/primitives/session/Cargo.toml index cdee4fb03e1..5314ccd6d96 100644 --- a/substrate/primitives/session/Cargo.toml +++ b/substrate/primitives/session/Cargo.toml @@ -20,9 +20,9 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { path = "../api", default-features = false } sp-core = { path = "../core", default-features = false } -sp-runtime = { path = "../runtime", optional = true } +sp-runtime = { path = "../runtime", optional = true, default-features = false } sp-staking = { path = "../staking", default-features = false } -sp-keystore = { path = "../keystore", optional = true } +sp-keystore = { path = "../keystore", optional = true, default-features = false } [features] default = ["std"] diff --git a/substrate/primitives/transaction-storage-proof/Cargo.toml b/substrate/primitives/transaction-storage-proof/Cargo.toml index 137a232fce7..6cce469d3f9 100644 --- a/substrate/primitives/transaction-storage-proof/Cargo.toml +++ b/substrate/primitives/transaction-storage-proof/Cargo.toml @@ -19,10 +19,10 @@ targets = ["x86_64-unknown-linux-gnu"] async-trait = { version = "0.1.79", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-core = { path = "../core", optional = true } +sp-core = { path = "../core", optional = true, default-features = false } sp-inherents = { path = "../inherents", default-features = false } sp-runtime = { path = "../runtime", default-features = false } -sp-trie = { path = "../trie", optional = true } +sp-trie = { path = "../trie", optional = true, 
default-features = false }
 
 [features]
 default = ["std"]
--
GitLab

From aa78fe218014f653f883173a6e8d610964d56fbe Mon Sep 17 00:00:00 2001
From: PG Herveou
Date: Wed, 17 Apr 2024 18:43:56 +0200
Subject: [PATCH 020/107] Contracts: Refactor test builder (#4158)

- Moved `substrate/frame/contracts/src/tests/builder.rs` into a pub
  test_utils module, so we can use that in the `pallet-contracts-mock-network`
  tests
- Refactor xcm tests to use XCM builders, and simplify the use case for
  xcm-send
---
 .../frame/contracts/mock-network/src/lib.rs   |   3 +-
 .../frame/contracts/mock-network/src/tests.rs | 168 +++++--------
 substrate/frame/contracts/src/lib.rs          |   1 +
 substrate/frame/contracts/src/test_utils.rs   |  30 +++
 .../frame/contracts/src/test_utils/builder.rs | 220 ++++++++++++++++++
 substrate/frame/contracts/src/tests.rs        |  43 +++-
 .../frame/contracts/src/tests/builder.rs      | 219 -----------------
 7 files changed, 346 insertions(+), 338 deletions(-)
 create mode 100644 substrate/frame/contracts/src/test_utils.rs
 create mode 100644 substrate/frame/contracts/src/test_utils/builder.rs
 delete mode 100644 substrate/frame/contracts/src/tests/builder.rs

diff --git a/substrate/frame/contracts/mock-network/src/lib.rs b/substrate/frame/contracts/mock-network/src/lib.rs
index 8a17a3f2fa7..20ded0f4a0b 100644
--- a/substrate/frame/contracts/mock-network/src/lib.rs
+++ b/substrate/frame/contracts/mock-network/src/lib.rs
@@ -23,6 +23,7 @@ pub mod relay_chain;
 mod tests;
 
 use crate::primitives::{AccountId, UNITS};
+pub use pallet_contracts::test_utils::{ALICE, BOB};
 use sp_runtime::BuildStorage;
 use xcm::latest::prelude::*;
 use xcm_executor::traits::ConvertLocation;
@@ -31,8 +32,6 @@ use xcm_simulator::{decl_test_network, decl_test_parachain, decl_test_relay_chai
 
 // Accounts
 pub const ADMIN: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([0u8; 32]);
-pub const ALICE: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([1u8; 32]);
-pub const BOB: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([2u8; 32]);
 
 // Balances
 pub const INITIAL_BALANCE: u128 = 1_000_000_000 * UNITS;
diff --git a/substrate/frame/contracts/mock-network/src/tests.rs b/substrate/frame/contracts/mock-network/src/tests.rs
index 39aa9bebc0f..5632f75e787 100644
--- a/substrate/frame/contracts/mock-network/src/tests.rs
+++ b/substrate/frame/contracts/mock-network/src/tests.rs
@@ -22,45 +22,31 @@ use crate::{
 	relay_chain, MockNet, ParaA, ParachainBalances, Relay, ALICE, BOB, INITIAL_BALANCE,
 };
 use codec::{Decode, Encode};
-use frame_support::{
-	pallet_prelude::Weight,
-	traits::{fungibles::Mutate, Currency},
-};
-use pallet_balances::{BalanceLock, Reasons};
-use pallet_contracts::{Code, CollectEvents, DebugInfo, Determinism};
+use frame_support::traits::{fungibles::Mutate, Currency};
+use pallet_contracts::{test_utils::builder::*, Code};
 use pallet_contracts_fixtures::compile_module;
 use pallet_contracts_uapi::ReturnErrorCode;
 use xcm::{v4::prelude::*, VersionedLocation, VersionedXcm};
 use xcm_simulator::TestExt;
 
-type ParachainContracts = pallet_contracts::Pallet<parachain::Runtime>;
-
 macro_rules! assert_return_code {
 	( $x:expr , $y:expr $(,)? ) => {{
 		assert_eq!(u32::from_le_bytes($x.data[..].try_into().unwrap()), $y as u32);
 	}};
 }
 
+fn bare_call(dest: sp_runtime::AccountId32) -> BareCallBuilder<parachain::Runtime> {
+	BareCallBuilder::<parachain::Runtime>::bare_call(ALICE, dest)
+}
+
 /// Instantiate the tests contract, and fund it with some balance and assets.
 fn instantiate_test_contract(name: &str) -> AccountId {
 	let (wasm, _) = compile_module::<parachain::Runtime>(name).unwrap();
 
 	// Instantiate contract.
let contract_addr = ParaA::execute_with(|| { - ParachainContracts::bare_instantiate( - ALICE, - 0, - Weight::MAX, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id + BareInstantiateBuilder::::bare_instantiate(ALICE, Code::Upload(wasm)) + .build_and_unwrap_account_id() }); // Funds contract account with some balance and assets. @@ -85,27 +71,18 @@ fn test_xcm_execute() { // Execute XCM instructions through the contract. ParaA::execute_with(|| { let amount: u128 = 10 * CENTS; + let assets: Asset = (Here, amount).into(); + let beneficiary = AccountId32 { network: None, id: BOB.clone().into() }; // The XCM used to transfer funds to Bob. - let message: Xcm<()> = Xcm(vec![ - WithdrawAsset(vec![(Here, amount).into()].into()), - DepositAsset { - assets: All.into(), - beneficiary: AccountId32 { network: None, id: BOB.clone().into() }.into(), - }, - ]); - - let result = ParachainContracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - Weight::MAX, - None, - VersionedXcm::V4(message).encode().encode(), - DebugInfo::UnsafeDebug, - CollectEvents::UnsafeCollect, - Determinism::Enforced, - ); + let message: Xcm<()> = Xcm::builder_unsafe() + .withdraw_asset(assets.clone()) + .deposit_asset(assets, beneficiary) + .build(); + + let result = bare_call(contract_addr.clone()) + .data(VersionedXcm::V4(message).encode().encode()) + .build(); assert_eq!(result.gas_consumed, result.gas_required); assert_return_code!(&result.result.unwrap(), ReturnErrorCode::Success); @@ -127,29 +104,22 @@ fn test_xcm_execute_incomplete() { // Execute XCM instructions through the contract. ParaA::execute_with(|| { + let assets: Asset = (Here, amount).into(); + let beneficiary = AccountId32 { network: None, id: BOB.clone().into() }; + // The XCM used to transfer funds to Bob. - let message: Xcm<()> = Xcm(vec![ - WithdrawAsset(vec![(Here, amount).into()].into()), + let message: Xcm<()> = Xcm::builder_unsafe() + .withdraw_asset(assets.clone()) // This will fail as the contract does not have enough balance to complete both // withdrawals. - WithdrawAsset(vec![(Here, INITIAL_BALANCE).into()].into()), - DepositAsset { - assets: All.into(), - beneficiary: AccountId32 { network: None, id: BOB.clone().into() }.into(), - }, - ]); - - let result = ParachainContracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - Weight::MAX, - None, - VersionedXcm::V4(message).encode().encode(), - DebugInfo::UnsafeDebug, - CollectEvents::UnsafeCollect, - Determinism::Enforced, - ); + .withdraw_asset((Here, INITIAL_BALANCE)) + .buy_execution(assets.clone(), Unlimited) + .deposit_asset(assets, beneficiary) + .build(); + + let result = bare_call(contract_addr.clone()) + .data(VersionedXcm::V4(message).encode().encode()) + .build(); assert_eq!(result.gas_consumed, result.gas_required); assert_return_code!(&result.result.unwrap(), ReturnErrorCode::XcmExecutionFailed); @@ -175,28 +145,16 @@ fn test_xcm_execute_reentrant_call() { }); // The XCM used to transfer funds to Bob. 
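// Editor's aside (a sketch, not part of the diff): these tests replace the
// hand-rolled `Xcm(vec![...])` construction with the derived builder API.
// `Xcm::builder()` statically requires assets to be loaded into holding and
// fees to be paid (e.g. via `buy_execution`), while `Xcm::builder_unsafe()`
// skips those checks, which keeps test messages terse:
//
//     let message: Xcm<()> = Xcm::builder_unsafe()
//         .withdraw_asset(assets.clone())
//         .deposit_asset(assets, beneficiary)
//         .build();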
- let message: Xcm = Xcm(vec![ - Transact { - origin_kind: OriginKind::Native, - require_weight_at_most: 1_000_000_000.into(), - call: transact_call.encode().into(), - }, - ExpectTransactStatus(MaybeErrorCode::Success), - ]); - - let result = ParachainContracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - Weight::MAX, - None, - VersionedXcm::V4(message).encode().encode(), - DebugInfo::UnsafeDebug, - CollectEvents::UnsafeCollect, - Determinism::Enforced, - ); + let message: Xcm = Xcm::builder_unsafe() + .transact(OriginKind::Native, 1_000_000_000, transact_call.encode()) + .expect_transact_status(MaybeErrorCode::Success) + .build(); - assert_return_code!(&result.result.unwrap(), ReturnErrorCode::XcmExecutionFailed); + let result = bare_call(contract_addr.clone()) + .data(VersionedXcm::V4(message).encode().encode()) + .build_and_unwrap_result(); + + assert_return_code!(&result, ReturnErrorCode::XcmExecutionFailed); // Funds should not change hands as the XCM transact failed. assert_eq!(ParachainBalances::free_balance(BOB), INITIAL_BALANCE); @@ -207,40 +165,36 @@ fn test_xcm_execute_reentrant_call() { fn test_xcm_send() { MockNet::reset(); let contract_addr = instantiate_test_contract("xcm_send"); + let amount = 1_000 * CENTS; let fee = parachain::estimate_message_fee(4); // Accounts for the `DescendOrigin` instruction added by `send_xcm` - // Send XCM instructions through the contract, to lock some funds on the relay chain. + // Send XCM instructions through the contract, to transfer some funds from the contract + // derivative account to Alice on the relay chain. ParaA::execute_with(|| { - let dest = Location::from(Parent); - let dest = VersionedLocation::V4(dest); - - let message: Xcm<()> = Xcm(vec![ - WithdrawAsset((Here, fee).into()), - BuyExecution { fees: (Here, fee).into(), weight_limit: WeightLimit::Unlimited }, - LockAsset { asset: (Here, 5 * CENTS).into(), unlocker: (Parachain(1)).into() }, - ]); - let message = VersionedXcm::V4(message); - let exec = ParachainContracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - Weight::MAX, - None, - (dest, message.encode()).encode(), - DebugInfo::UnsafeDebug, - CollectEvents::UnsafeCollect, - Determinism::Enforced, - ); + let dest = VersionedLocation::V4(Parent.into()); + let assets: Asset = (Here, amount).into(); + let beneficiary = AccountId32 { network: None, id: ALICE.clone().into() }; + + let message: Xcm<()> = Xcm::builder() + .withdraw_asset(assets.clone()) + .buy_execution((Here, fee), Unlimited) + .deposit_asset(assets, beneficiary) + .build(); + + let result = bare_call(contract_addr.clone()) + .data((dest, VersionedXcm::V4(message).encode()).encode()) + .build_and_unwrap_result(); - let mut data = &exec.result.unwrap().data[..]; + let mut data = &result.data[..]; XcmHash::decode(&mut data).expect("Failed to decode xcm_send message_id"); }); Relay::execute_with(|| { - // Check if the funds are locked on the relay chain. 
+		let derived_contract_addr = &parachain_account_sovereign_account_id(1, contract_addr);
 		assert_eq!(
-			relay_chain::Balances::locks(&parachain_account_sovereign_account_id(1, contract_addr)),
-			vec![BalanceLock { id: *b"py/xcmlk", amount: 5 * CENTS, reasons: Reasons::All }]
+			INITIAL_BALANCE - amount,
+			relay_chain::Balances::free_balance(derived_contract_addr)
 		);
+		assert_eq!(INITIAL_BALANCE + amount - fee, relay_chain::Balances::free_balance(ALICE));
 	});
 }
diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs
index edc4c872bfc..b381fd2dc4f 100644
--- a/substrate/frame/contracts/src/lib.rs
+++ b/substrate/frame/contracts/src/lib.rs
@@ -101,6 +101,7 @@ mod wasm;
 pub mod chain_extension;
 pub mod debug;
 pub mod migration;
+pub mod test_utils;
 pub mod weights;
 
 #[cfg(test)]
diff --git a/substrate/frame/contracts/src/test_utils.rs b/substrate/frame/contracts/src/test_utils.rs
new file mode 100644
index 00000000000..564b2d2e3bd
--- /dev/null
+++ b/substrate/frame/contracts/src/test_utils.rs
@@ -0,0 +1,30 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Shared utilities for testing contracts.
+//! This is not part of the tests module because it is made public for other crates to use.
+#![cfg(feature = "std")]
+use frame_support::weights::Weight;
+pub use sp_runtime::AccountId32;
+
+pub const ALICE: AccountId32 = AccountId32::new([1u8; 32]);
+pub const BOB: AccountId32 = AccountId32::new([2u8; 32]);
+pub const CHARLIE: AccountId32 = AccountId32::new([3u8; 32]);
+pub const DJANGO: AccountId32 = AccountId32::new([4u8; 32]);
+
+pub const GAS_LIMIT: Weight = Weight::from_parts(100_000_000_000, 3 * 1024 * 1024);
+pub mod builder;
diff --git a/substrate/frame/contracts/src/test_utils/builder.rs b/substrate/frame/contracts/src/test_utils/builder.rs
new file mode 100644
index 00000000000..94540eca5b4
--- /dev/null
+++ b/substrate/frame/contracts/src/test_utils/builder.rs
@@ -0,0 +1,220 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
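// Editor's aside (a sketch, not part of the generated file): each `builder!`
// invocation below expands to a struct whose fields start from defaults
// (zero value, `GAS_LIMIT`, no storage deposit limit) and can be overridden
// fluently before dispatch. Assuming a mock runtime `Test`, a deployed
// `contract_addr` and some encodable `input`:
//
//     let result = BareCallBuilder::<Test>::bare_call(ALICE, contract_addr)
//         .value(100)
//         .data(input.encode())
//         .build_and_unwrap_result();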
+ +use super::GAS_LIMIT; +use crate::{ + AccountIdLookupOf, AccountIdOf, BalanceOf, Code, CodeHash, CollectEvents, Config, + ContractExecResult, ContractInstantiateResult, DebugInfo, Determinism, EventRecordOf, + ExecReturnValue, InstantiateReturnValue, OriginFor, Pallet, Weight, +}; +use codec::{Encode, HasCompact}; +use core::fmt::Debug; +use frame_support::pallet_prelude::DispatchResultWithPostInfo; +use paste::paste; +use scale_info::TypeInfo; + +/// Helper macro to generate a builder for contract API calls. +macro_rules! builder { + // Entry point to generate a builder for the given method. + ( + $method:ident($($field:ident: $type:ty,)*) -> $result:ty; + $($extra:item)* + ) => { + paste!{ + builder!([< $method:camel Builder >], $method($($field: $type,)* ) -> $result; $($extra)*); + } + }; + // Generate the builder struct and its methods. + ( + $name:ident, + $method:ident($($field:ident: $type:ty,)*) -> $result:ty; + $($extra:item)* + ) => { + #[doc = concat!("A builder to construct a ", stringify!($method), " call")] + pub struct $name { + $($field: $type,)* + } + + #[allow(dead_code)] + impl $name + where + as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, + { + $( + #[doc = concat!("Set the ", stringify!($field))] + pub fn $field(mut self, value: $type) -> Self { + self.$field = value; + self + } + )* + + #[doc = concat!("Build the ", stringify!($method), " call")] + pub fn build(self) -> $result { + Pallet::::$method( + $(self.$field,)* + ) + } + + $($extra)* + } + } +} + +builder!( + instantiate_with_code( + origin: OriginFor, + value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option< as codec::HasCompact>::Type>, + code: Vec, + data: Vec, + salt: Vec, + ) -> DispatchResultWithPostInfo; + + /// Create an [`InstantiateWithCodeBuilder`] with default values. + pub fn instantiate_with_code(origin: OriginFor, code: Vec) -> Self { + Self { + origin: origin, + value: 0u32.into(), + gas_limit: GAS_LIMIT, + storage_deposit_limit: None, + code, + data: vec![], + salt: vec![], + } + } +); + +builder!( + instantiate( + origin: OriginFor, + value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option< as codec::HasCompact>::Type>, + code_hash: CodeHash, + data: Vec, + salt: Vec, + ) -> DispatchResultWithPostInfo; + + /// Create an [`InstantiateBuilder`] with default values. + pub fn instantiate(origin: OriginFor, code_hash: CodeHash) -> Self { + Self { + origin, + value: 0u32.into(), + gas_limit: GAS_LIMIT, + storage_deposit_limit: None, + code_hash, + data: vec![], + salt: vec![], + } + } +); + +builder!( + bare_instantiate( + origin: AccountIdOf, + value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option>, + code: Code>, + data: Vec, + salt: Vec, + debug: DebugInfo, + collect_events: CollectEvents, + ) -> ContractInstantiateResult, BalanceOf, EventRecordOf>; + + /// Build the instantiate call and unwrap the result. + pub fn build_and_unwrap_result(self) -> InstantiateReturnValue> { + self.build().result.unwrap() + } + + /// Build the instantiate call and unwrap the account id. 
+ pub fn build_and_unwrap_account_id(self) -> AccountIdOf { + self.build().result.unwrap().account_id + } + + pub fn bare_instantiate(origin: AccountIdOf, code: Code>) -> Self { + Self { + origin, + value: 0u32.into(), + gas_limit: GAS_LIMIT, + storage_deposit_limit: None, + code, + data: vec![], + salt: vec![], + debug: DebugInfo::Skip, + collect_events: CollectEvents::Skip, + } + } +); + +builder!( + call( + origin: OriginFor, + dest: AccountIdLookupOf, + value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option< as codec::HasCompact>::Type>, + data: Vec, + ) -> DispatchResultWithPostInfo; + + /// Create a [`CallBuilder`] with default values. + pub fn call(origin: OriginFor, dest: AccountIdLookupOf) -> Self { + CallBuilder { + origin, + dest, + value: 0u32.into(), + gas_limit: GAS_LIMIT, + storage_deposit_limit: None, + data: vec![], + } + } +); + +builder!( + bare_call( + origin: AccountIdOf, + dest: AccountIdOf, + value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option>, + data: Vec, + debug: DebugInfo, + collect_events: CollectEvents, + determinism: Determinism, + ) -> ContractExecResult, EventRecordOf>; + + /// Build the call and unwrap the result. + pub fn build_and_unwrap_result(self) -> ExecReturnValue { + self.build().result.unwrap() + } + + /// Create a [`BareCallBuilder`] with default values. + pub fn bare_call(origin: AccountIdOf, dest: AccountIdOf) -> Self { + Self { + origin, + dest, + value: 0u32.into(), + gas_limit: GAS_LIMIT, + storage_deposit_limit: None, + data: vec![], + debug: DebugInfo::Skip, + collect_events: CollectEvents::Skip, + determinism: Determinism::Enforced, + } + } +); diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index 57b804a51e4..8fe845fcf0f 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -15,7 +15,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod builder; mod pallet_dummy; mod test_debug; @@ -98,7 +97,6 @@ macro_rules! 
assert_refcount { } pub mod test_utils { - use super::{Contracts, DepositPerByte, DepositPerItem, Hash, SysConfig, Test}; use crate::{ exec::AccountIdOf, BalanceOf, CodeHash, CodeInfo, CodeInfoOf, Config, ContractInfo, @@ -166,6 +164,38 @@ pub mod test_utils { } } +mod builder { + use super::Test; + use crate::{ + test_utils::{builder::*, AccountId32, ALICE}, + tests::RuntimeOrigin, + AccountIdLookupOf, Code, CodeHash, + }; + + pub fn bare_instantiate(code: Code>) -> BareInstantiateBuilder { + BareInstantiateBuilder::::bare_instantiate(ALICE, code) + } + + pub fn bare_call(dest: AccountId32) -> BareCallBuilder { + BareCallBuilder::::bare_call(ALICE, dest) + } + + pub fn instantiate_with_code(code: Vec) -> InstantiateWithCodeBuilder { + InstantiateWithCodeBuilder::::instantiate_with_code( + RuntimeOrigin::signed(ALICE), + code, + ) + } + + pub fn instantiate(code_hash: CodeHash) -> InstantiateBuilder { + InstantiateBuilder::::instantiate(RuntimeOrigin::signed(ALICE), code_hash) + } + + pub fn call(dest: AccountIdLookupOf) -> CallBuilder { + CallBuilder::::call(RuntimeOrigin::signed(ALICE), dest) + } +} + impl Test { pub fn set_unstable_interface(unstable_interface: bool) { UNSTABLE_INTERFACE.with(|v| *v.borrow_mut() = unstable_interface); @@ -2439,14 +2469,7 @@ fn failed_deposit_charge_should_roll_back_call() { transfer_proxy_call, ); - >::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - None, - data.encode(), - ) + builder::call(addr_caller).data(data.encode()).build() }) }; diff --git a/substrate/frame/contracts/src/tests/builder.rs b/substrate/frame/contracts/src/tests/builder.rs deleted file mode 100644 index 08d12503a29..00000000000 --- a/substrate/frame/contracts/src/tests/builder.rs +++ /dev/null @@ -1,219 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::{AccountId32, Test, ALICE, GAS_LIMIT}; -use crate::{ - tests::RuntimeOrigin, AccountIdLookupOf, AccountIdOf, BalanceOf, Code, CodeHash, CollectEvents, - ContractExecResult, ContractInstantiateResult, DebugInfo, Determinism, EventRecordOf, - ExecReturnValue, OriginFor, Pallet, Weight, -}; -use codec::Compact; -use frame_support::pallet_prelude::DispatchResultWithPostInfo; -use paste::paste; - -/// Helper macro to generate a builder for contract API calls. -macro_rules! builder { - // Entry point to generate a builder for the given method. - ( - $method:ident($($field:ident: $type:ty,)*) -> $result:ty - ) => { - paste!{ - builder!([< $method:camel Builder >], $method($($field: $type,)* ) -> $result); - } - }; - // Generate the builder struct and its methods. 
- ( - $name:ident, - $method:ident( - $($field:ident: $type:ty,)* - ) -> $result:ty - ) => { - #[doc = concat!("A builder to construct a ", stringify!($method), " call")] - pub struct $name { - $($field: $type,)* - } - - #[allow(dead_code)] - impl $name - { - $( - #[doc = concat!("Set the ", stringify!($field))] - pub fn $field(mut self, value: $type) -> Self { - self.$field = value; - self - } - )* - - #[doc = concat!("Build the ", stringify!($method), " call")] - pub fn build(self) -> $result { - Pallet::::$method( - $(self.$field,)* - ) - } - } - } -} - -builder!( - instantiate_with_code( - origin: OriginFor, - value: BalanceOf, - gas_limit: Weight, - storage_deposit_limit: Option>>, - code: Vec, - data: Vec, - salt: Vec, - ) -> DispatchResultWithPostInfo -); - -builder!( - instantiate( - origin: OriginFor, - value: BalanceOf, - gas_limit: Weight, - storage_deposit_limit: Option>>, - code_hash: CodeHash, - data: Vec, - salt: Vec, - ) -> DispatchResultWithPostInfo -); - -builder!( - bare_instantiate( - origin: AccountIdOf, - value: BalanceOf, - gas_limit: Weight, - storage_deposit_limit: Option>, - code: Code>, - data: Vec, - salt: Vec, - debug: DebugInfo, - collect_events: CollectEvents, - ) -> ContractInstantiateResult, BalanceOf, EventRecordOf> -); - -builder!( - call( - origin: OriginFor, - dest: AccountIdLookupOf, - value: BalanceOf, - gas_limit: Weight, - storage_deposit_limit: Option>>, - data: Vec, - ) -> DispatchResultWithPostInfo -); - -builder!( - bare_call( - origin: AccountIdOf, - dest: AccountIdOf, - value: BalanceOf, - gas_limit: Weight, - storage_deposit_limit: Option>, - data: Vec, - debug: DebugInfo, - collect_events: CollectEvents, - determinism: Determinism, - ) -> ContractExecResult, EventRecordOf> -); - -/// Create a [`BareInstantiateBuilder`] with default values. -pub fn bare_instantiate(code: Code>) -> BareInstantiateBuilder { - BareInstantiateBuilder { - origin: ALICE, - value: 0, - gas_limit: GAS_LIMIT, - storage_deposit_limit: None, - code, - data: vec![], - salt: vec![], - debug: DebugInfo::Skip, - collect_events: CollectEvents::Skip, - } -} - -impl BareInstantiateBuilder { - /// Build the instantiate call and unwrap the result. - pub fn build_and_unwrap_result(self) -> crate::InstantiateReturnValue> { - self.build().result.unwrap() - } - - /// Build the instantiate call and unwrap the account id. - pub fn build_and_unwrap_account_id(self) -> AccountIdOf { - self.build().result.unwrap().account_id - } -} - -/// Create a [`BareCallBuilder`] with default values. -pub fn bare_call(dest: AccountId32) -> BareCallBuilder { - BareCallBuilder { - origin: ALICE, - dest, - value: 0, - gas_limit: GAS_LIMIT, - storage_deposit_limit: None, - data: vec![], - debug: DebugInfo::Skip, - collect_events: CollectEvents::Skip, - determinism: Determinism::Enforced, - } -} - -impl BareCallBuilder { - /// Build the call and unwrap the result. - pub fn build_and_unwrap_result(self) -> ExecReturnValue { - self.build().result.unwrap() - } -} - -/// Create an [`InstantiateWithCodeBuilder`] with default values. -pub fn instantiate_with_code(code: Vec) -> InstantiateWithCodeBuilder { - InstantiateWithCodeBuilder { - origin: RuntimeOrigin::signed(ALICE), - value: 0, - gas_limit: GAS_LIMIT, - storage_deposit_limit: None, - code, - data: vec![], - salt: vec![], - } -} - -/// Create an [`InstantiateBuilder`] with default values. 
-pub fn instantiate(code_hash: CodeHash) -> InstantiateBuilder { - InstantiateBuilder { - origin: RuntimeOrigin::signed(ALICE), - value: 0, - gas_limit: GAS_LIMIT, - storage_deposit_limit: None, - code_hash, - data: vec![], - salt: vec![], - } -} - -/// Create a [`CallBuilder`] with default values. -pub fn call(dest: AccountIdLookupOf) -> CallBuilder { - CallBuilder { - origin: RuntimeOrigin::signed(ALICE), - dest, - value: 0, - gas_limit: GAS_LIMIT, - storage_deposit_limit: None, - data: vec![], - } -} -- GitLab From 305d311d5c732fcc4629f3295768f1ed44ef434c Mon Sep 17 00:00:00 2001 From: Muharem Date: Wed, 17 Apr 2024 18:45:01 +0200 Subject: [PATCH 021/107] Asset Conversion: Pool Touch Call (#3251) Introduce `touch` call designed to address operational prerequisites before providing liquidity to a pool. This function ensures that essential requirements, such as the presence of the pool's accounts, are fulfilled. It is particularly beneficial in scenarios where a pool creator removes the pool's accounts without providing liquidity. --------- Co-authored-by: command-bot <> --- .../src/weights/pallet_asset_conversion.rs | 22 +++ .../src/weights/pallet_asset_conversion.rs | 22 +++ prdoc/pr_3251.prdoc | 14 ++ .../asset-conversion/src/benchmarking.rs | 67 +++++++-- substrate/frame/asset-conversion/src/lib.rs | 65 ++++++++- .../frame/asset-conversion/src/weights.rs | 129 ++++++++++++------ substrate/frame/assets/src/lib.rs | 2 + 7 files changed, 268 insertions(+), 53 deletions(-) create mode 100644 prdoc/pr_3251.prdoc diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion.rs index 0486932d1d6..ec5a4084361 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion.rs @@ -154,4 +154,26 @@ impl pallet_asset_conversion::WeightInfo for WeightInfo .saturating_add(T::DbWeight::get().writes(4)) .saturating_add(Weight::from_parts(0, 393).saturating_mul(n.into())) } + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:2 w:2) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 3]`. + fn touch(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1571` + // Estimated: `6360` + // Minimum execution time: 381_000_000 picoseconds. 
+ Weight::from_parts(398_540_909, 6360) + // Standard Error: 1_330_283 + .saturating_add(Weight::from_parts(209_463_636, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion.rs index 7a5aed3d7c6..1c5b9be8f8e 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion.rs @@ -153,4 +153,26 @@ impl pallet_asset_conversion::WeightInfo for WeightInfo .saturating_add(T::DbWeight::get().writes(4)) .saturating_add(Weight::from_parts(0, 393).saturating_mul(n.into())) } + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:2 w:2) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 3]`. + fn touch(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1571` + // Estimated: `6360` + // Minimum execution time: 381_000_000 picoseconds. + Weight::from_parts(398_540_909, 6360) + // Standard Error: 1_330_283 + .saturating_add(Weight::from_parts(209_463_636, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) + } } diff --git a/prdoc/pr_3251.prdoc b/prdoc/pr_3251.prdoc new file mode 100644 index 00000000000..1f95c228f7a --- /dev/null +++ b/prdoc/pr_3251.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Asset Conversion: Pool Touch Call" + +doc: + - audience: Runtime Dev + description: | + Introduce `touch` call designed to address operational prerequisites before providing liquidity to a pool. + This function ensures that essential requirements, such as the presence of the pool's accounts, are fulfilled. + It is particularly beneficial in scenarios where a pool creator removes the pool's accounts without providing liquidity. 
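As a rough sketch of the intended flow (the `NativeOrWithId` asset kinds, the origin and the `user` account below are illustrative assumptions, not part of this PR), any signed account can re-create a pool's accounts before adding liquidity:

```rust
// Hypothetical call site in a runtime test: the pool for (native, asset 1)
// exists, but its accounts were reaped, so providing liquidity would fail.
use frame_support::assert_ok;
use frame_support::traits::fungible::NativeOrWithId;

let asset1 = Box::new(NativeOrWithId::<u32>::Native);
let asset2 = Box::new(NativeOrWithId::<u32>::WithId(1));
// `touch` re-creates the missing pool accounts (possibly holding a deposit
// from the caller) and emits `Touched` on success.
assert_ok!(AssetConversion::touch(RuntimeOrigin::signed(user), asset1, asset2));
```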
+ +crates: + - name: pallet-asset-conversion diff --git a/substrate/frame/asset-conversion/src/benchmarking.rs b/substrate/frame/asset-conversion/src/benchmarking.rs index f0e02c802ad..c5f68476b1d 100644 --- a/substrate/frame/asset-conversion/src/benchmarking.rs +++ b/substrate/frame/asset-conversion/src/benchmarking.rs @@ -24,7 +24,7 @@ use frame_support::{ assert_ok, traits::{ fungible::NativeOrWithId, - fungibles::{Create, Inspect, Mutate}, + fungibles::{Create, Inspect, Mutate, Refund}, }, }; use frame_system::RawOrigin as SystemOrigin; @@ -75,12 +75,21 @@ where } /// Create the `asset` and mint the `amount` for the `caller`. -fn create_asset(caller: &T::AccountId, asset: &T::AssetKind, amount: T::Balance) -where +fn create_asset( + caller: &T::AccountId, + asset: &T::AssetKind, + amount: T::Balance, + is_sufficient: bool, +) where T::Assets: Create + Mutate, { if !T::Assets::asset_exists(asset.clone()) { - assert_ok!(T::Assets::create(asset.clone(), caller.clone(), true, T::Balance::one())); + assert_ok!(T::Assets::create( + asset.clone(), + caller.clone(), + is_sufficient, + T::Balance::one() + )); } assert_ok!(T::Assets::mint_into( asset.clone(), @@ -141,8 +150,8 @@ where T::Assets::minimum_balance(asset1.clone()), T::Assets::minimum_balance(asset2.clone()), ); - create_asset::(caller, asset1, liquidity1); - create_asset::(caller, asset2, liquidity2); + create_asset::(caller, asset1, liquidity1, true); + create_asset::(caller, asset2, liquidity2, true); let lp_token = AssetConversion::::get_next_pool_asset_id(); mint_setup_fee_asset::(caller, asset1, asset2, &lp_token); @@ -172,8 +181,8 @@ mod benchmarks { fn create_pool() { let caller: T::AccountId = whitelisted_caller(); let (asset1, asset2) = T::BenchmarkHelper::create_pair(0, 1); - create_asset::(&caller, &asset1, T::Assets::minimum_balance(asset1.clone())); - create_asset::(&caller, &asset2, T::Assets::minimum_balance(asset2.clone())); + create_asset::(&caller, &asset1, T::Assets::minimum_balance(asset1.clone()), true); + create_asset::(&caller, &asset2, T::Assets::minimum_balance(asset2.clone()), true); let lp_token = AssetConversion::::get_next_pool_asset_id(); create_fee_asset::(&caller); @@ -358,5 +367,47 @@ mod benchmarks { assert_eq!(actual_balance, init_caller_balance + T::Balance::one()); } + #[benchmark] + fn touch(n: Linear<0, 3>) { + let caller: T::AccountId = whitelisted_caller(); + let (asset1, asset2) = T::BenchmarkHelper::create_pair(0, 1); + let pool_id = T::PoolLocator::pool_id(&asset1, &asset2).unwrap(); + let pool_account = T::PoolLocator::address(&pool_id).unwrap(); + + create_fee_asset::(&caller); + create_asset::(&caller, &asset1, ::Balance::one(), false); + create_asset::(&caller, &asset2, ::Balance::one(), false); + let lp_token = AssetConversion::::get_next_pool_asset_id(); + mint_setup_fee_asset::(&caller, &asset1, &asset2, &lp_token); + + assert_ok!(AssetConversion::::create_pool( + SystemOrigin::Signed(caller.clone()).into(), + Box::new(asset1.clone()), + Box::new(asset2.clone()) + )); + + if n > 0 && + ::Assets::deposit_held(asset1.clone(), pool_account.clone()).is_some() + { + let _ = ::Assets::refund(asset1.clone(), pool_account.clone()); + } + if n > 1 && + ::Assets::deposit_held(asset2.clone(), pool_account.clone()).is_some() + { + let _ = ::Assets::refund(asset2.clone(), pool_account.clone()); + } + if n > 2 && + ::PoolAssets::deposit_held(lp_token.clone(), pool_account.clone()) + .is_some() + { + let _ = ::PoolAssets::refund(lp_token, pool_account); + } + + #[extrinsic_call] + 
_(SystemOrigin::Signed(caller.clone()), Box::new(asset1.clone()), Box::new(asset2.clone()));
+
+		assert_last_event::<T>(Event::Touched { pool_id, who: caller }.into());
+	}
+
 	impl_benchmark_test_suite!(AssetConversion, crate::mock::new_test_ext(), crate::mock::Test);
 }
diff --git a/substrate/frame/asset-conversion/src/lib.rs b/substrate/frame/asset-conversion/src/lib.rs
index 0bf73e8809c..bb6e70a7fe9 100644
--- a/substrate/frame/asset-conversion/src/lib.rs
+++ b/substrate/frame/asset-conversion/src/lib.rs
@@ -98,7 +98,10 @@ use sp_std::{boxed::Box, collections::btree_set::BTreeSet, vec::Vec};
 #[frame_support::pallet]
 pub mod pallet {
 	use super::*;
-	use frame_support::pallet_prelude::*;
+	use frame_support::{
+		pallet_prelude::{DispatchResult, *},
+		traits::fungibles::Refund,
+	};
 	use frame_system::pallet_prelude::*;
 	use sp_arithmetic::{traits::Unsigned, Permill};
 
@@ -130,7 +133,8 @@ pub mod pallet {
 		type Assets: Inspect
 			+ Mutate
 			+ AccountTouch
-			+ Balanced;
+			+ Balanced
+			+ Refund;
 
 		/// Liquidity pool identifier.
 		type PoolId: Parameter + MaxEncodedLen + Ord;
@@ -149,7 +153,8 @@ pub mod pallet {
 		type PoolAssets: Inspect
 			+ Create
 			+ Mutate
-			+ AccountTouch;
+			+ AccountTouch
+			+ Refund;
 
 		/// A % the liquidity providers will take of every swap. Represents 10ths of a percent.
 		#[pallet::constant]
@@ -281,6 +286,13 @@ pub mod pallet {
 			/// E.g. (A, amount_in) -> (Dot, amount_out) -> (B, amount_out)
 			path: BalancePath<T>,
 		},
+		/// Pool has been touched in order to fulfill operational requirements.
+		Touched {
+			/// The ID of the pool.
+			pool_id: T::PoolId,
+			/// The account initiating the touch.
+			who: T::AccountId,
+		},
 	}
 
 	#[pallet::error]
@@ -391,7 +403,9 @@ pub mod pallet {
 			NextPoolAssetId::<T>::set(Some(next_lp_token_id));
 
 			T::PoolAssets::create(lp_token.clone(), pool_account.clone(), false, 1u32.into())?;
-			T::PoolAssets::touch(lp_token.clone(), &pool_account, &sender)?;
+			if T::PoolAssets::should_touch(lp_token.clone(), &pool_account) {
+				T::PoolAssets::touch(lp_token.clone(), &pool_account, &sender)?
+			};
 
 			let pool_info = PoolInfo { lp_token: lp_token.clone() };
 			Pools::<T>::insert(pool_id.clone(), pool_info);
@@ -656,6 +670,49 @@ pub mod pallet {
 			)?;
 			Ok(())
 		}
+
+		/// Touch an existing pool to fulfill prerequisites before providing liquidity, such as
+		/// ensuring that the pool's accounts are in place. It is typically useful when a pool
+		/// creator removes the pool's accounts and does not provide liquidity. This action may
+		/// involve holding assets from the caller as a deposit for creating the pool's accounts.
+		///
+		/// The origin must be Signed.
+		///
+		/// - `asset1`: The asset ID of an existing pool with a pair (asset1, asset2).
+		/// - `asset2`: The asset ID of an existing pool with a pair (asset1, asset2).
+		///
+		/// Emits `Touched` event when successful.
+ #[pallet::call_index(5)] + #[pallet::weight(T::WeightInfo::touch(3))] + pub fn touch( + origin: OriginFor, + asset1: Box, + asset2: Box, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + + let pool_id = T::PoolLocator::pool_id(&asset1, &asset2) + .map_err(|_| Error::::InvalidAssetPair)?; + let pool = Pools::::get(&pool_id).ok_or(Error::::PoolNotFound)?; + let pool_account = + T::PoolLocator::address(&pool_id).map_err(|_| Error::::InvalidAssetPair)?; + + let mut refunds_number: u32 = 0; + if T::Assets::should_touch(*asset1.clone(), &pool_account) { + T::Assets::touch(*asset1, &pool_account, &who)?; + refunds_number += 1; + } + if T::Assets::should_touch(*asset2.clone(), &pool_account) { + T::Assets::touch(*asset2, &pool_account, &who)?; + refunds_number += 1; + } + if T::PoolAssets::should_touch(pool.lp_token.clone(), &pool_account) { + T::PoolAssets::touch(pool.lp_token, &pool_account, &who)?; + refunds_number += 1; + } + Self::deposit_event(Event::Touched { pool_id, who }); + Ok(Some(T::WeightInfo::touch(refunds_number)).into()) + } } impl Pallet { diff --git a/substrate/frame/asset-conversion/src/weights.rs b/substrate/frame/asset-conversion/src/weights.rs index 212f56073f9..9aea19dbf57 100644 --- a/substrate/frame/asset-conversion/src/weights.rs +++ b/substrate/frame/asset-conversion/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_asset_conversion` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-p5qp1txx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_asset_conversion -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/asset-conversion/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_asset_conversion +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/asset-conversion/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -56,6 +54,7 @@ pub trait WeightInfo { fn remove_liquidity() -> Weight; fn swap_exact_tokens_for_tokens(n: u32, ) -> Weight; fn swap_tokens_for_exact_tokens(n: u32, ) -> Weight; + fn touch(n: u32, ) -> Weight; } /// Weights for `pallet_asset_conversion` using the Substrate node and recommended hardware. 
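Note how the new call is priced in the weight files that follow: `touch` is declared with the worst-case weight `T::WeightInfo::touch(3)` and then returns the weight for the number of refunds actually performed, so callers are only charged for the work done. A minimal sketch of that refund pattern (hypothetical pallet code, not from this diff):

```rust
#[pallet::call_index(0)]
#[pallet::weight(T::WeightInfo::touch(3))] // charge the worst case up front
pub fn touch_like(origin: OriginFor<T>) -> DispatchResultWithPostInfo {
    let _who = ensure_signed(origin)?;
    let mut refunds_number: u32 = 0;
    // ... perform up to three touch/refund operations, counting each one ...
    // Returning `Some(actual_weight)` refunds the caller the difference
    // between the declared worst case and the weight actually consumed.
    Ok(Some(T::WeightInfo::touch(refunds_number)).into())
}
```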
@@ -63,7 +62,7 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// Storage: `AssetConversion::Pools` (r:1 w:1) /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) + /// Storage: `System::Account` (r:2 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Assets::Asset` (r:2 w:0) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) @@ -77,9 +76,9 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `910` // Estimated: `6360` - // Minimum execution time: 86_709_000 picoseconds. - Weight::from_parts(88_841_000, 6360) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Minimum execution time: 95_080_000 picoseconds. + Weight::from_parts(97_241_000, 6360) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: `AssetConversion::Pools` (r:1 w:0) @@ -98,8 +97,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1507` // Estimated: `11426` - // Minimum execution time: 148_672_000 picoseconds. - Weight::from_parts(151_824_000, 11426) + // Minimum execution time: 147_652_000 picoseconds. + Weight::from_parts(153_331_000, 11426) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } @@ -117,8 +116,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1650` // Estimated: `11426` - // Minimum execution time: 130_743_000 picoseconds. - Weight::from_parts(132_793_000, 11426) + // Minimum execution time: 130_738_000 picoseconds. + Weight::from_parts(134_350_000, 11426) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) } @@ -131,10 +130,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `89 + n * (419 ±0)` // Estimated: `990 + n * (5218 ±0)` - // Minimum execution time: 81_173_000 picoseconds. - Weight::from_parts(82_574_000, 990) - // Standard Error: 335_929 - .saturating_add(Weight::from_parts(11_607_291, 0).saturating_mul(n.into())) + // Minimum execution time: 79_681_000 picoseconds. + Weight::from_parts(81_461_000, 990) + // Standard Error: 320_959 + .saturating_add(Weight::from_parts(11_223_703, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) @@ -148,21 +147,45 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `89 + n * (419 ±0)` // Estimated: `990 + n * (5218 ±0)` - // Minimum execution time: 80_562_000 picoseconds. - Weight::from_parts(82_501_000, 990) - // Standard Error: 329_460 - .saturating_add(Weight::from_parts(11_295_339, 0).saturating_mul(n.into())) + // Minimum execution time: 78_988_000 picoseconds. 
+ Weight::from_parts(81_025_000, 990) + // Standard Error: 320_021 + .saturating_add(Weight::from_parts(11_040_712, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) } + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:2 w:2) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 3]`. + fn touch(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1571` + // Estimated: `6360` + // Minimum execution time: 45_757_000 picoseconds. + Weight::from_parts(48_502_032, 6360) + // Standard Error: 62_850 + .saturating_add(Weight::from_parts(19_450_978, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) + } } // For backwards compatibility and tests. impl WeightInfo for () { /// Storage: `AssetConversion::Pools` (r:1 w:1) /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) + /// Storage: `System::Account` (r:2 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Assets::Asset` (r:2 w:0) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) @@ -176,9 +199,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `910` // Estimated: `6360` - // Minimum execution time: 86_709_000 picoseconds. - Weight::from_parts(88_841_000, 6360) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Minimum execution time: 95_080_000 picoseconds. + Weight::from_parts(97_241_000, 6360) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: `AssetConversion::Pools` (r:1 w:0) @@ -197,8 +220,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1507` // Estimated: `11426` - // Minimum execution time: 148_672_000 picoseconds. - Weight::from_parts(151_824_000, 11426) + // Minimum execution time: 147_652_000 picoseconds. + Weight::from_parts(153_331_000, 11426) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } @@ -216,8 +239,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1650` // Estimated: `11426` - // Minimum execution time: 130_743_000 picoseconds. 
- Weight::from_parts(132_793_000, 11426) + // Minimum execution time: 130_738_000 picoseconds. + Weight::from_parts(134_350_000, 11426) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) } @@ -230,10 +253,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `89 + n * (419 ±0)` // Estimated: `990 + n * (5218 ±0)` - // Minimum execution time: 81_173_000 picoseconds. - Weight::from_parts(82_574_000, 990) - // Standard Error: 335_929 - .saturating_add(Weight::from_parts(11_607_291, 0).saturating_mul(n.into())) + // Minimum execution time: 79_681_000 picoseconds. + Weight::from_parts(81_461_000, 990) + // Standard Error: 320_959 + .saturating_add(Weight::from_parts(11_223_703, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) @@ -247,12 +270,36 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `89 + n * (419 ±0)` // Estimated: `990 + n * (5218 ±0)` - // Minimum execution time: 80_562_000 picoseconds. - Weight::from_parts(82_501_000, 990) - // Standard Error: 329_460 - .saturating_add(Weight::from_parts(11_295_339, 0).saturating_mul(n.into())) + // Minimum execution time: 78_988_000 picoseconds. + Weight::from_parts(81_025_000, 990) + // Standard Error: 320_021 + .saturating_add(Weight::from_parts(11_040_712, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) } + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:2 w:2) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 3]`. + fn touch(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1571` + // Estimated: `6360` + // Minimum execution time: 45_757_000 picoseconds. 
+ Weight::from_parts(48_502_032, 6360) + // Standard Error: 62_850 + .saturating_add(Weight::from_parts(19_450_978, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(n.into()))) + } } diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index 37073b28065..9056b1eefbd 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -1697,7 +1697,9 @@ pub mod pallet { fn should_touch(asset: T::AssetId, who: &T::AccountId) -> bool { match Asset::::get(&asset) { + // refer to the [`Self::new_account`] function for more details. Some(info) if info.is_sufficient => false, + Some(_) if frame_system::Pallet::::can_accrue_consumers(who, 2) => false, Some(_) => !Account::::contains_key(asset, who), _ => true, } -- GitLab From d591b16f6b1dec88003323cdae0c3abe3b5c9cbe Mon Sep 17 00:00:00 2001 From: Tin Chung <56880684+chungquantin@users.noreply.github.com> Date: Thu, 18 Apr 2024 13:44:49 +0700 Subject: [PATCH 022/107] Remove NotConcrete error (#3867) # Description - Link to issue: https://github.com/paritytech/polkadot-sdk/issues/3651 polkadot address: 19nSqFQorfF2HxD3oBzWM3oCh4SaCRKWt1yvmgaPYGCo71J --- polkadot/xcm/pallet-xcm/src/lib.rs | 9 ++++++--- polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs | 2 -- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index cf22b86cf82..698ec6998b4 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -535,20 +535,24 @@ pub mod pallet { LockNotFound, /// The unlock operation cannot succeed because there are still consumers of the lock. InUse, - /// Invalid non-concrete asset. - InvalidAssetNotConcrete, /// Invalid asset, reserve chain could not be determined for it. + #[codec(index = 21)] InvalidAssetUnknownReserve, /// Invalid asset, do not support remote asset reserves with different fees reserves. + #[codec(index = 22)] InvalidAssetUnsupportedReserve, /// Too many assets with different reserve locations have been attempted for transfer. + #[codec(index = 23)] TooManyReserves, /// Local XCM execution incomplete. + #[codec(index = 24)] LocalExecutionIncomplete, /// Could not decode XCM. + #[codec(index = 25)] UnableToDecode, /// XCM encoded length is too large. /// Returned when an XCM encoded length is larger than `MaxXcmEncodedSize`. + #[codec(index = 26)] XcmTooLarge, } @@ -565,7 +569,6 @@ pub mod pallet { impl From for Error { fn from(e: AssetTransferError) -> Self { match e { - AssetTransferError::NotConcrete => Error::::InvalidAssetNotConcrete, AssetTransferError::UnknownReserve => Error::::InvalidAssetUnknownReserve, } } diff --git a/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs b/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs index 6d72eaf680f..22e4a3bd61a 100644 --- a/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs +++ b/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs @@ -23,8 +23,6 @@ use xcm::prelude::*; /// Errors related to determining asset transfer support. #[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] pub enum Error { - /// Invalid non-concrete asset. - NotConcrete, /// Reserve chain could not be determined for assets. 
UnknownReserve, } -- GitLab From b6fab8046e42283d14e9fa6beda32c878b3e801e Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Thu, 18 Apr 2024 09:40:45 +0200 Subject: [PATCH 023/107] [ci] Run `test-linux-stable-int` on self-hosted GitHub Runners (#4178) PR adds `test-linux-stable-int` and `quick-benchmarks` as github action jobs. It's a copy of `test-linux-stable-int` and `quick-benchmarks` from gitlab ci and now it's needed to make a stress test for self-hosted github runners. `test-linux-stable-int` and `quick-benchmarks` in gitlab are still `Required` whereas this workflow is allowed to fail. cc https://github.com/paritytech/infrastructure/issues/46 --- .github/workflows/test-github-actions.yml | 43 +++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 .github/workflows/test-github-actions.yml diff --git a/.github/workflows/test-github-actions.yml b/.github/workflows/test-github-actions.yml new file mode 100644 index 00000000000..a78420dfe5e --- /dev/null +++ b/.github/workflows/test-github-actions.yml @@ -0,0 +1,43 @@ +name: test-github-actions + +on: + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + merge_group: +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + test-linux-stable-int: + runs-on: arc-runners-polkadot-sdk + timeout-minutes: 30 + container: + image: "docker.io/paritytech/ci-unified:bullseye-1.75.0-2024-01-22-v20240109" + env: + RUSTFLAGS: "-C debug-assertions -D warnings" + RUST_BACKTRACE: 1 + WASM_BUILD_NO_COLOR: 1 + WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" + # Ensure we run the UI tests. + RUN_UI_TESTS: 1 + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: script + run: WASM_BUILD_NO_COLOR=1 time cargo test -p staging-node-cli --release --locked -- --ignored + quick-benchmarks: + runs-on: arc-runners-polkadot-sdk + timeout-minutes: 30 + container: + image: "docker.io/paritytech/ci-unified:bullseye-1.75.0-2024-01-22-v20240109" + env: + RUSTFLAGS: "-C debug-assertions -D warnings" + RUST_BACKTRACE: "full" + WASM_BUILD_NO_COLOR: 1 + WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: script + run: time cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks --quiet -- benchmark pallet --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 --quiet -- GitLab From 76719da221d33117aadf6b7b9cc74e4fbeb25b34 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Thu, 18 Apr 2024 11:24:16 +0200 Subject: [PATCH 024/107] [ci] Update ci image with rust 1.77 and 2024-04-10 (#4077) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit cc https://github.com/paritytech/ci_cd/issues/974 --------- Co-authored-by: command-bot <> Co-authored-by: Bastian Köcher --- .github/workflows/fmt-check.yml | 2 +- .github/workflows/test-github-actions.yml | 4 +- .gitlab-ci.yml | 3 +- docs/contributor/container.md | 2 +- .../development_environment_advice.rs | 4 +- polkadot/node/core/runtime-api/src/cache.rs | 17 +- polkadot/node/core/runtime-api/src/lib.rs | 19 +- .../network/approval-distribution/src/lib.rs | 1 + .../src/validator_side/mod.rs | 1 + .../src/sender/send_task.rs | 9 +- .../runtime/parachains/src/inclusion/mod.rs | 47 +- substrate/client/consensus/aura/src/lib.rs | 12 +- 
.../grandpa/src/communication/tests.rs | 10 +- substrate/frame/contracts/fixtures/build.rs | 6 +- .../tests/benchmark_ui/invalid_origin.stderr | 2 +- .../deprecated_where_block.stderr | 534 +++++++++--------- ...umber_of_pallets_exceeds_tuple_size.stderr | 23 +- .../pallet_error_too_large.stderr | 2 +- .../undefined_call_part.stderr | 2 +- .../undefined_validate_unsigned_part.stderr | 60 +- .../support/test/tests/derive_no_bound.rs | 1 + .../call_argument_invalid_bound.stderr | 2 +- .../call_argument_invalid_bound_2.stderr | 6 +- .../call_argument_invalid_bound_3.stderr | 2 +- ...ev_mode_without_arg_max_encoded_len.stderr | 2 +- .../pallet_ui/event_field_not_member.stderr | 2 +- ...age_ensure_span_are_ok_on_wrong_gen.stderr | 20 +- ...re_span_are_ok_on_wrong_gen_unnamed.stderr | 20 +- .../pallet_ui/storage_info_unsatisfied.stderr | 2 +- .../storage_info_unsatisfied_nmap.stderr | 2 +- .../ui/impl_incorrect_method_signature.stderr | 8 +- ...reference_in_impl_runtime_apis_call.stderr | 12 +- 32 files changed, 413 insertions(+), 426 deletions(-) diff --git a/.github/workflows/fmt-check.yml b/.github/workflows/fmt-check.yml index efcf278c46e..324c9bfff7a 100644 --- a/.github/workflows/fmt-check.yml +++ b/.github/workflows/fmt-check.yml @@ -15,7 +15,7 @@ jobs: os: ["ubuntu-latest"] runs-on: ${{ matrix.os }} container: - image: docker.io/paritytech/ci-unified:bullseye-1.75.0-2024-01-22-v20240109 + image: docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408 steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 diff --git a/.github/workflows/test-github-actions.yml b/.github/workflows/test-github-actions.yml index a78420dfe5e..09cb4a25b9a 100644 --- a/.github/workflows/test-github-actions.yml +++ b/.github/workflows/test-github-actions.yml @@ -13,7 +13,7 @@ jobs: runs-on: arc-runners-polkadot-sdk timeout-minutes: 30 container: - image: "docker.io/paritytech/ci-unified:bullseye-1.75.0-2024-01-22-v20240109" + image: "docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408" env: RUSTFLAGS: "-C debug-assertions -D warnings" RUST_BACKTRACE: 1 @@ -30,7 +30,7 @@ jobs: runs-on: arc-runners-polkadot-sdk timeout-minutes: 30 container: - image: "docker.io/paritytech/ci-unified:bullseye-1.75.0-2024-01-22-v20240109" + image: "docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408" env: RUSTFLAGS: "-C debug-assertions -D warnings" RUST_BACKTRACE: "full" diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5e57dd86f14..77d31d96ee1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -21,7 +21,8 @@ workflow: - if: $CI_COMMIT_BRANCH variables: - CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE] + # CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE] + CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408" # BUILDAH_IMAGE is defined in group variables BUILDAH_COMMAND: "buildah --storage-driver overlay2" RELENG_SCRIPTS_BRANCH: "master" diff --git a/docs/contributor/container.md b/docs/contributor/container.md index 9c542f411c8..ec51b8b9d7c 100644 --- a/docs/contributor/container.md +++ b/docs/contributor/container.md @@ -24,7 +24,7 @@ The command below allows building a Linux binary without having to even install docker run --rm -it \ -w /polkadot-sdk \ -v $(pwd):/polkadot-sdk \ - paritytech/ci-unified:bullseye-1.75.0-2024-01-22-v20240222 \ + docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408 \ cargo build --release --locked -p polkadot-parachain-bin --bin polkadot-parachain sudo chown -R $(id 
-u):$(id -g) target/ ``` diff --git a/docs/sdk/src/reference_docs/development_environment_advice.rs b/docs/sdk/src/reference_docs/development_environment_advice.rs index 21bbe78836c..9ba95dfa032 100644 --- a/docs/sdk/src/reference_docs/development_environment_advice.rs +++ b/docs/sdk/src/reference_docs/development_environment_advice.rs @@ -38,7 +38,7 @@ //! // Use nightly formatting. //! // See the polkadot-sdk CI job that checks formatting for the current version used in //! // polkadot-sdk. -//! "rust-analyzer.rustfmt.extraArgs": ["+nightly-2024-01-22"], +//! "rust-analyzer.rustfmt.extraArgs": ["+nightly-2024-04-10"], //! } //! ``` //! @@ -79,7 +79,7 @@ //! # Use nightly formatting. //! # See the polkadot-sdk CI job that checks formatting for the current version used in //! # polkadot-sdk. -//! extraArgs = { "+nightly-2024-01-22" }, +//! extraArgs = { "+nightly-2024-04-10" }, //! }, //! }, //! ``` diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs index 7cd1f7ce728..05efbc533d0 100644 --- a/polkadot/node/core/runtime-api/src/cache.rs +++ b/polkadot/node/core/runtime-api/src/cache.rs @@ -24,8 +24,8 @@ use polkadot_primitives::{ CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, NodeFeatures, OccupiedCoreAssumption, - PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + PersistedValidationData, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, + ValidationCodeHash, ValidatorId, ValidatorIndex, }; /// For consistency we have the same capacity for all caches. We use 128 as we'll only need that @@ -561,7 +561,7 @@ pub(crate) enum RequestResult { // The structure of each variant is (relay_parent, [params,]*, result) Authorities(Hash, Vec), Validators(Hash, Vec), - MinimumBackingVotes(Hash, SessionIndex, u32), + MinimumBackingVotes(SessionIndex, u32), ValidatorGroups(Hash, (Vec>, GroupRotationInfo)), AvailabilityCores(Hash, Vec), PersistedValidationData(Hash, ParaId, OccupiedCoreAssumption, Option), @@ -589,19 +589,16 @@ pub(crate) enum RequestResult { FetchOnChainVotes(Hash, Option), PvfsRequirePrecheck(Hash, Vec), // This is a request with side-effects and no result, hence (). - SubmitPvfCheckStatement(Hash, PvfCheckStatement, ValidatorSignature, ()), + #[allow(dead_code)] + SubmitPvfCheckStatement(()), ValidationCodeHash(Hash, ParaId, OccupiedCoreAssumption, Option), Version(Hash, u32), Disputes(Hash, Vec<(SessionIndex, CandidateHash, DisputeState)>), UnappliedSlashes(Hash, Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>), KeyOwnershipProof(Hash, ValidatorId, Option), // This is a request with side-effects. 
- SubmitReportDisputeLost( - Hash, - slashing::DisputeProof, - slashing::OpaqueKeyOwnershipProof, - Option<()>, - ), + #[allow(dead_code)] + SubmitReportDisputeLost(Option<()>), ApprovalVotingParams(Hash, SessionIndex, ApprovalVotingParams), DisabledValidators(Hash, Vec), ParaBackingState(Hash, ParaId, Option), diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs index b7995aeeee7..c8b1d61e7be 100644 --- a/polkadot/node/core/runtime-api/src/lib.rs +++ b/polkadot/node/core/runtime-api/src/lib.rs @@ -101,7 +101,7 @@ where self.requests_cache.cache_authorities(relay_parent, authorities), Validators(relay_parent, validators) => self.requests_cache.cache_validators(relay_parent, validators), - MinimumBackingVotes(_, session_index, minimum_backing_votes) => self + MinimumBackingVotes(session_index, minimum_backing_votes) => self .requests_cache .cache_minimum_backing_votes(session_index, minimum_backing_votes), ValidatorGroups(relay_parent, groups) => @@ -155,7 +155,7 @@ where self.requests_cache.cache_on_chain_votes(relay_parent, scraped), PvfsRequirePrecheck(relay_parent, pvfs) => self.requests_cache.cache_pvfs_require_precheck(relay_parent, pvfs), - SubmitPvfCheckStatement(_, _, _, ()) => {}, + SubmitPvfCheckStatement(()) => {}, ValidationCodeHash(relay_parent, para_id, assumption, hash) => self .requests_cache .cache_validation_code_hash((relay_parent, para_id, assumption), hash), @@ -170,7 +170,7 @@ where .cache_key_ownership_proof((relay_parent, validator_id), key_ownership_proof), RequestResult::ApprovalVotingParams(_relay_parent, session_index, params) => self.requests_cache.cache_approval_voting_params(session_index, params), - SubmitReportDisputeLost(_, _, _, _) => {}, + SubmitReportDisputeLost(_) => {}, DisabledValidators(relay_parent, disabled_validators) => self.requests_cache.cache_disabled_validators(relay_parent, disabled_validators), ParaBackingState(relay_parent, para_id, constraints) => self @@ -370,7 +370,7 @@ where async fn poll_requests(&mut self) { // If there are no active requests, this future should be pending forever. 
if self.active_requests.len() == 0 { - return futures::pending!() + return futures::pending!(); } // If there are active requests, this will always resolve to `Some(_)` when a request is @@ -439,7 +439,7 @@ where }}; ($req_variant:ident, $api_name:ident ($($param:expr),*), ver = $version:expr, $sender:expr, result = ( $($results:expr),* ) ) => {{ let sender = $sender; - let version: u32 = $version; // enforce type for the version expression + let version: u32 = $version; // enforce type for the version expression let runtime_version = client.api_version_parachain_host(relay_parent).await .unwrap_or_else(|e| { gum::warn!( @@ -570,7 +570,8 @@ where SubmitPvfCheckStatement, submit_pvf_check_statement(stmt, signature), ver = 2, - sender + sender, + result = () ) }, Request::PvfsRequirePrecheck(sender) => { @@ -606,13 +607,15 @@ where SubmitReportDisputeLost, submit_report_dispute_lost(dispute_proof, key_ownership_proof), ver = Request::SUBMIT_REPORT_DISPUTE_LOST_RUNTIME_REQUIREMENT, - sender + sender, + result = () ), Request::MinimumBackingVotes(index, sender) => query!( MinimumBackingVotes, minimum_backing_votes(index), ver = Request::MINIMUM_BACKING_VOTES_RUNTIME_REQUIREMENT, - sender + sender, + result = (index) ), Request::DisabledValidators(sender) => query!( DisabledValidators, diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs index d360a18423e..369d82b45b0 100644 --- a/polkadot/node/network/approval-distribution/src/lib.rs +++ b/polkadot/node/network/approval-distribution/src/lib.rs @@ -148,6 +148,7 @@ enum ApprovalEntryError { InvalidCandidateIndex, DuplicateApproval, UnknownAssignment, + #[allow(dead_code)] AssignmentsFollowedDifferentPaths(RequiredRouting, RequiredRouting), } diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index d23279e8754..f7b07133bff 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -953,6 +953,7 @@ enum AdvertisementError { /// parent. ProtocolMisuse, /// Advertisement is invalid. + #[allow(dead_code)] Invalid(InsertAdvertisementError), } diff --git a/polkadot/node/network/dispute-distribution/src/sender/send_task.rs b/polkadot/node/network/dispute-distribution/src/sender/send_task.rs index 18c66066d16..54ccd10789d 100644 --- a/polkadot/node/network/dispute-distribution/src/sender/send_task.rs +++ b/polkadot/node/network/dispute-distribution/src/sender/send_task.rs @@ -16,7 +16,7 @@ use std::collections::{HashMap, HashSet}; -use futures::{future::RemoteHandle, Future, FutureExt}; +use futures::{Future, FutureExt}; use polkadot_node_network_protocol::{ request_response::{ @@ -64,7 +64,7 @@ pub struct SendTask { /// Status of a particular vote/statement delivery to a particular validator. enum DeliveryStatus { /// Request is still in flight. - Pending(RemoteHandle<()>), + Pending, /// Succeeded - no need to send request to this peer anymore. 
Succeeded, } @@ -297,9 +297,8 @@ async fn send_requests( metrics.time_dispute_request(), ); - let (remote, remote_handle) = fut.remote_handle(); - ctx.spawn("dispute-sender", remote.boxed()).map_err(FatalError::SpawnTask)?; - statuses.insert(receiver, DeliveryStatus::Pending(remote_handle)); + ctx.spawn("dispute-sender", fut.boxed()).map_err(FatalError::SpawnTask)?; + statuses.insert(receiver, DeliveryStatus::Pending); } let msg = NetworkBridgeTxMessage::SendRequests(reqs, IfDisconnected::ImmediateError); diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 903d01aa5c9..76caf740ebc 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -377,22 +377,45 @@ pub mod pallet { const LOG_TARGET: &str = "runtime::inclusion"; /// The reason that a candidate's outputs were rejected for. -#[derive(derive_more::From)] #[cfg_attr(feature = "std", derive(Debug))] -enum AcceptanceCheckErr { +enum AcceptanceCheckErr { HeadDataTooLarge, /// Code upgrades are not permitted at the current time. PrematureCodeUpgrade, /// The new runtime blob is too large. NewCodeTooLarge, /// The candidate violated this DMP acceptance criteria. - ProcessedDownwardMessages(dmp::ProcessedDownwardMessagesAcceptanceErr), + ProcessedDownwardMessages, /// The candidate violated this UMP acceptance criteria. - UpwardMessages(UmpAcceptanceCheckErr), + UpwardMessages, /// The candidate violated this HRMP watermark acceptance criteria. - HrmpWatermark(hrmp::HrmpWatermarkAcceptanceErr), + HrmpWatermark, /// The candidate violated this outbound HRMP acceptance criteria. - OutboundHrmp(hrmp::OutboundHrmpAcceptanceErr), + OutboundHrmp, +} + +impl From for AcceptanceCheckErr { + fn from(_: dmp::ProcessedDownwardMessagesAcceptanceErr) -> Self { + Self::ProcessedDownwardMessages + } +} + +impl From for AcceptanceCheckErr { + fn from(_: UmpAcceptanceCheckErr) -> Self { + Self::UpwardMessages + } +} + +impl From> for AcceptanceCheckErr { + fn from(_: hrmp::HrmpWatermarkAcceptanceErr) -> Self { + Self::HrmpWatermark + } +} + +impl From for AcceptanceCheckErr { + fn from(_: hrmp::OutboundHrmpAcceptanceErr) -> Self { + Self::OutboundHrmp + } } /// An error returned by [`Pallet::check_upward_messages`] that indicates a violation of one of @@ -1145,7 +1168,7 @@ const fn availability_threshold(n_validators: usize) -> usize { supermajority_threshold(n_validators) } -impl AcceptanceCheckErr { +impl AcceptanceCheckErr { /// Returns the same error so that it can be threaded through a needle of `DispatchError` and /// ultimately returned from a `Dispatchable`. 
fn strip_into_dispatch_err(self) -> Error { @@ -1154,10 +1177,10 @@ impl AcceptanceCheckErr { HeadDataTooLarge => Error::::HeadDataTooLarge, PrematureCodeUpgrade => Error::::PrematureCodeUpgrade, NewCodeTooLarge => Error::::NewCodeTooLarge, - ProcessedDownwardMessages(_) => Error::::IncorrectDownwardMessageHandling, - UpwardMessages(_) => Error::::InvalidUpwardMessages, - HrmpWatermark(_) => Error::::HrmpWatermarkMishandling, - OutboundHrmp(_) => Error::::InvalidOutboundHrmp, + ProcessedDownwardMessages => Error::::IncorrectDownwardMessageHandling, + UpwardMessages => Error::::InvalidUpwardMessages, + HrmpWatermark => Error::::HrmpWatermarkMishandling, + OutboundHrmp => Error::::InvalidOutboundHrmp, } } } @@ -1300,7 +1323,7 @@ impl CandidateCheckContext { upward_messages: &[primitives::UpwardMessage], hrmp_watermark: BlockNumberFor, horizontal_messages: &[primitives::OutboundHrmpMessage], - ) -> Result<(), AcceptanceCheckErr>> { + ) -> Result<(), AcceptanceCheckErr> { ensure!( head_data.0.len() <= self.config.max_head_data_size as _, AcceptanceCheckErr::HeadDataTooLarge, diff --git a/substrate/client/consensus/aura/src/lib.rs b/substrate/client/consensus/aura/src/lib.rs index e220aaac508..2d6264a4892 100644 --- a/substrate/client/consensus/aura/src/lib.rs +++ b/substrate/client/consensus/aura/src/lib.rs @@ -579,15 +579,15 @@ mod tests { type Error = sp_blockchain::Error; struct DummyFactory(Arc); - struct DummyProposer(u64, Arc); + struct DummyProposer(Arc); impl Environment for DummyFactory { type Proposer = DummyProposer; type CreateProposer = futures::future::Ready>; type Error = Error; - fn init(&mut self, parent_header: &::Header) -> Self::CreateProposer { - futures::future::ready(Ok(DummyProposer(parent_header.number + 1, self.0.clone()))) + fn init(&mut self, _: &::Header) -> Self::CreateProposer { + futures::future::ready(Ok(DummyProposer(self.0.clone()))) } } @@ -604,9 +604,9 @@ mod tests { _: Duration, _: Option, ) -> Self::Proposal { - let r = BlockBuilderBuilder::new(&*self.1) - .on_parent_block(self.1.chain_info().best_hash) - .fetch_parent_block_number(&*self.1) + let r = BlockBuilderBuilder::new(&*self.0) + .on_parent_block(self.0.chain_info().best_hash) + .fetch_parent_block_number(&*self.0) .unwrap() .with_inherent_digests(digests) .build() diff --git a/substrate/client/consensus/grandpa/src/communication/tests.rs b/substrate/client/consensus/grandpa/src/communication/tests.rs index 40d901b2fec..bc3023fc028 100644 --- a/substrate/client/consensus/grandpa/src/communication/tests.rs +++ b/substrate/client/consensus/grandpa/src/communication/tests.rs @@ -51,10 +51,8 @@ use std::{ #[derive(Debug)] pub(crate) enum Event { - EventStream(TracingUnboundedSender), WriteNotification(PeerId, Vec), Report(PeerId, ReputationChange), - Announce(Hash), } #[derive(Clone)] @@ -146,15 +144,13 @@ impl NetworkEventStream for TestNetwork { &self, _name: &'static str, ) -> Pin + Send>> { - let (tx, rx) = tracing_unbounded("test", 100_000); - let _ = self.sender.unbounded_send(Event::EventStream(tx)); - Box::pin(rx) + futures::stream::pending().boxed() } } impl NetworkBlock> for TestNetwork { - fn announce_block(&self, hash: Hash, _data: Option>) { - let _ = self.sender.unbounded_send(Event::Announce(hash)); + fn announce_block(&self, _: Hash, _data: Option>) { + unimplemented!(); } fn new_best_block_imported(&self, _hash: Hash, _number: NumberFor) { diff --git a/substrate/frame/contracts/fixtures/build.rs b/substrate/frame/contracts/fixtures/build.rs index 19aff37c160..baaeaf03420 100644 --- 
a/substrate/frame/contracts/fixtures/build.rs +++ b/substrate/frame/contracts/fixtures/build.rs @@ -163,7 +163,7 @@ fn invoke_cargo_fmt<'a>( ) -> Result<()> { // If rustfmt is not installed, skip the check. if !Command::new("rustup") - .args(["nightly-2024-01-22", "run", "rustfmt", "--version"]) + .args(["nightly-2024-04-10", "run", "rustfmt", "--version"]) .output() .map_or(false, |o| o.status.success()) { @@ -171,7 +171,7 @@ fn invoke_cargo_fmt<'a>( } let fmt_res = Command::new("rustup") - .args(["nightly-2024-01-22", "run", "rustfmt", "--check", "--config-path"]) + .args(["nightly-2024-04-10", "run", "rustfmt", "--check", "--config-path"]) .arg(config_path) .args(files) .output() @@ -186,7 +186,7 @@ fn invoke_cargo_fmt<'a>( eprintln!("{}\n{}", stdout, stderr); eprintln!( "Fixtures files are not formatted.\n - Please run `rustup nightly-2024-01-22 run rustfmt --config-path {} {}/*.rs`", + Please run `rustup nightly-2024-04-10 run rustfmt --config-path {} {}/*.rs`", config_path.display(), contract_dir.display() ); diff --git a/substrate/frame/support/test/tests/benchmark_ui/invalid_origin.stderr b/substrate/frame/support/test/tests/benchmark_ui/invalid_origin.stderr index 87d4f476a60..30f1289767f 100644 --- a/substrate/frame/support/test/tests/benchmark_ui/invalid_origin.stderr +++ b/substrate/frame/support/test/tests/benchmark_ui/invalid_origin.stderr @@ -2,7 +2,7 @@ error[E0277]: the trait bound `::RuntimeOrigin: --> tests/benchmark_ui/invalid_origin.rs:23:1 | 23 | #[benchmarks] - | ^^^^^^^^^^^^^ the trait `From<{integer}>` is not implemented for `::RuntimeOrigin` + | ^^^^^^^^^^^^^ the trait `From<{integer}>` is not implemented for `::RuntimeOrigin`, which is required by `{integer}: Into<_>` | = note: required for `{integer}` to implement `Into<::RuntimeOrigin>` = note: this error originates in the attribute macro `benchmarks` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr index 09c4d290ef5..96504b7ce77 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr @@ -23,133 +23,139 @@ error: use of deprecated constant `WhereSection::_w`: error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime` + | + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, +... 
| +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime` | note: required by a bound in `frame_system::Event` --> $WORKSPACE/substrate/frame/system/src/lib.rs | | pub enum Event { | ^^^^^^ required by this bound in `Event` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ within `RuntimeEvent`, the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeEvent: Sized` | note: required because it appears within the type `RuntimeEvent` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ note: required by a bound in `Clone` --> $RUST/core/src/clone.rs | | pub trait Clone: Sized { | ^^^^^ required by this bound in `Clone` - = note: this error originates in the derive macro `Clone` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the derive macro `Clone` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ within `RuntimeEvent`, the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeEvent: Sized` | note: required because it appears within the type `RuntimeEvent` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... 
| +27 | | } +28 | | } + | |_^ note: required by a bound in `EncodeLike` --> $CARGO/parity-scale-codec-3.6.5/src/encode_like.rs | | pub trait EncodeLike: Sized + Encode {} | ^^^^^ required by this bound in `EncodeLike` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ within `RuntimeEvent`, the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeEvent: Sized` | note: required because it appears within the type `RuntimeEvent` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ note: required by a bound in `Decode` --> $CARGO/parity-scale-codec-3.6.5/src/codec.rs | | pub trait Decode: Sized { | ^^^^^ required by this bound in `Decode` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied in `frame_system::Event` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... 
| +27 | | } +28 | | } + | |_^ within `frame_system::Event`, the trait `Config` is not implemented for `Runtime`, which is required by `frame_system::Event: Sized` | -note: required because it appears within the type `Event` +note: required because it appears within the type `frame_system::Event` --> $WORKSPACE/substrate/frame/system/src/lib.rs | | pub enum Event { @@ -159,22 +165,21 @@ note: required by a bound in `From` | | pub trait From: Sized { | ^ required by this bound in `From` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied in `frame_system::Event` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ within `frame_system::Event`, the trait `Config` is not implemented for `Runtime`, which is required by `frame_system::Event: Sized` | -note: required because it appears within the type `Event` +note: required because it appears within the type `frame_system::Event` --> $WORKSPACE/substrate/frame/system/src/lib.rs | | pub enum Event { @@ -184,22 +189,7 @@ note: required by a bound in `TryInto` | | pub trait TryInto: Sized { | ^ required by this bound in `TryInto` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0277]: the trait bound `Runtime: Config` is not satisfied - --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 - | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation -... | - | - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 @@ -212,134 +202,157 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied error[E0277]: the trait bound `RawOrigin<_>: TryFrom` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... 
| +27 | | } +28 | | } + | |_^ the trait `TryFrom` is not implemented for `RawOrigin<_>` | = help: the trait `TryFrom` is implemented for `RawOrigin<::AccountId>` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `Pallet: Callable` | = help: the trait `Callable` is implemented for `Pallet` = note: required for `Pallet` to implement `Callable` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` | = note: required for `Pallet` to implement `Callable` note: required because it appears within the type `RuntimeCall` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ note: required by a bound in `Clone` --> $RUST/core/src/clone.rs | | pub trait Clone: Sized { | ^^^^^ required by this bound in `Clone` - = note: this error originates in the derive macro `Clone` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the derive macro `Clone` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... 
| +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` | = note: required for `Pallet` to implement `Callable` note: required because it appears within the type `RuntimeCall` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ note: required by a bound in `EncodeLike` --> $CARGO/parity-scale-codec-3.6.5/src/encode_like.rs | | pub trait EncodeLike: Sized + Encode {} | ^^^^^ required by this bound in `EncodeLike` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` | = note: required for `Pallet` to implement `Callable` note: required because it appears within the type `RuntimeCall` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ note: required by a bound in `Decode` --> $CARGO/parity-scale-codec-3.6.5/src/codec.rs | | pub trait Decode: Sized { | ^^^^^ required by this bound in `Decode` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` + | + = note: required for `Pallet` to implement `Callable` +note: required because it appears within the type `RuntimeCall` + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, +... 
| +27 | | } +28 | | } + | |_^ +note: required by a bound in `frame_support::sp_runtime::traits::Dispatchable::Config` + --> $WORKSPACE/substrate/primitives/runtime/src/traits.rs + | + | type Config; + | ^^^^^^^^^^^^ required by this bound in `Dispatchable::Config` + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:26:3 @@ -353,165 +366,124 @@ note: required by a bound in `GenesisConfig` | pub struct GenesisConfig { | ^^^^^^ required by this bound in `GenesisConfig` +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` + | + = note: required for `Pallet` to implement `Callable` +note: required because it appears within the type `RuntimeCall` + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |_^ +note: required by a bound in `frame_support::pallet_prelude::ValidateUnsigned::Call` + --> $WORKSPACE/substrate/primitives/runtime/src/traits.rs + | + | type Call; + | ^^^^^^^^^^ required by this bound in `ValidateUnsigned::Call` + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ within `RuntimeEvent`, the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeEvent: Sized` | note: required because it appears within the type `RuntimeEvent` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... 
| +27 | | } +28 | | } + | |_^ note: required by a bound in `Result` --> $RUST/core/src/result.rs | | pub enum Result { | ^ required by this bound in `Result` - = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::codec::Decode` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::codec::Decode` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ within `RuntimeEvent`, the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeEvent: Sized` | note: required because it appears within the type `RuntimeEvent` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ note: required by a bound in `TryInto` --> $RUST/core/src/convert/mod.rs | | pub trait TryInto: Sized { | ^^^^^ required by this bound in `TryInto` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` | = note: required for `Pallet` to implement `Callable` note: required because it appears within the type `RuntimeCall` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... 
| +27 | | } +28 | | } + | |_^ note: required by a bound in `Result` --> $RUST/core/src/result.rs | | pub enum Result { | ^ required by this bound in `Result` - = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::codec::Decode` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0277]: the trait bound `Runtime: Config` is not satisfied - --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 - | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation -... | - | - = note: required for `Pallet` to implement `Callable` -note: required because it appears within the type `RuntimeCall` - --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 - | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation -... | -note: required by a bound in `frame_support::sp_runtime::traits::Dispatchable::Config` - --> $WORKSPACE/substrate/primitives/runtime/src/traits.rs - | - | type Config; - | ^^^^^^^^^^^^ required by this bound in `Dispatchable::Config` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0277]: the trait bound `Runtime: Config` is not satisfied - --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 - | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation -... | - | - = note: required for `Pallet` to implement `Callable` -note: required because it appears within the type `RuntimeCall` - --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 - | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation -... 
| -note: required by a bound in `frame_support::pallet_prelude::ValidateUnsigned::Call` - --> $WORKSPACE/substrate/primitives/runtime/src/traits.rs - | - | type Call; - | ^^^^^^^^^^ required by this bound in `ValidateUnsigned::Call` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::codec::Decode` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr index 6160f8234a3..dde58ba6959 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr @@ -5,23 +5,16 @@ error: The number of pallets exceeds the maximum number of tuple elements. To in | ^^^ error: recursion limit reached while expanding `frame_support::__private::tt_return!` - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:22:1 + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:66:1 | -22 | / #[frame_support::pallet] -23 | | mod pallet { -24 | | #[pallet::config] -25 | | pub trait Config: frame_system::Config {} +66 | / construct_runtime! { +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system::{Pallet, Call, Storage, Config, Event}, ... | -66 | |/ construct_runtime! { -67 | || pub struct Runtime -68 | || { -69 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, -... 
|| -180 | || } -181 | || } - | ||_^ - | |_| - | in this macro invocation +180 | | } +181 | | } + | |_^ | = help: consider increasing the recursion limit by adding a `#![recursion_limit = "256"]` attribute to your crate (`$CRATE`) = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr index ebbb9ffb0eb..75116f71919 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr @@ -10,4 +10,4 @@ error[E0080]: evaluation of constant value failed 97 | | } | |_^ the evaluated program panicked at 'The maximum encoded size of the error type in the `Pallet` pallet exceeds `MAX_MODULE_ERROR_ENCODED_SIZE`', $DIR/tests/construct_runtime_ui/pallet_error_too_large.rs:91:1 | - = note: this error originates in the macro `$crate::panic::panic_2021` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `$crate::panic::panic_2021` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr index b9cf58542f2..42e72bc90da 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr @@ -13,4 +13,4 @@ error: `Pallet` does not have #[pallet::call] defined, perhaps you should remove 72 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_call_check::is_call_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_call_check::is_call_part_defined` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr index f527cc2ff77..add71c2197e 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::validate_unsigned] defined, perhaps you 72 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_validate_unsigned_check::is_validate_unsigned_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_validate_unsigned_check::is_validate_unsigned_part_defined` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run with 
-Z macro-backtrace for more info) error[E0599]: no variant or associated item named `Pallet` found for enum `RuntimeCall` in the current scope --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:70:3 @@ -26,54 +26,50 @@ error[E0599]: no variant or associated item named `Pallet` found for enum `Runti | || -^^^^^^ variant or associated item not found in `RuntimeCall` | ||________| | | -... | +71 | | } +72 | | } + | |__- variant or associated item `Pallet` not found for this enum error[E0599]: no function or associated item named `pre_dispatch` found for struct `pallet::Pallet` in the current scope --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:66:1 | -28 | pub struct Pallet(_); - | -------------------- function or associated item `pre_dispatch` not found for this struct +28 | pub struct Pallet(_); + | -------------------- function or associated item `pre_dispatch` not found for this struct ... -66 | construct_runtime! { - | __^ - | | _| - | || -67 | || pub struct Runtime -68 | || { -69 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, -70 | || Pallet: pallet::{Pallet, ValidateUnsigned}, -71 | || } -72 | || } - | ||_- in this macro invocation -... | +66 | construct_runtime! { + | _^ +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet::{Pallet, ValidateUnsigned}, +71 | | } +72 | | } + | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope = note: the following traits define an item `pre_dispatch`, perhaps you need to implement one of them: candidate #1: `SignedExtension` candidate #2: `ValidateUnsigned` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `validate_unsigned` found for struct `pallet::Pallet` in the current scope --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:66:1 | -28 | pub struct Pallet(_); - | -------------------- function or associated item `validate_unsigned` not found for this struct +28 | pub struct Pallet(_); + | -------------------- function or associated item `validate_unsigned` not found for this struct ... -66 | construct_runtime! { - | __^ - | | _| - | || -67 | || pub struct Runtime -68 | || { -69 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, -70 | || Pallet: pallet::{Pallet, ValidateUnsigned}, -71 | || } -72 | || } - | ||_- in this macro invocation -... | +66 | construct_runtime! 
{ + | _^ +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet::{Pallet, ValidateUnsigned}, +71 | | } +72 | | } + | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope = note: the following traits define an item `validate_unsigned`, perhaps you need to implement one of them: candidate #1: `SignedExtension` candidate #2: `ValidateUnsigned` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/derive_no_bound.rs b/substrate/frame/support/test/tests/derive_no_bound.rs index 48a6413c3ac..b1914707805 100644 --- a/substrate/frame/support/test/tests/derive_no_bound.rs +++ b/substrate/frame/support/test/tests/derive_no_bound.rs @@ -24,6 +24,7 @@ use frame_support::{ }; #[derive(RuntimeDebugNoBound)] +#[allow(dead_code)] struct Unnamed(u64); #[test] diff --git a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr index 40f8f129830..2a4ceecd8fa 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr @@ -18,7 +18,7 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` 38 | pub fn foo(origin: OriginFor, _bar: T::Bar) -> DispatchResultWithPostInfo { | ^^^^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | - = help: the trait `std::fmt::Debug` is not implemented for `::Bar` + = help: the trait `std::fmt::Debug` is not implemented for `::Bar`, which is required by `&::Bar: std::fmt::Debug` = note: required for `&::Bar` to implement `std::fmt::Debug` = note: required for the cast from `&&::Bar` to `&dyn std::fmt::Debug` diff --git a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr index 5744c636235..fc993e9ff68 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr @@ -18,7 +18,7 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` 38 | pub fn foo(origin: OriginFor, _bar: T::Bar) -> DispatchResultWithPostInfo { | ^^^^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | - = help: the trait `std::fmt::Debug` is not implemented for `::Bar` + = help: the trait `std::fmt::Debug` is not implemented for `::Bar`, which is required by `&::Bar: std::fmt::Debug` = note: required for `&::Bar` to implement `std::fmt::Debug` = note: required for the cast from `&&::Bar` to `&dyn std::fmt::Debug` @@ -41,7 +41,7 @@ error[E0277]: the trait bound `::Bar: WrapperTypeEncode` is | ------------------------ required by a bound introduced by this call ... 
38 | pub fn foo(origin: OriginFor, _bar: T::Bar) -> DispatchResultWithPostInfo { - | ^^^^ the trait `WrapperTypeEncode` is not implemented for `::Bar` + | ^^^^ the trait `WrapperTypeEncode` is not implemented for `::Bar`, which is required by `::Bar: Encode` | = note: required for `::Bar` to implement `Encode` @@ -49,6 +49,6 @@ error[E0277]: the trait bound `::Bar: WrapperTypeDecode` is --> tests/pallet_ui/call_argument_invalid_bound_2.rs:38:42 | 38 | pub fn foo(origin: OriginFor, _bar: T::Bar) -> DispatchResultWithPostInfo { - | ^^^^^^ the trait `WrapperTypeDecode` is not implemented for `::Bar` + | ^^^^^^ the trait `WrapperTypeDecode` is not implemented for `::Bar`, which is required by `::Bar: Decode` | = note: required for `::Bar` to implement `Decode` diff --git a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr index b58e4516bce..d6486a49079 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr @@ -18,7 +18,7 @@ error[E0277]: `Bar` doesn't implement `std::fmt::Debug` 40 | pub fn foo(origin: OriginFor, _bar: Bar) -> DispatchResultWithPostInfo { | ^^^^ `Bar` cannot be formatted using `{:?}` | - = help: the trait `std::fmt::Debug` is not implemented for `Bar` + = help: the trait `std::fmt::Debug` is not implemented for `Bar`, which is required by `&Bar: std::fmt::Debug` = note: add `#[derive(Debug)]` to `Bar` or manually `impl std::fmt::Debug for Bar` = note: required for `&Bar` to implement `std::fmt::Debug` = note: required for the cast from `&&Bar` to `&dyn std::fmt::Debug` diff --git a/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr b/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr index 02ead305dd8..629fefebbe2 100644 --- a/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr @@ -35,7 +35,7 @@ error[E0277]: the trait bound `Vec: MaxEncodedLen` is not satisfied ... 
| 35 | | #[pallet::storage] 36 | | type MyStorage = StorageValue<_, Vec>; - | |__________________^ the trait `MaxEncodedLen` is not implemented for `Vec` + | |__________________^ the trait `MaxEncodedLen` is not implemented for `Vec`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageMyStorage, Vec>: StorageInfoTrait` | = help: the following other types implement trait `MaxEncodedLen`: bool diff --git a/substrate/frame/support/test/tests/pallet_ui/event_field_not_member.stderr b/substrate/frame/support/test/tests/pallet_ui/event_field_not_member.stderr index 44660d26906..e9c2eae686b 100644 --- a/substrate/frame/support/test/tests/pallet_ui/event_field_not_member.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/event_field_not_member.stderr @@ -16,6 +16,6 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` 41 | B { b: T::Bar }, | ^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | - = help: the trait `std::fmt::Debug` is not implemented for `::Bar` + = help: the trait `std::fmt::Debug` is not implemented for `::Bar`, which is required by `&::Bar: std::fmt::Debug` = note: required for `&::Bar` to implement `std::fmt::Debug` = note: required for the cast from `&&::Bar` to `&dyn std::fmt::Debug` diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index d269e6d2726..c8c41e80501 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -9,7 +9,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied ... | 38 | | #[pallet::storage] 39 | | type Foo = StorageValue; - | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `WrapperTypeDecode`: Box @@ -31,7 +31,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied ... | 38 | | #[pallet::storage] 39 | | type Foo = StorageValue; - | |____________^ the trait `EncodeLike` is not implemented for `Bar` + | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `EncodeLike`: @@ -58,7 +58,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied ... 
| 38 | | #[pallet::storage] 39 | | type Foo = StorageValue; - | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `WrapperTypeEncode`: Box @@ -81,7 +81,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue; - | |____________^ the trait `TypeInfo` is not implemented for `Bar` + | |____________^ the trait `TypeInfo` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `TypeInfo`: bool @@ -102,7 +102,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue; - | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `WrapperTypeDecode`: Box @@ -119,7 +119,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue; - | |____________^ the trait `EncodeLike` is not implemented for `Bar` + | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `EncodeLike`: @@ -141,7 +141,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue; - | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `WrapperTypeEncode`: Box @@ -164,7 +164,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue; - | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait `WrapperTypeDecode`: Box @@ -181,7 +181,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue; - | |____________^ the trait `EncodeLike` is not implemented for `Bar` + | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait `EncodeLike`: @@ -203,7 +203,7 @@ error[E0277]: the trait 
bound `Bar: WrapperTypeEncode` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue; - | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait `WrapperTypeEncode`: Box diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 13d761d65d2..08b35eb8ed1 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -9,7 +9,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied ... | 38 | | #[pallet::storage] 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `WrapperTypeDecode`: Box @@ -31,7 +31,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied ... | 38 | | #[pallet::storage] 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `EncodeLike` is not implemented for `Bar` + | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `EncodeLike`: @@ -58,7 +58,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied ... 
| 38 | | #[pallet::storage] 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `WrapperTypeEncode`: Box @@ -81,7 +81,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `TypeInfo` is not implemented for `Bar` + | |____________^ the trait `TypeInfo` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `TypeInfo`: bool @@ -102,7 +102,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `WrapperTypeDecode`: Box @@ -119,7 +119,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `EncodeLike` is not implemented for `Bar` + | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `EncodeLike`: @@ -141,7 +141,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `WrapperTypeEncode`: Box @@ -164,7 +164,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait `WrapperTypeDecode`: Box @@ -181,7 +181,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `EncodeLike` is not implemented for `Bar` + | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait 
`EncodeLike`: @@ -203,7 +203,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait `WrapperTypeEncode`: Box diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index 504db21feeb..042a6f67fd3 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -9,7 +9,7 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied ... | 38 | | #[pallet::storage] 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `MaxEncodedLen` is not implemented for `Bar` + | |____________^ the trait `MaxEncodedLen` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageInfoTrait` | = help: the following other types implement trait `MaxEncodedLen`: bool diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 6fd0b1959c8..9f57b85f3a8 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -9,7 +9,7 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied ... 
| 41 | | #[pallet::storage] 42 | | type Foo = StorageNMap<_, Key, u32>; - | |____________^ the trait `MaxEncodedLen` is not implemented for `Bar` + | |____________^ the trait `MaxEncodedLen` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, NMapKey, u32>: StorageInfoTrait` | = help: the following other types implement trait `MaxEncodedLen`: bool diff --git a/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr b/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr index 788d1807f3b..535bbb178d5 100644 --- a/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr +++ b/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr @@ -9,10 +9,12 @@ note: the struct `RuntimeVersion` is defined here | | use sp_version::RuntimeVersion; | ^^^^^^^^^^^^^^^^^^^^^^^^^^ -help: consider importing one of these items instead +help: consider importing this struct instead + | +37 | fn version() -> sp_version::RuntimeVersion { + | ~~~~~~~~~~~~~~~~~~~~~~~~~~ +help: import `RuntimeVersion` directly | -37 | fn version() -> sp_api::__private::RuntimeVersion { - | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 37 | fn version() -> sp_version::RuntimeVersion { | ~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index b4df7c06876..f4e0f3b0afb 100644 --- a/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -9,10 +9,12 @@ note: the struct `RuntimeVersion` is defined here | | use sp_version::RuntimeVersion; | ^^^^^^^^^^^^^^^^^^^^^^^^^^ -help: consider importing one of these items instead +help: consider importing this struct instead + | +39 | fn version() -> sp_version::RuntimeVersion { + | ~~~~~~~~~~~~~~~~~~~~~~~~~~ +help: import `RuntimeVersion` directly | -39 | fn version() -> sp_api::__private::RuntimeVersion { - | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 39 | fn version() -> sp_version::RuntimeVersion { | ~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -30,8 +32,8 @@ note: type in trait | 27 | fn test(data: u64); | ^^^ - = note: expected signature `fn(u64)` - found signature `fn(&u64)` + = note: expected signature `fn(_)` + found signature `fn(&_)` error[E0308]: mismatched types --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:33:11 -- GitLab From ff906127ab513bb42a4288968e0f421f630809e0 Mon Sep 17 00:00:00 2001 From: Egor_P Date: Thu, 18 Apr 2024 12:30:31 +0200 Subject: [PATCH 025/107] Improve changelog in the release notes (#4179) This PR adds description to each of the sections of the Changelog part. Changes are based on feedback that it wasn't that clear what exactly `Node Dev`, `Runtime Dev` etc. means. Now, the description for each of those parts is taken directly from the `prdoc` schema. 
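For reference, the same audience/description pairs can be pulled out of the schema by hand; below is a minimal sketch using the schema URL, the comment-stripping `sed`, and the `jq` path that `build-changelogs.sh` relies on later in this series (the output formatting itself is illustrative only):

```sh
# Hedged sketch: print each changelog audience with its description, assuming the
# schema keeps them as `const`/`description` pairs under `$defs.audience_id.oneOf`.
SCHEMA_URL=https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
curl -s "$SCHEMA_URL" \
  | sed 's|^//.*||' \
  | jq -r '."$defs".audience_id.oneOf[] | "\(.const): \(.description)"'
```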
Closes https://github.com/paritytech/release-engineering/issues/197 --- .../release-30_publish_release_draft.yml | 9 +--- scripts/release/build-changelogs.sh | 41 +++++++++++-------- scripts/release/templates/audience.md.tera | 2 + 3 files changed, 28 insertions(+), 24 deletions(-) diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml index 12891ef70af..430b1e26646 100644 --- a/.github/workflows/release-30_publish_release_draft.yml +++ b/.github/workflows/release-30_publish_release_draft.yml @@ -42,7 +42,6 @@ jobs: URL=https://github.com/chevdor/tera-cli/releases/download/v0.2.4/tera-cli_linux_amd64.deb wget $URL -O tera.deb sudo dpkg -i tera.deb - tera --version - name: Download artifacts uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4 @@ -70,7 +69,7 @@ jobs: export REF1=$(get_latest_release_tag) if [[ -z "${{ inputs.version }}" ]]; then - export REF2="${{ github.ref }}" + export REF2="${{ github.ref_name }}" else export REF2="${{ inputs.version }}" fi @@ -79,10 +78,6 @@ jobs: ./scripts/release/build-changelogs.sh - echo "Checking the folder state" - pwd - ls -la scripts/release - - name: Archive artifact context.json uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: @@ -151,5 +146,5 @@ jobs: access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }} server: m.parity.io message: | - **New version of polkadot tagged**: ${{ github.ref }}
+ **New version of polkadot tagged**: ${{ github.ref_name }}
Draft release created: ${{ needs.publish-release-draft.outputs.release_url }} diff --git a/scripts/release/build-changelogs.sh b/scripts/release/build-changelogs.sh index 84054391936..d9a1f11d01e 100755 --- a/scripts/release/build-changelogs.sh +++ b/scripts/release/build-changelogs.sh @@ -2,11 +2,10 @@ export PRODUCT=polkadot export VERSION=${VERSION:-1.5.0} -export ENGINE=${ENGINE:-docker} +export ENGINE=${ENGINE:-podman} export REF1=${REF1:-'HEAD'} export REF2=${REF2} export RUSTC_STABLE=${RUSTC_STABLE:-'1.0'} -export RUSTC_NIGHTLY=${RUSTC_NIGHTLY:-'1.0'} PROJECT_ROOT=`git rev-parse --show-toplevel` echo $PROJECT_ROOT @@ -27,35 +26,43 @@ echo -e "OUTPUT: \t\t$OUTPUT" mkdir -p $OUTPUT $ENGINE run --rm -v ${PROJECT_ROOT}:/repo paritytech/prdoc load -d "prdoc/$VERSION" --json > $DATA_JSON -# ls -al $DATA_JSON cat $DATA_JSON | jq ' { "prdoc" : .}' > $CONTEXT_JSON -# ls -al $CONTEXT_JSON -# Fetch the list of valid audiences +# Fetch the list of valid audiences and their descriptions SCHEMA_URL=https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json SCHEMA=$(curl -s $SCHEMA_URL | sed 's|^//.*||') -AUDIENCE_ARRAY=$(echo -E $SCHEMA | jq -r '."$defs".audience.oneOf[] | .const') - -readarray -t audiences < <(echo "$AUDIENCE_ARRAY") -declare -p audiences - - -# Generate a changelog -echo "Generating changelog..." -tera -t "${TEMPLATE_CHANGELOG}" --env --env-key env "${CONTEXT_JSON}" > "$OUTPUT/changelog.md" -echo "Changelog ready in $OUTPUT/changelog.md" +aud_desc_array=() +while IFS= read -r line; do + audience=$(jq -r '.const' <<< "$line" ) + description=$(jq -r '.description' <<< "$line") + if [ -n "$audience" ] && [ -n "$description" ]; then + aud_desc_array+=("($audience; $description)") + fi +done < <(jq -c '."$defs".audience_id.oneOf[]' <<< "$SCHEMA") # Generate a release notes doc per audience -for audience in "${audiences[@]}"; do +for tuple in "${aud_desc_array[@]}"; do + audience=$(echo "$tuple" | cut -d ';' -f 1 | sed 's/(//') audience_id="$(tr [A-Z] [a-z] <<< "$audience")" audience_id="$(tr ' ' '_' <<< "$audience_id")" + + description=$(echo "$tuple" | cut -d ';' -f 2 | sed 's/)//') + echo "Processing audience: $audience ($audience_id)" - export TARGET_AUDIENCE=$audience + export TARGET_AUDIENCE="$audience" + export AUDIENCE_DESC="**These changes are relevant to:** $description" + tera -t "${TEMPLATE_AUDIENCE}" --env --env-key env "${CONTEXT_JSON}" > "$OUTPUT/relnote_${audience_id}.md" cat "$OUTPUT/relnote_${audience_id}.md" >> "$PROJECT_ROOT/scripts/release/templates/changelog.md" done + +# Generate a changelog containing list of the commits +echo "Generating changelog..." 
+tera -t "${TEMPLATE_CHANGELOG}" --env --env-key env "${CONTEXT_JSON}" > "$OUTPUT/relnote_commits.md" +echo "Changelog ready in $OUTPUT/relnote_commits.md" + # Show the files tree -s -h -c $OUTPUT/ diff --git a/scripts/release/templates/audience.md.tera b/scripts/release/templates/audience.md.tera index 0b47850e3a3..237643cfa39 100644 --- a/scripts/release/templates/audience.md.tera +++ b/scripts/release/templates/audience.md.tera @@ -1,5 +1,7 @@ ### Changelog for `{{ env.TARGET_AUDIENCE }}` +{{ env.AUDIENCE_DESC }} + {% for file in prdoc -%} {% for doc_item in file.content.doc %} {%- if doc_item.audience == env.TARGET_AUDIENCE %} -- GitLab From 4ddeda1e4177c910be5938227aee711efaf559aa Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Thu, 18 Apr 2024 16:22:00 +0200 Subject: [PATCH 026/107] [ci] Use ci-unified reference (#4196) close https://github.com/paritytech/ci_cd/issues/974 --- .gitlab-ci.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 77d31d96ee1..5e57dd86f14 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -21,8 +21,7 @@ workflow: - if: $CI_COMMIT_BRANCH variables: - # CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE] - CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408" + CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE] # BUILDAH_IMAGE is defined in group variables BUILDAH_COMMAND: "buildah --storage-driver overlay2" RELENG_SCRIPTS_BRANCH: "master" -- GitLab From 91d4a207af43f8f81f56e4f24af74f7c6f590148 Mon Sep 17 00:00:00 2001 From: ordian Date: Thu, 18 Apr 2024 16:32:14 +0200 Subject: [PATCH 027/107] chain-selection: allow reverting current block (#4103) Block reversion of the current block is technically possible as can be seen from https://github.com/paritytech/polkadot-sdk/blob/39b1f50f1c251def87c1625d68567ed252dc6272/polkadot/runtime/parachains/src/disputes.rs#L1215-L1223 - [x] Fix the test --- polkadot/node/core/chain-selection/src/lib.rs | 6 +-- .../node/core/chain-selection/src/tests.rs | 43 +++++++++++++++++-- .../node/core/chain-selection/src/tree.rs | 42 ++++++++++-------- 3 files changed, 65 insertions(+), 26 deletions(-) diff --git a/polkadot/node/core/chain-selection/src/lib.rs b/polkadot/node/core/chain-selection/src/lib.rs index 6f864fefb61..07c245e839b 100644 --- a/polkadot/node/core/chain-selection/src/lib.rs +++ b/polkadot/node/core/chain-selection/src/lib.rs @@ -619,7 +619,7 @@ async fn handle_active_leaf( // Extract all reversion logs from a header in ascending order. // -// Ignores logs with number >= the block header number. +// Ignores logs with number > the block header number. 
fn extract_reversion_logs(header: &Header) -> Vec { let number = header.number; let mut logs = header @@ -639,14 +639,14 @@ fn extract_reversion_logs(header: &Header) -> Vec { None }, - Ok(Some(ConsensusLog::Revert(b))) if b < number => Some(b), + Ok(Some(ConsensusLog::Revert(b))) if b <= number => Some(b), Ok(Some(ConsensusLog::Revert(b))) => { gum::warn!( target: LOG_TARGET, revert_target = b, block_number = number, block_hash = ?header.hash(), - "Block issued invalid revert digest targeting itself or future" + "Block issued invalid revert digest targeting future" ); None diff --git a/polkadot/node/core/chain-selection/src/tests.rs b/polkadot/node/core/chain-selection/src/tests.rs index bc998f268a0..1fe87f04cd5 100644 --- a/polkadot/node/core/chain-selection/src/tests.rs +++ b/polkadot/node/core/chain-selection/src/tests.rs @@ -966,19 +966,54 @@ fn ancestor_of_unviable_is_not_leaf_if_has_children() { } #[test] -fn self_and_future_reversions_are_ignored() { +fn self_reversions_are_not_ignored() { test_harness(|backend, _, mut virtual_overseer| async move { let finalized_number = 0; let finalized_hash = Hash::repeat_byte(0); // F <- A1 <- A2 <- A3. // - // A3 reverts itself and future blocks. ignored. + // A3 reverts itself + + let (_, chain_a) = + construct_chain_on_base(vec![1, 2, 3], finalized_number, finalized_hash, |h| { + if h.number == 3 { + add_reversions(h, vec![3]) + } + }); + + let a2_hash = chain_a.iter().rev().nth(1).unwrap().0.hash(); + + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain_a.clone(), + ) + .await; + + assert_backend_contains(&backend, chain_a.iter().map(|(h, _)| h)); + assert_leaves(&backend, vec![a2_hash]); + assert_leaves_query(&mut virtual_overseer, vec![a2_hash]).await; + + virtual_overseer + }); +} + +#[test] +fn future_reversions_are_ignored() { + test_harness(|backend, _, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3. + // + // A3 reverts future blocks. ignored. let (a3_hash, chain_a) = construct_chain_on_base(vec![1, 2, 3], finalized_number, finalized_hash, |h| { if h.number == 3 { - add_reversions(h, vec![3, 4, 100]) + add_reversions(h, vec![4, 100]) } }); @@ -1006,7 +1041,7 @@ fn revert_finalized_is_ignored() { // F <- A1 <- A2 <- A3. // - // A3 reverts itself and future blocks. ignored. + // A3 reverts finalized F and its ancestors. ignored. let (a3_hash, chain_a) = construct_chain_on_base(vec![1, 2, 3], finalized_number, finalized_hash, |h| { diff --git a/polkadot/node/core/chain-selection/src/tree.rs b/polkadot/node/core/chain-selection/src/tree.rs index b4aba30368a..1eb6c13a7f8 100644 --- a/polkadot/node/core/chain-selection/src/tree.rs +++ b/polkadot/node/core/chain-selection/src/tree.rs @@ -236,7 +236,7 @@ fn propagate_viability_update( Ok(()) } -/// Imports a new block and applies any reversions to ancestors. +/// Imports a new block and applies any reversions to ancestors or the block itself. 
pub(crate) fn import_block( backend: &mut OverlayedBackend, block_hash: Hash, @@ -246,25 +246,29 @@ pub(crate) fn import_block( weight: BlockWeight, stagnant_at: Timestamp, ) -> Result<(), Error> { - add_block(backend, block_hash, block_number, parent_hash, weight, stagnant_at)?; - apply_ancestor_reversions(backend, block_hash, block_number, reversion_logs)?; + let block_entry = + add_block(backend, block_hash, block_number, parent_hash, weight, stagnant_at)?; + apply_reversions(backend, block_entry, reversion_logs)?; Ok(()) } // Load the given ancestor's block entry, in descending order from the `block_hash`. -// The ancestor_number must be at least one block less than the `block_number`. +// The ancestor_number must be not higher than the `block_entry`'s. // // The returned entry will be `None` if the range is invalid or any block in the path had // no entry present. If any block entry was missing, it can safely be assumed to // be finalized. fn load_ancestor( backend: &mut OverlayedBackend, - block_hash: Hash, - block_number: BlockNumber, + block_entry: &BlockEntry, ancestor_number: BlockNumber, ) -> Result, Error> { - if block_number <= ancestor_number { + let block_hash = block_entry.block_hash; + let block_number = block_entry.block_number; + if block_number == ancestor_number { + return Ok(Some(block_entry.clone())) + } else if block_number < ancestor_number { return Ok(None) } @@ -300,7 +304,7 @@ fn add_block( parent_hash: Hash, weight: BlockWeight, stagnant_at: Timestamp, -) -> Result<(), Error> { +) -> Result { let mut leaves = backend.load_leaves()?; let parent_entry = backend.load_block_entry(&parent_hash)?; @@ -308,7 +312,7 @@ fn add_block( parent_entry.as_ref().and_then(|parent| parent.non_viable_ancestor_for_child()); // 1. Add the block to the DB assuming it's not reverted. - backend.write_block_entry(BlockEntry { + let block_entry = BlockEntry { block_hash, block_number, parent_hash, @@ -319,7 +323,8 @@ fn add_block( approval: Approval::Unapproved, }, weight, - }); + }; + backend.write_block_entry(block_entry.clone()); // 2. Update leaves if inherited viability is fine. if inherited_viability.is_none() { @@ -344,26 +349,25 @@ fn add_block( stagnant_at_list.push(block_hash); backend.write_stagnant_at(stagnant_at, stagnant_at_list); - Ok(()) + Ok(block_entry) } /// Assuming that a block is already imported, accepts the number of the block /// as well as a list of reversions triggered by the block in ascending order. -fn apply_ancestor_reversions( +fn apply_reversions( backend: &mut OverlayedBackend, - block_hash: Hash, - block_number: BlockNumber, + block_entry: BlockEntry, reversions: Vec, ) -> Result<(), Error> { // Note: since revert numbers are in ascending order, the expensive propagation // of unviability is only heavy on the first log. 
for revert_number in reversions { - let maybe_block_entry = load_ancestor(backend, block_hash, block_number, revert_number)?; - if let Some(block_entry) = &maybe_block_entry { + let maybe_block_entry = load_ancestor(backend, &block_entry, revert_number)?; + if let Some(entry) = &maybe_block_entry { gum::trace!( target: LOG_TARGET, ?revert_number, - revert_hash = ?block_entry.block_hash, + revert_hash = ?entry.block_hash, "Block marked as reverted via scraped on-chain reversions" ); } @@ -372,8 +376,8 @@ fn apply_ancestor_reversions( maybe_block_entry, None, revert_number, - Some(block_hash), - Some(block_number), + Some(block_entry.block_hash), + Some(block_entry.block_number), )?; } -- GitLab From 9f12d2196e156e8822e5373975644aacfc266d14 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Thu, 18 Apr 2024 17:57:22 +0200 Subject: [PATCH 028/107] [ci] Use native git cli in cargo (#4200) More details can be found [here](https://github.com/paritytech/ci_cd/issues/939#issuecomment-2064061845) --- .github/workflows/test-github-actions.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/test-github-actions.yml b/.github/workflows/test-github-actions.yml index 09cb4a25b9a..c8ce49cb462 100644 --- a/.github/workflows/test-github-actions.yml +++ b/.github/workflows/test-github-actions.yml @@ -8,6 +8,9 @@ concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true +env: + CARGO_NET_GIT_FETCH_WITH_CLI: true + jobs: test-linux-stable-int: runs-on: arc-runners-polkadot-sdk -- GitLab From 0e552893d0f656f83d366ae9118aaeb0f898aabf Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Thu, 18 Apr 2024 18:57:23 +0300 Subject: [PATCH 029/107] Fix next_retry busy waiting on first retry (#4192) The `next_retry_time` gets populated when a request receives a timeout or any other error; after that, next_retry checks all the requests in the queue and returns the smallest one, which then gets used to move the main loop by creating a Delay ``` futures_timer::Delay::new(instant.saturating_duration_since(Instant::now())).await, ``` However, when we retry a task for the first time we still keep it in the queue and mark it as in flight, so its next_retry_time would be the oldest and smaller than `now`, so the Delay would always trigger, making the main loop essentially busy-wait until we received a response for the retry request. Fix this by excluding the tasks that are already in-flight. --------- Signed-off-by: Alexandru Gheorghe Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> --- .../statement-distribution/src/v2/requests.rs | 2 +- .../src/v2/tests/requests.rs | 26 ++++++++++++++++++- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/polkadot/node/network/statement-distribution/src/v2/requests.rs b/polkadot/node/network/statement-distribution/src/v2/requests.rs index fe270c8a58e..1ed18ffd42a 100644 --- a/polkadot/node/network/statement-distribution/src/v2/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/requests.rs @@ -288,7 +288,7 @@ impl RequestManager { /// Returns an instant at which the next request to be retried will be ready.
pub fn next_retry_time(&mut self) -> Option { let mut next = None; - for (_id, request) in &self.requests { + for (_id, request) in self.requests.iter().filter(|(_id, request)| !request.in_flight) { if let Some(next_retry_time) = request.next_retry_time { if next.map_or(true, |next| next_retry_time < next) { next = Some(next_retry_time); diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs index dc2c8f55290..8cf13980214 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs @@ -2606,7 +2606,31 @@ fn should_delay_before_retrying_dropped_requests() { // Sleep for the given amount of time. This should reset the delay for the first candidate. futures_timer::Delay::new(REQUEST_RETRY_DELAY).await; - // We re-try the first request. + // We re-try the first request a second time and drop it again. assert_matches!( overseer.recv().await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendRequests(mut requests, IfDisconnected::ImmediateError)) => { assert_eq!(requests.len(), 1); assert_matches!( requests.pop().unwrap(), Requests::AttestedCandidateV2(outgoing) => { assert_eq!(outgoing.peer, Recipient::Peer(peer_c)); assert_eq!(outgoing.payload.candidate_hash, candidate_hash_1); assert_eq!(outgoing.payload.mask, mask); } ); } ); + + assert_matches!( + overseer_recv_with_timeout(&mut overseer, Duration::from_millis(100)).await, + None + ); + + // Sleep for the given amount of time. This should reset the delay for the first candidate. + futures_timer::Delay::new(REQUEST_RETRY_DELAY).await; + + // We re-try the first request, for the third time, so let's answer it. { let statements = vec![ state -- GitLab From 37e338f0469c2ca5b716c4423d8b683e237ead21 Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Thu, 18 Apr 2024 18:57:34 +0300 Subject: [PATCH 030/107] approval-voting: Make sure we always mark approved candidates approved in a different relay chain context (#4153) ... see https://github.com/paritytech/polkadot-sdk/issues/4149#issuecomment-2058472444 for more detail on why this is needed. ## TODO: - [x] Unittests - [x] Replicate scenario from https://github.com/paritytech/polkadot-sdk/issues/4149 and confirm this fixes it: https://github.com/paritytech/polkadot-sdk/issues/4149 [Replicated on a zombienet with some hacked nodes; confirmed we can end up in this state where no wakeup is scheduled and the nodes are pending new assignments.] --------- Signed-off-by: Alexandru Gheorghe Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> --- polkadot/node/core/approval-voting/src/lib.rs | 51 +++++ .../node/core/approval-voting/src/tests.rs | 182 +++++++++++++++++- 2 files changed, 232 insertions(+), 1 deletion(-) diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 7ecc2b2595b..b5ed92fa39c 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -978,6 +978,7 @@ where woken_block, woken_candidate, &subsystem.metrics, + &wakeups, ).await? } next_msg = ctx.recv().fuse() => { @@ -1152,6 +1153,7 @@ async fn handle_actions( candidate_hash, delayed_approvals_timers, approval_request, + &wakeups, ) .await?
.into_iter() @@ -1663,6 +1665,7 @@ async fn handle_from_overseer( |r| { let _ = res.send(r); }, + &wakeups, ) .await? .0, @@ -2477,6 +2480,7 @@ async fn check_and_import_approval( metrics: &Metrics, approval: IndirectSignedApprovalVoteV2, with_response: impl FnOnce(ApprovalCheckResult) -> T, + wakeups: &Wakeups, ) -> SubsystemResult<(Vec, T)> where Sender: SubsystemSender, @@ -2655,6 +2659,7 @@ where approved_candidate_hash, candidate_entry, ApprovalStateTransition::RemoteApproval(approval.validator), + wakeups, ) .await; actions.extend(new_actions); @@ -2689,6 +2694,10 @@ impl ApprovalStateTransition { ApprovalStateTransition::WakeupProcessed => false, } } + + fn is_remote_approval(&self) -> bool { + matches!(*self, ApprovalStateTransition::RemoteApproval(_)) + } } // Advance the approval state, either by importing an approval vote which is already checked to be @@ -2705,6 +2714,7 @@ async fn advance_approval_state( candidate_hash: CandidateHash, mut candidate_entry: CandidateEntry, transition: ApprovalStateTransition, + wakeups: &Wakeups, ) -> Vec where Sender: SubsystemSender, @@ -2835,6 +2845,43 @@ where status.required_tranches, )); + if is_approved && transition.is_remote_approval() { + // Make sure we wake other blocks in case they have + // a no-show that might be covered by this approval. + for (fork_block_hash, fork_approval_entry) in candidate_entry + .block_assignments + .iter() + .filter(|(hash, _)| **hash != block_hash) + { + let assigned_on_fork_block = validator_index + .as_ref() + .map(|validator_index| fork_approval_entry.is_assigned(*validator_index)) + .unwrap_or_default(); + if wakeups.wakeup_for(*fork_block_hash, candidate_hash).is_none() && + !fork_approval_entry.is_approved() && + assigned_on_fork_block + { + let fork_block_entry = db.load_block_entry(fork_block_hash); + if let Ok(Some(fork_block_entry)) = fork_block_entry { + actions.push(Action::ScheduleWakeup { + block_hash: *fork_block_hash, + block_number: fork_block_entry.block_number(), + candidate_hash, + // Schedule the wakeup next tick, since the assignment must be a + // no-show, because there is no-wakeup scheduled. 
+ tick: tick_now + 1, + }) + } else { + gum::debug!( + target: LOG_TARGET, + ?fork_block_entry, + ?fork_block_hash, + "Failed to load block entry" + ) + } + } + } + } // We have no need to write the candidate entry if all of the following // is true: // @@ -2896,6 +2943,7 @@ async fn process_wakeup( relay_block: Hash, candidate_hash: CandidateHash, metrics: &Metrics, + wakeups: &Wakeups, ) -> SubsystemResult> { let mut span = state .spans @@ -3064,6 +3112,7 @@ async fn process_wakeup( candidate_hash, candidate_entry, ApprovalStateTransition::WakeupProcessed, + wakeups, ) .await, ); @@ -3294,6 +3343,7 @@ async fn issue_approval( candidate_hash: CandidateHash, delayed_approvals_timers: &mut DelayedApprovalTimer, ApprovalVoteRequest { validator_index, block_hash }: ApprovalVoteRequest, + wakeups: &Wakeups, ) -> SubsystemResult> { let mut issue_approval_span = state .spans @@ -3415,6 +3465,7 @@ async fn issue_approval( candidate_hash, candidate_entry, ApprovalStateTransition::LocalApproval(validator_index as _), + wakeups, ) .await; diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index f7bbbca4b8a..312d805bbef 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -834,7 +834,6 @@ impl ChainBuilder { cur_hash = cur_header.parent_hash; } ancestry.reverse(); - import_block( overseer, ancestry.as_ref(), @@ -1922,6 +1921,187 @@ fn subsystem_assignment_import_updates_candidate_entry_and_schedules_wakeup() { }); } +#[test] +fn subsystem_always_has_a_wakeup_when_pending() { + // Approvals sent after all assignments are no-show, the approval + // should be counted on the fork relay chain on the next tick. + test_approvals_on_fork_are_always_considered_after_no_show( + 30, + vec![(29, false), (30, false), (31, true)], + ); + // Approvals sent before fork no-shows, the approval + // should be counted on the fork relay chain when it no-shows. + test_approvals_on_fork_are_always_considered_after_no_show( + 8, // a tick smaller than the no-show tick which is 30. + vec![(7, false), (8, false), (29, false), (30, true), (31, true)], + ); +} + +fn test_approvals_on_fork_are_always_considered_after_no_show( + tick_to_send_approval: Tick, + expected_approval_status: Vec<(Tick, bool)>, +) { + let config = HarnessConfig::default(); + let store = config.backend(); + + test_harness(config, |test_harness| async move { + let TestHarness { + mut virtual_overseer, + clock, + sync_oracle_handle: _sync_oracle_handle, + .. 
+ } = test_harness; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + let candidate_hash = Hash::repeat_byte(0x04); + + let candidate_descriptor = make_candidate(ParaId::from(1_u32), &candidate_hash); + let candidate_hash = candidate_descriptor.hash(); + + let block_hash = Hash::repeat_byte(0x01); + let block_hash_fork = Hash::repeat_byte(0x02); + + let candidate_index = 0; + let validator = ValidatorIndex(0); + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Eve, + ]; + // Add block hash 0x01 and for 0x02 + ChainBuilder::new() + .add_block( + block_hash, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { + slot: Slot::from(1), + candidates: Some(vec![( + candidate_descriptor.clone(), + CoreIndex(0), + GroupIndex(0), + )]), + session_info: Some(SessionInfo { + validator_groups: IndexedVec::>::from( + vec![ + vec![ValidatorIndex(0), ValidatorIndex(1)], + vec![ValidatorIndex(2)], + vec![ValidatorIndex(3), ValidatorIndex(4)], + ], + ), + needed_approvals: 1, + ..session_info(&validators) + }), + end_syncing: false, + }, + ) + .add_block( + block_hash_fork, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { + slot: Slot::from(1), + candidates: Some(vec![(candidate_descriptor, CoreIndex(0), GroupIndex(0))]), + session_info: Some(SessionInfo { + validator_groups: IndexedVec::>::from( + vec![ + vec![ValidatorIndex(0), ValidatorIndex(1)], + vec![ValidatorIndex(2)], + vec![ValidatorIndex(3), ValidatorIndex(4)], + ], + ), + needed_approvals: 1, + ..session_info(&validators) + }), + end_syncing: false, + }, + ) + .build(&mut virtual_overseer) + .await; + + // Send assignments for the same candidate on both forks + let rx = check_and_import_assignment( + &mut virtual_overseer, + block_hash, + candidate_index, + validator, + ) + .await; + assert_eq!(rx.await, Ok(AssignmentCheckResult::Accepted)); + + let rx = check_and_import_assignment( + &mut virtual_overseer, + block_hash_fork, + candidate_index, + validator, + ) + .await; + + assert_eq!(rx.await, Ok(AssignmentCheckResult::Accepted)); + // Wake on APPROVAL_DELAY first + assert!(clock.inner.lock().current_wakeup_is(2)); + clock.inner.lock().set_tick(2); + futures_timer::Delay::new(Duration::from_millis(100)).await; + + // Wake up on no-show + assert!(clock.inner.lock().current_wakeup_is(30)); + + for (tick, status) in expected_approval_status + .iter() + .filter(|(tick, _)| *tick < tick_to_send_approval) + { + // Wake up on no-show + clock.inner.lock().set_tick(*tick); + futures_timer::Delay::new(Duration::from_millis(100)).await; + let block_entry = store.load_block_entry(&block_hash).unwrap().unwrap(); + let block_entry_fork = store.load_block_entry(&block_hash_fork).unwrap().unwrap(); + assert!(!block_entry.is_fully_approved()); + assert_eq!(block_entry_fork.is_fully_approved(), *status); + } + + clock.inner.lock().set_tick(tick_to_send_approval); + futures_timer::Delay::new(Duration::from_millis(100)).await; + + // Send the approval for candidate just in the context of 0x01 block. + let rx = check_and_import_approval( + &mut virtual_overseer, + block_hash, + candidate_index, + validator, + candidate_hash, + 1, + false, + None, + ) + .await; + + assert_eq!(rx.await, Ok(ApprovalCheckResult::Accepted),); + + // Check approval status for the fork_block is correctly transitioned. 
+ for (tick, status) in expected_approval_status + .iter() + .filter(|(tick, _)| *tick >= tick_to_send_approval) + { + // Wake up on no-show + clock.inner.lock().set_tick(*tick); + futures_timer::Delay::new(Duration::from_millis(100)).await; + let block_entry = store.load_block_entry(&block_hash).unwrap().unwrap(); + let block_entry_fork = store.load_block_entry(&block_hash_fork).unwrap().unwrap(); + assert!(block_entry.is_fully_approved()); + assert_eq!(block_entry_fork.is_fully_approved(), *status); + } + + virtual_overseer + }); +} + #[test] fn subsystem_process_wakeup_schedules_wakeup() { test_harness(HarnessConfig::default(), |test_harness| async move { -- GitLab From c891fdabf4d519b25829490723fb70b1a2ffc0e5 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Thu, 18 Apr 2024 18:57:44 +0300 Subject: [PATCH 031/107] tx: Remove tx_broadcast transaction from the pool (#4050) This PR ensures that the broadcast future cleans up the submitted extrinsic from the pool, iff the `broadcast_stop` operation has been called. This effectively cleans up transactions from the pool when `broadcast_stop` is called. cc @paritytech/subxt-team --------- Signed-off-by: Alexandru Vasile --- .../tests/transaction_broadcast_tests.rs | 9 ++--- .../src/transaction/transaction_broadcast.rs | 40 +++++++++++++------ 2 files changed, 31 insertions(+), 18 deletions(-) diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs index 77a28968aed..14e188b6a87 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs @@ -94,7 +94,7 @@ async fn tx_broadcast_enters_pool() { #[tokio::test] async fn tx_broadcast_invalid_tx() { - let (_, pool, _, tx_api, mut exec_middleware, _) = setup_api(Default::default()); + let (_, pool, _, tx_api, exec_middleware, _) = setup_api(Default::default()); // Invalid parameters. let err = tx_api @@ -114,13 +114,10 @@ async fn tx_broadcast_invalid_tx() { assert_eq!(0, pool.status().ready); - // Await the broadcast future to exit. - // Without this we'd be subject to races, where we try to call the stop before the tx is - // dropped. - let _ = get_next_event!(&mut exec_middleware.recv); + // The broadcast future should never be spawned when the tx decoding fails. assert_eq!(0, exec_middleware.num_tasks()); - // The broadcast future was dropped, and the operation is no longer active. + // The operation ID is no longer active. // When the operation is not active, either from the tx being finalized or a // terminal error; the stop method should return an error. let err = tx_api diff --git a/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs b/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs index 6eaf50d6b2e..ef1a426865d 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs @@ -37,7 +37,7 @@ use std::{collections::HashMap, sync::Arc}; use super::error::ErrorBroadcast; /// An API for transaction RPC calls. -pub struct TransactionBroadcast { +pub struct TransactionBroadcast { /// Substrate client. client: Arc, /// Transactions pool. pool: Arc, /// Executor to spawn subscriptions.
executor: SubscriptionTaskExecutor, /// The broadcast operation IDs. - broadcast_ids: Arc>>, + broadcast_ids: Arc>>>, } /// The state of a broadcast operation. -struct BroadcastState { +struct BroadcastState { /// Handle to abort the running future that broadcasts the transaction. handle: AbortHandle, + /// Associated tx hash. + tx_hash: ::Hash, } -impl TransactionBroadcast { +impl TransactionBroadcast { /// Creates a new [`TransactionBroadcast`]. pub fn new(client: Arc, pool: Arc, executor: SubscriptionTaskExecutor) -> Self { TransactionBroadcast { client, pool, executor, broadcast_ids: Default::default() } @@ -106,17 +108,22 @@ where // The unique ID of this operation. let id = self.generate_unique_id(); + // The JSON-RPC server might check whether the transaction is valid before broadcasting it. + // If it does so and if the transaction is invalid, the server should silently do nothing + // and the JSON-RPC client is not informed of the problem. Invalid transactions should still + // count towards the limit to the number of simultaneously broadcasted transactions. + let Ok(decoded_extrinsic) = TransactionFor::::decode(&mut &bytes[..]) else { + return Ok(Some(id)); + }; + // Save the tx hash to remove it later. + let tx_hash = pool.hash_of(&decoded_extrinsic); + let mut best_block_import_stream = Box::pin(self.client.import_notification_stream().filter_map( |notification| async move { notification.is_new_best.then_some(notification.hash) }, )); let broadcast_transaction_fut = async move { - // There is nothing we could do with an extrinsic of invalid format. - let Ok(decoded_extrinsic) = TransactionFor::::decode(&mut &bytes[..]) else { - return; - }; - // Flag to determine if the we should broadcast the transaction again. let mut is_done = false; @@ -169,17 +176,26 @@ where let (fut, handle) = futures::future::abortable(broadcast_transaction_fut); let broadcast_ids = self.broadcast_ids.clone(); let drop_id = id.clone(); + let pool = self.pool.clone(); // The future expected by the executor must be `Future` instead of // `Future>`. - let fut = fut.map(move |_| { + let fut = fut.map(move |result| { // Remove the entry from the broadcast IDs map. - broadcast_ids.write().remove(&drop_id); + let Some(broadcast_state) = broadcast_ids.write().remove(&drop_id) else { return }; + + // The broadcast was not stopped. + if result.is_ok() { + return + } + + // Best effort pool removal (tx can already be finalized). + pool.remove_invalid(&[broadcast_state.tx_hash]); }); // Keep track of this entry and the abortable handle. { let mut broadcast_ids = self.broadcast_ids.write(); - broadcast_ids.insert(id.clone(), BroadcastState { handle }); + broadcast_ids.insert(id.clone(), BroadcastState { handle, tx_hash }); } sc_rpc::utils::spawn_subscription_task(&self.executor, fut); -- GitLab From 88a2f360238787bf5256cfdd14b40c08f519b38e Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Thu, 18 Apr 2024 20:19:04 +0300 Subject: [PATCH 032/107] chainHead: Stabilize chainHead to version 1 (#4168) This PR stabilizes the chainHead API to version 1. 
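In practice the stabilization is a rename of every `chainHead_unstable_*` RPC method to `chainHead_v1_*`, as the `api.rs` diff below shows. For example, a JSON-RPC client would now open the follow subscription roughly like this (the endpoint and tooling here are illustrative assumptions, not part of this change):

```sh
# Hedged sketch: open a chainHead_v1_follow subscription (with_runtime = false)
# against a node's WebSocket endpoint; ws://127.0.0.1:9944 is just the usual default.
echo '{"jsonrpc":"2.0","id":1,"method":"chainHead_v1_follow","params":[false]}' \
  | websocat ws://127.0.0.1:9944
```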
Needs: - https://github.com/paritytech/polkadot-sdk/pull/3667 cc @paritytech/subxt-team --------- Signed-off-by: Alexandru Vasile --- prdoc/pr_4168.prdoc | 8 + .../client/rpc-spec-v2/src/chain_head/api.rs | 26 +- .../src/chain_head/subscription/inner.rs | 2 +- .../rpc-spec-v2/src/chain_head/tests.rs | 225 ++++++++---------- 4 files changed, 118 insertions(+), 143 deletions(-) create mode 100644 prdoc/pr_4168.prdoc diff --git a/prdoc/pr_4168.prdoc b/prdoc/pr_4168.prdoc new file mode 100644 index 00000000000..9a498500f08 --- /dev/null +++ b/prdoc/pr_4168.prdoc @@ -0,0 +1,8 @@ +title: Stabilize chianHead RPC class to version 1 + +doc: + - audience: Node Dev + description: | + The chainHead RPC API is stabilized to version 1. + +crates: [ ] diff --git a/substrate/client/rpc-spec-v2/src/chain_head/api.rs b/substrate/client/rpc-spec-v2/src/chain_head/api.rs index 3851adac264..23cb0bbf545 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/api.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/api.rs @@ -37,15 +37,15 @@ pub trait ChainHeadApi { /// /// This method is unstable and subject to change in the future. #[subscription( - name = "chainHead_unstable_follow" => "chainHead_unstable_followEvent", - unsubscribe = "chainHead_unstable_unfollow", + name = "chainHead_v1_follow" => "chainHead_v1_followEvent", + unsubscribe = "chainHead_v1_unfollow", item = FollowEvent, )] fn chain_head_unstable_follow(&self, with_runtime: bool); /// Retrieves the body (list of transactions) of a pinned block. /// - /// This method should be seen as a complement to `chainHead_unstable_follow`, + /// This method should be seen as a complement to `chainHead_v1_follow`, /// allowing the JSON-RPC client to retrieve more information about a block /// that has been reported. /// @@ -54,7 +54,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_body", raw_method)] + #[method(name = "chainHead_v1_body", raw_method)] async fn chain_head_unstable_body( &self, follow_subscription: String, @@ -63,7 +63,7 @@ pub trait ChainHeadApi { /// Retrieves the header of a pinned block. /// - /// This method should be seen as a complement to `chainHead_unstable_follow`, + /// This method should be seen as a complement to `chainHead_v1_follow`, /// allowing the JSON-RPC client to retrieve more information about a block /// that has been reported. /// @@ -73,7 +73,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_header", raw_method)] + #[method(name = "chainHead_v1_header", raw_method)] async fn chain_head_unstable_header( &self, follow_subscription: String, @@ -85,7 +85,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_storage", raw_method)] + #[method(name = "chainHead_v1_storage", raw_method)] async fn chain_head_unstable_storage( &self, follow_subscription: String, @@ -99,7 +99,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_call", raw_method)] + #[method(name = "chainHead_v1_call", raw_method)] async fn chain_head_unstable_call( &self, follow_subscription: String, @@ -118,7 +118,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. 
- #[method(name = "chainHead_unstable_unpin", raw_method)] + #[method(name = "chainHead_v1_unpin", raw_method)] async fn chain_head_unstable_unpin( &self, follow_subscription: String, @@ -131,21 +131,21 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_continue", raw_method)] + #[method(name = "chainHead_v1_continue", raw_method)] async fn chain_head_unstable_continue( &self, follow_subscription: String, operation_id: String, ) -> Result<(), Error>; - /// Stops an operation started with chainHead_unstable_body, chainHead_unstable_call, or - /// chainHead_unstable_storage. If the operation was still in progress, this interrupts it. If + /// Stops an operation started with chainHead_v1_body, chainHead_v1_call, or + /// chainHead_v1_storage. If the operation was still in progress, this interrupts it. If /// the operation was already finished, this call has no effect. /// /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_stopOperation", raw_method)] + #[method(name = "chainHead_v1_stopOperation", raw_method)] async fn chain_head_unstable_stop_operation( &self, follow_subscription: String, diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs index 0e5ccb91d39..3495d9e5449 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs @@ -186,7 +186,7 @@ impl OperationState { /// Stops the operation if `waitingForContinue` event was emitted for the associated /// operation ID. /// - /// Returns nothing in accordance with `chainHead_unstable_stopOperation`. + /// Returns nothing in accordance with `chainHead_v1_stopOperation`. pub fn stop_operation(&self) { // `waitingForContinue` not generated. if !self.shared_state.requested_continue.load(std::sync::atomic::Ordering::Acquire) { diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 14f664858a0..4bab2194e08 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -156,7 +156,7 @@ async fn setup_api() -> ( ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); @@ -266,7 +266,7 @@ async fn follow_subscription_produces_blocks() { .into_rpc(); let finalized_hash = client.info().finalized_hash; - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); // Initialized must always be reported first. let event: FollowEvent = get_next_event(&mut sub).await; @@ -337,7 +337,7 @@ async fn follow_with_runtime() { .into_rpc(); let finalized_hash = client.info().finalized_hash; - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); // Initialized must always be reported first. 
let event: FollowEvent = get_next_event(&mut sub).await; @@ -448,14 +448,14 @@ async fn get_header() { // Invalid subscription ID must produce no results. let res: Option = api - .call("chainHead_unstable_header", ["invalid_sub_id", &invalid_hash]) + .call("chainHead_v1_header", ["invalid_sub_id", &invalid_hash]) .await .unwrap(); assert!(res.is_none()); // Valid subscription with invalid block hash will error. let err = api - .call::<_, serde_json::Value>("chainHead_unstable_header", [&sub_id, &invalid_hash]) + .call::<_, serde_json::Value>("chainHead_v1_header", [&sub_id, &invalid_hash]) .await .unwrap_err(); assert_matches!(err, @@ -463,7 +463,7 @@ async fn get_header() { ); // Obtain the valid header. - let res: String = api.call("chainHead_unstable_header", [&sub_id, &block_hash]).await.unwrap(); + let res: String = api.call("chainHead_v1_header", [&sub_id, &block_hash]).await.unwrap(); let bytes = array_bytes::hex2bytes(&res).unwrap(); let header: Header = Decode::decode(&mut &bytes[..]).unwrap(); assert_eq!(header, block.header); @@ -476,15 +476,13 @@ async fn get_body() { let invalid_hash = hex_string(&INVALID_HASH); // Subscription ID is invalid. - let response: MethodResponse = api - .call("chainHead_unstable_body", ["invalid_sub_id", &invalid_hash]) - .await - .unwrap(); + let response: MethodResponse = + api.call("chainHead_v1_body", ["invalid_sub_id", &invalid_hash]).await.unwrap(); assert_matches!(response, MethodResponse::LimitReached); // Block hash is invalid. let err = api - .call::<_, serde_json::Value>("chainHead_unstable_body", [&sub_id, &invalid_hash]) + .call::<_, serde_json::Value>("chainHead_v1_body", [&sub_id, &invalid_hash]) .await .unwrap_err(); assert_matches!(err, @@ -493,7 +491,7 @@ async fn get_body() { // Valid call. let response: MethodResponse = - api.call("chainHead_unstable_body", [&sub_id, &block_hash]).await.unwrap(); + api.call("chainHead_v1_body", [&sub_id, &block_hash]).await.unwrap(); let operation_id = match response { MethodResponse::Started(started) => started.operation_id, MethodResponse::LimitReached => panic!("Expected started response"), @@ -534,7 +532,7 @@ async fn get_body() { // Valid call to a block with extrinsics. let response: MethodResponse = - api.call("chainHead_unstable_body", [&sub_id, &block_hash]).await.unwrap(); + api.call("chainHead_v1_body", [&sub_id, &block_hash]).await.unwrap(); let operation_id = match response { MethodResponse::Started(started) => started.operation_id, MethodResponse::LimitReached => panic!("Expected started response"), @@ -556,10 +554,7 @@ async fn call_runtime() { // Subscription ID is invalid. let response: MethodResponse = api - .call( - "chainHead_unstable_call", - ["invalid_sub_id", &block_hash, "BabeApi_current_epoch", "0x00"], - ) + .call("chainHead_v1_call", ["invalid_sub_id", &block_hash, "BabeApi_current_epoch", "0x00"]) .await .unwrap(); assert_matches!(response, MethodResponse::LimitReached); @@ -567,7 +562,7 @@ async fn call_runtime() { // Block hash is invalid. let err = api .call::<_, serde_json::Value>( - "chainHead_unstable_call", + "chainHead_v1_call", [&sub_id, &invalid_hash, "BabeApi_current_epoch", "0x00"], ) .await @@ -579,7 +574,7 @@ async fn call_runtime() { // Pass an invalid parameters that cannot be decode. let err = api .call::<_, serde_json::Value>( - "chainHead_unstable_call", + "chainHead_v1_call", // 0x0 is invalid. 
[&sub_id, &block_hash, "BabeApi_current_epoch", "0x0"], ) @@ -595,7 +590,7 @@ async fn call_runtime() { let call_parameters = hex_string(&alice_id.encode()); let response: MethodResponse = api .call( - "chainHead_unstable_call", + "chainHead_v1_call", [&sub_id, &block_hash, "AccountNonceApi_account_nonce", &call_parameters], ) .await @@ -614,7 +609,7 @@ async fn call_runtime() { // The `current_epoch` takes no parameters and not draining the input buffer // will cause the execution to fail. let response: MethodResponse = api - .call("chainHead_unstable_call", [&sub_id, &block_hash, "BabeApi_current_epoch", "0x00"]) + .call("chainHead_v1_call", [&sub_id, &block_hash, "BabeApi_current_epoch", "0x00"]) .await .unwrap(); let operation_id = match response { @@ -651,7 +646,7 @@ async fn call_runtime_without_flag() { ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); @@ -685,7 +680,7 @@ async fn call_runtime_without_flag() { let call_parameters = hex_string(&alice_id.encode()); let err = api .call::<_, serde_json::Value>( - "chainHead_unstable_call", + "chainHead_v1_call", [&sub_id, &block_hash, "AccountNonceApi_account_nonce", &call_parameters], ) .await @@ -706,7 +701,7 @@ async fn get_storage_hash() { // Subscription ID is invalid. let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ "invalid_sub_id", &invalid_hash, @@ -720,7 +715,7 @@ async fn get_storage_hash() { // Block hash is invalid. let err = api .call::<_, serde_json::Value>( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &invalid_hash, @@ -736,7 +731,7 @@ async fn get_storage_hash() { // Valid call without storage at the key. let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -779,7 +774,7 @@ async fn get_storage_hash() { // Valid call with storage at the key. let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -812,7 +807,7 @@ async fn get_storage_hash() { // Valid call with storage at the key. let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &genesis_hash, @@ -869,7 +864,7 @@ async fn get_storage_multi_query_iter() { // Valid call with storage at the key. let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -920,7 +915,7 @@ async fn get_storage_multi_query_iter() { let expected_value = hex_string(&CHILD_VALUE); let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &genesis_hash, @@ -974,7 +969,7 @@ async fn get_storage_value() { // Subscription ID is invalid. let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ "invalid_sub_id", &invalid_hash, @@ -988,7 +983,7 @@ async fn get_storage_value() { // Block hash is invalid. let err = api .call::<_, serde_json::Value>( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &invalid_hash, @@ -1004,7 +999,7 @@ async fn get_storage_value() { // Valid call without storage at the key. 
let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -1047,7 +1042,7 @@ async fn get_storage_value() { // Valid call with storage at the key. let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -1079,7 +1074,7 @@ async fn get_storage_value() { let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &genesis_hash, @@ -1121,7 +1116,7 @@ async fn get_storage_non_queryable_key() { let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -1146,7 +1141,7 @@ async fn get_storage_non_queryable_key() { let prefixed_key = hex_string(&prefixed_key); let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -1171,7 +1166,7 @@ async fn get_storage_non_queryable_key() { let prefixed_key = hex_string(&prefixed_key); let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -1197,7 +1192,7 @@ async fn get_storage_non_queryable_key() { let prefixed_key = hex_string(&prefixed_key); let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -1227,9 +1222,9 @@ async fn unique_operation_ids() { // Ensure that operation IDs are unique for multiple method calls. for _ in 0..5 { - // Valid `chainHead_unstable_body` call. + // Valid `chainHead_v1_body` call. let response: MethodResponse = - api.call("chainHead_unstable_body", [&sub_id, &block_hash]).await.unwrap(); + api.call("chainHead_v1_body", [&sub_id, &block_hash]).await.unwrap(); let operation_id = match response { MethodResponse::Started(started) => started.operation_id, MethodResponse::LimitReached => panic!("Expected started response"), @@ -1241,11 +1236,11 @@ async fn unique_operation_ids() { // Ensure uniqueness. assert!(op_ids.insert(operation_id)); - // Valid `chainHead_unstable_storage` call. + // Valid `chainHead_v1_storage` call. let key = hex_string(&KEY); let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -1266,12 +1261,12 @@ async fn unique_operation_ids() { // Ensure uniqueness. assert!(op_ids.insert(operation_id)); - // Valid `chainHead_unstable_call` call. + // Valid `chainHead_v1_call` call. let alice_id = AccountKeyring::Alice.to_account_id(); let call_parameters = hex_string(&alice_id.encode()); let response: MethodResponse = api .call( - "chainHead_unstable_call", + "chainHead_v1_call", [&sub_id, &block_hash, "AccountNonceApi_account_nonce", &call_parameters], ) .await @@ -1313,12 +1308,11 @@ async fn separate_operation_ids_for_subscriptions() { .into_rpc(); // Create two separate subscriptions. 
- let mut sub_first = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub_first = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); let sub_id_first = sub_first.subscription_id(); let sub_id_first = serde_json::to_string(&sub_id_first).unwrap(); - let mut sub_second = - api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub_second = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); let sub_id_second = sub_second.subscription_id(); let sub_id_second = serde_json::to_string(&sub_id_second).unwrap(); @@ -1362,17 +1356,15 @@ async fn separate_operation_ids_for_subscriptions() { // Each `chainHead_follow` subscription receives a separate operation ID. let response: MethodResponse = - api.call("chainHead_unstable_body", [&sub_id_first, &block_hash]).await.unwrap(); + api.call("chainHead_v1_body", [&sub_id_first, &block_hash]).await.unwrap(); let operation_id: String = match response { MethodResponse::Started(started) => started.operation_id, MethodResponse::LimitReached => panic!("Expected started response"), }; assert_eq!(operation_id, "0"); - let response: MethodResponse = api - .call("chainHead_unstable_body", [&sub_id_second, &block_hash]) - .await - .unwrap(); + let response: MethodResponse = + api.call("chainHead_v1_body", [&sub_id_second, &block_hash]).await.unwrap(); let operation_id_second: String = match response { MethodResponse::Started(started) => started.operation_id, MethodResponse::LimitReached => panic!("Expected started response"), @@ -1449,7 +1441,7 @@ async fn follow_generates_initial_blocks() { let block_2_f_hash = block_2_f.header.hash(); client.import(BlockOrigin::Own, block_2_f.clone()).await.unwrap(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); // Initialized must always be reported first. let event: FollowEvent = get_next_event(&mut sub).await; @@ -1561,7 +1553,7 @@ async fn follow_exceeding_pinned_blocks() { ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); let block = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().genesis_hash) @@ -1640,7 +1632,7 @@ async fn follow_with_unpin() { ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); @@ -1672,17 +1664,14 @@ async fn follow_with_unpin() { // Unpin an invalid subscription ID must return Ok(()). let invalid_hash = hex_string(&INVALID_HASH); let _res: () = api - .call("chainHead_unstable_unpin", rpc_params!["invalid_sub_id", &invalid_hash]) + .call("chainHead_v1_unpin", rpc_params!["invalid_sub_id", &invalid_hash]) .await .unwrap(); // Valid subscription with invalid block hash. 
let invalid_hash = hex_string(&INVALID_HASH); let err = api - .call::<_, serde_json::Value>( - "chainHead_unstable_unpin", - rpc_params![&sub_id, &invalid_hash], - ) + .call::<_, serde_json::Value>("chainHead_v1_unpin", rpc_params![&sub_id, &invalid_hash]) .await .unwrap_err(); assert_matches!(err, @@ -1690,10 +1679,7 @@ async fn follow_with_unpin() { ); // To not exceed the number of pinned blocks, we need to unpin before the next import. - let _res: () = api - .call("chainHead_unstable_unpin", rpc_params![&sub_id, &block_hash]) - .await - .unwrap(); + let _res: () = api.call("chainHead_v1_unpin", rpc_params![&sub_id, &block_hash]).await.unwrap(); // Block tree: // finalized_block -> block -> block2 @@ -1754,7 +1740,7 @@ async fn unpin_duplicate_hashes() { ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); @@ -1786,7 +1772,7 @@ async fn unpin_duplicate_hashes() { // Try to unpin duplicate hashes. let err = api .call::<_, serde_json::Value>( - "chainHead_unstable_unpin", + "chainHead_v1_unpin", rpc_params![&sub_id, vec![&block_hash, &block_hash]], ) .await @@ -1821,7 +1807,7 @@ async fn unpin_duplicate_hashes() { // Try to unpin duplicate hashes. let err = api .call::<_, serde_json::Value>( - "chainHead_unstable_unpin", + "chainHead_v1_unpin", rpc_params![&sub_id, vec![&block_hash, &block_hash_2, &block_hash]], ) .await @@ -1832,7 +1818,7 @@ async fn unpin_duplicate_hashes() { // Can unpin blocks. let _res: () = api - .call("chainHead_unstable_unpin", rpc_params![&sub_id, vec![&block_hash, &block_hash_2]]) + .call("chainHead_v1_unpin", rpc_params![&sub_id, vec![&block_hash, &block_hash_2]]) .await .unwrap(); } @@ -1859,7 +1845,7 @@ async fn follow_with_multiple_unpin_hashes() { ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); @@ -1930,16 +1916,13 @@ async fn follow_with_multiple_unpin_hashes() { // Unpin an invalid subscription ID must return Ok(()). let invalid_hash = hex_string(&INVALID_HASH); let _res: () = api - .call("chainHead_unstable_unpin", rpc_params!["invalid_sub_id", &invalid_hash]) + .call("chainHead_v1_unpin", rpc_params!["invalid_sub_id", &invalid_hash]) .await .unwrap(); // Valid subscription with invalid block hash. let err = api - .call::<_, serde_json::Value>( - "chainHead_unstable_unpin", - rpc_params![&sub_id, &invalid_hash], - ) + .call::<_, serde_json::Value>("chainHead_v1_unpin", rpc_params![&sub_id, &invalid_hash]) .await .unwrap_err(); assert_matches!(err, @@ -1947,14 +1930,14 @@ async fn follow_with_multiple_unpin_hashes() { ); let _res: () = api - .call("chainHead_unstable_unpin", rpc_params![&sub_id, &block_1_hash]) + .call("chainHead_v1_unpin", rpc_params![&sub_id, &block_1_hash]) .await .unwrap(); // One block hash is invalid. Block 1 is already unpinned. let err = api .call::<_, serde_json::Value>( - "chainHead_unstable_unpin", + "chainHead_v1_unpin", rpc_params![&sub_id, vec![&block_1_hash, &block_2_hash, &block_3_hash]], ) .await @@ -1965,16 +1948,13 @@ async fn follow_with_multiple_unpin_hashes() { // Unpin multiple blocks. 
let _res: () = api - .call("chainHead_unstable_unpin", rpc_params![&sub_id, vec![&block_2_hash, &block_3_hash]]) + .call("chainHead_v1_unpin", rpc_params![&sub_id, vec![&block_2_hash, &block_3_hash]]) .await .unwrap(); // Check block 2 and 3 are unpinned. let err = api - .call::<_, serde_json::Value>( - "chainHead_unstable_unpin", - rpc_params![&sub_id, &block_2_hash], - ) + .call::<_, serde_json::Value>("chainHead_v1_unpin", rpc_params![&sub_id, &block_2_hash]) .await .unwrap_err(); assert_matches!(err, @@ -1982,10 +1962,7 @@ async fn follow_with_multiple_unpin_hashes() { ); let err = api - .call::<_, serde_json::Value>( - "chainHead_unstable_unpin", - rpc_params![&sub_id, &block_3_hash], - ) + .call::<_, serde_json::Value>("chainHead_v1_unpin", rpc_params![&sub_id, &block_3_hash]) .await .unwrap_err(); assert_matches!(err, @@ -2016,7 +1993,7 @@ async fn follow_prune_best_block() { .into_rpc(); let finalized_hash = client.info().finalized_hash; - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); // Initialized must always be reported first. let event: FollowEvent = get_next_event(&mut sub).await; @@ -2178,7 +2155,7 @@ async fn follow_prune_best_block() { let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); let hash = format!("{:?}", block_2_hash); - let _res: () = api.call("chainHead_unstable_unpin", rpc_params![&sub_id, &hash]).await.unwrap(); + let _res: () = api.call("chainHead_v1_unpin", rpc_params![&sub_id, &hash]).await.unwrap(); } #[tokio::test] @@ -2282,7 +2259,7 @@ async fn follow_forks_pruned_block() { // Block 2_f and 3_f are not pruned, pruning happens at height (N - 1). client.finalize_block(block_3_hash, None).unwrap(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); // Initialized must always be reported first. let event: FollowEvent = get_next_event(&mut sub).await; @@ -2444,7 +2421,7 @@ async fn follow_report_multiple_pruned_block() { let block_3_f = block_builder.build().unwrap().block; let block_3_f_hash = block_3_f.hash(); client.import(BlockOrigin::Own, block_3_f.clone()).await.unwrap(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); // Initialized must always be reported first. let event: FollowEvent = get_next_event(&mut sub).await; @@ -2630,7 +2607,7 @@ async fn pin_block_references() { } } - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); @@ -2669,10 +2646,7 @@ async fn pin_block_references() { wait_pinned_references(&backend, &hash, 1).await; // To not exceed the number of pinned blocks, we need to unpin before the next import. - let _res: () = api - .call("chainHead_unstable_unpin", rpc_params![&sub_id, &block_hash]) - .await - .unwrap(); + let _res: () = api.call("chainHead_v1_unpin", rpc_params![&sub_id, &block_hash]).await.unwrap(); // Make sure unpin clears out the reference. 
let refs = backend.pin_refs(&hash).unwrap(); @@ -2765,7 +2739,7 @@ async fn follow_finalized_before_new_block() { let block_1_hash = block_1.header.hash(); client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); // Trigger the `FinalizedNotification` for block 1 before the `BlockImportNotification`, and // expect for the `chainHead` to generate `NewBlock`, `BestBlock` and `Finalized` events. @@ -2870,7 +2844,7 @@ async fn ensure_operation_limits_works() { ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); @@ -2909,7 +2883,7 @@ async fn ensure_operation_limits_works() { ]; let response: MethodResponse = api - .call("chainHead_unstable_storage", rpc_params![&sub_id, &block_hash, items]) + .call("chainHead_v1_storage", rpc_params![&sub_id, &block_hash, items]) .await .unwrap(); let operation_id = match response { @@ -2932,7 +2906,7 @@ async fn ensure_operation_limits_works() { let call_parameters = hex_string(&alice_id.encode()); let response: MethodResponse = api .call( - "chainHead_unstable_call", + "chainHead_v1_call", [&sub_id, &block_hash, "AccountNonceApi_account_nonce", &call_parameters], ) .await @@ -2977,7 +2951,7 @@ async fn check_continue_operation() { ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); @@ -3014,17 +2988,17 @@ async fn check_continue_operation() { // Invalid subscription ID must produce no results. let _res: () = api - .call("chainHead_unstable_continue", ["invalid_sub_id", &invalid_hash]) + .call("chainHead_v1_continue", ["invalid_sub_id", &invalid_hash]) .await .unwrap(); // Invalid operation ID must produce no results. - let _res: () = api.call("chainHead_unstable_continue", [&sub_id, &invalid_hash]).await.unwrap(); + let _res: () = api.call("chainHead_v1_continue", [&sub_id, &invalid_hash]).await.unwrap(); // Valid call with storage at the key. 
let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -3060,7 +3034,7 @@ async fn check_continue_operation() { std::time::Duration::from_secs(DOES_NOT_PRODUCE_EVENTS_SECONDS), ) .await; - let _res: () = api.call("chainHead_unstable_continue", [&sub_id, &operation_id]).await.unwrap(); + let _res: () = api.call("chainHead_v1_continue", [&sub_id, &operation_id]).await.unwrap(); assert_matches!( get_next_event::>(&mut sub).await, FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id && @@ -3079,7 +3053,7 @@ async fn check_continue_operation() { std::time::Duration::from_secs(DOES_NOT_PRODUCE_EVENTS_SECONDS), ) .await; - let _res: () = api.call("chainHead_unstable_continue", [&sub_id, &operation_id]).await.unwrap(); + let _res: () = api.call("chainHead_v1_continue", [&sub_id, &operation_id]).await.unwrap(); assert_matches!( get_next_event::>(&mut sub).await, FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id && @@ -3099,7 +3073,7 @@ async fn check_continue_operation() { std::time::Duration::from_secs(DOES_NOT_PRODUCE_EVENTS_SECONDS), ) .await; - let _res: () = api.call("chainHead_unstable_continue", [&sub_id, &operation_id]).await.unwrap(); + let _res: () = api.call("chainHead_v1_continue", [&sub_id, &operation_id]).await.unwrap(); assert_matches!( get_next_event::>(&mut sub).await, FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id && @@ -3118,7 +3092,7 @@ async fn check_continue_operation() { std::time::Duration::from_secs(DOES_NOT_PRODUCE_EVENTS_SECONDS), ) .await; - let _res: () = api.call("chainHead_unstable_continue", [&sub_id, &operation_id]).await.unwrap(); + let _res: () = api.call("chainHead_v1_continue", [&sub_id, &operation_id]).await.unwrap(); assert_matches!( get_next_event::>(&mut sub).await, FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id && @@ -3162,7 +3136,7 @@ async fn stop_storage_operation() { ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); @@ -3196,20 +3170,17 @@ async fn stop_storage_operation() { // Invalid subscription ID must produce no results. let _res: () = api - .call("chainHead_unstable_stopOperation", ["invalid_sub_id", &invalid_hash]) + .call("chainHead_v1_stopOperation", ["invalid_sub_id", &invalid_hash]) .await .unwrap(); // Invalid operation ID must produce no results. - let _res: () = api - .call("chainHead_unstable_stopOperation", [&sub_id, &invalid_hash]) - .await - .unwrap(); + let _res: () = api.call("chainHead_v1_stopOperation", [&sub_id, &invalid_hash]).await.unwrap(); // Valid call with storage at the key. let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -3241,10 +3212,7 @@ async fn stop_storage_operation() { ); // Stop the operation. - let _res: () = api - .call("chainHead_unstable_stopOperation", [&sub_id, &operation_id]) - .await - .unwrap(); + let _res: () = api.call("chainHead_v1_stopOperation", [&sub_id, &operation_id]).await.unwrap(); does_not_produce_event::>( &mut sub, @@ -3272,7 +3240,7 @@ async fn storage_closest_merkle_value() { // Valid call with storage at the keys. 
let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -3466,7 +3434,7 @@ async fn chain_head_stop_all_subscriptions() { ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); // Ensure the imported block is propagated and pinned for this subscription. assert_matches!( @@ -3500,8 +3468,7 @@ async fn chain_head_stop_all_subscriptions() { ); } - let mut second_sub = - api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut second_sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); // Lagging detected, the stop event is delivered immediately. assert_matches!( get_next_event::>(&mut second_sub).await, @@ -3512,14 +3479,14 @@ async fn chain_head_stop_all_subscriptions() { assert_matches!(get_next_event::>(&mut sub).await, FollowEvent::Stop); // Other subscriptions cannot be started until the suspension period is over. - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); // Should receive the stop event immediately. assert_matches!(get_next_event::>(&mut sub).await, FollowEvent::Stop); // For the next subscription, lagging distance must be smaller. client.finalize_block(parent_hash, None).unwrap(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); assert_matches!( get_next_event::>(&mut sub).await, FollowEvent::Initialized(_) @@ -3681,12 +3648,12 @@ async fn chain_head_limit_reached() { ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); // Initialized must always be reported first. let _event: FollowEvent = get_next_event(&mut sub).await; - let error = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap_err(); + let error = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap_err(); assert!(error .to_string() .contains("Maximum number of chainHead_follow has been reached")); @@ -3696,7 +3663,7 @@ async fn chain_head_limit_reached() { // Ensure the `chainHead_unfollow` is propagated to the server. tokio::time::sleep(std::time::Duration::from_secs(5)).await; - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); // Initialized must always be reported first. let _event: FollowEvent = get_next_event(&mut sub).await; } @@ -3723,7 +3690,7 @@ async fn follow_unique_pruned_blocks() { .into_rpc(); let finalized_hash = client.info().finalized_hash; - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); // Initialized must always be reported first. 
let event: FollowEvent = get_next_event(&mut sub).await; @@ -3827,7 +3794,7 @@ async fn follow_unique_pruned_blocks() { let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); let hash = format!("{:?}", block_2_hash); - let _res: () = api.call("chainHead_unstable_unpin", rpc_params![&sub_id, &hash]).await.unwrap(); + let _res: () = api.call("chainHead_v1_unpin", rpc_params![&sub_id, &hash]).await.unwrap(); // Import block 7 and check it. let block_7_hash = import_block(client.clone(), block_6_hash, 3).await.hash(); -- GitLab From 98a364fe6e7abf10819f5fddd3de0588f7c38700 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Fri, 19 Apr 2024 07:34:26 +0300 Subject: [PATCH 033/107] rpc-v2: Limit transactionBroadcast calls to 16 (#3772) This PR limits the number of active calls to the transactionBroadcast APIs to 16. cc @paritytech/subxt-team Closes: https://github.com/paritytech/polkadot-sdk/issues/3081 --------- Signed-off-by: Alexandru Vasile Co-authored-by: James Wilson --- .../client/rpc-spec-v2/src/transaction/api.rs | 8 +-- .../src/transaction/tests/setup.rs | 11 ++- .../tests/transaction_broadcast_tests.rs | 68 +++++++++++++++++-- .../src/transaction/transaction.rs | 1 + .../src/transaction/transaction_broadcast.rs | 63 +++++++++++++++-- substrate/client/service/src/builder.rs | 3 + 6 files changed, 133 insertions(+), 21 deletions(-) diff --git a/substrate/client/rpc-spec-v2/src/transaction/api.rs b/substrate/client/rpc-spec-v2/src/transaction/api.rs index 33af9c95333..119bf270c63 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/api.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/api.rs @@ -47,14 +47,14 @@ pub trait TransactionBroadcastApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "transaction_unstable_broadcast")] - fn broadcast(&self, bytes: Bytes) -> RpcResult>; + #[method(name = "transaction_unstable_broadcast", raw_method)] + async fn broadcast(&self, bytes: Bytes) -> RpcResult>; /// Broadcast an extrinsic to the chain. /// /// # Unstable /// /// This method is unstable and subject to change in the future. 
- #[method(name = "transaction_unstable_stop")] - fn stop_broadcast(&self, operation_id: String) -> Result<(), ErrorBroadcast>; + #[method(name = "transaction_unstable_stop", raw_method)] + async fn stop_broadcast(&self, operation_id: String) -> Result<(), ErrorBroadcast>; } diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs index 4a15657a7f6..570174a3db6 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs @@ -67,6 +67,7 @@ fn maintained_pool( pub fn setup_api( options: Options, + max_tx_per_connection: usize, ) -> ( Arc, Arc, @@ -85,9 +86,13 @@ pub fn setup_api( let (task_executor, executor_recv) = TaskExecutorBroadcast::new(); - let tx_api = - RpcTransactionBroadcast::new(client_mock.clone(), pool.clone(), Arc::new(task_executor)) - .into_rpc(); + let tx_api = RpcTransactionBroadcast::new( + client_mock.clone(), + pool.clone(), + Arc::new(task_executor), + max_tx_per_connection, + ) + .into_rpc(); (api, pool, client_mock, tx_api, executor_recv, pool_state) } diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs index 14e188b6a87..f4a69bd6ed4 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs @@ -26,6 +26,8 @@ use std::sync::Arc; use substrate_test_runtime_client::AccountKeyring::*; use substrate_test_runtime_transaction_pool::uxt; +const MAX_TX_PER_CONNECTION: usize = 4; + // Test helpers. use crate::transaction::tests::{ middleware_pool::{MiddlewarePoolEvent, TxStatusTypeTest}, @@ -35,7 +37,7 @@ use crate::transaction::tests::{ #[tokio::test] async fn tx_broadcast_enters_pool() { let (api, pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) = - setup_api(Default::default()); + setup_api(Default::default(), MAX_TX_PER_CONNECTION); // Start at block 1. let block_1_header = api.push_block(1, vec![], true); @@ -94,7 +96,8 @@ async fn tx_broadcast_enters_pool() { #[tokio::test] async fn tx_broadcast_invalid_tx() { - let (_, pool, _, tx_api, exec_middleware, _) = setup_api(Default::default()); + let (_, pool, _, tx_api, exec_middleware, _) = + setup_api(Default::default(), MAX_TX_PER_CONNECTION); // Invalid parameters. let err = tx_api @@ -131,7 +134,7 @@ async fn tx_broadcast_invalid_tx() { #[tokio::test] async fn tx_stop_with_invalid_operation_id() { - let (_, _, _, tx_api, _, _) = setup_api(Default::default()); + let (_, _, _, tx_api, _, _) = setup_api(Default::default(), MAX_TX_PER_CONNECTION); // Make an invalid stop call. let err = tx_api @@ -146,7 +149,7 @@ async fn tx_stop_with_invalid_operation_id() { #[tokio::test] async fn tx_broadcast_resubmits_future_nonce_tx() { let (api, pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) = - setup_api(Default::default()); + setup_api(Default::default(), MAX_TX_PER_CONNECTION); // Start at block 1. let block_1_header = api.push_block(1, vec![], true); @@ -237,7 +240,7 @@ async fn tx_broadcast_resubmits_future_nonce_tx() { #[tokio::test] async fn tx_broadcast_stop_after_broadcast_finishes() { let (api, pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) = - setup_api(Default::default()); + setup_api(Default::default(), MAX_TX_PER_CONNECTION); // Start at block 1. 
let block_1_header = api.push_block(1, vec![], true); @@ -320,7 +323,7 @@ async fn tx_broadcast_resubmits_invalid_tx() { }; let (api, pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) = - setup_api(options); + setup_api(options, MAX_TX_PER_CONNECTION); let uxt = uxt(Alice, ALICE_NONCE); let xt = hex_string(&uxt.encode()); @@ -439,7 +442,8 @@ async fn tx_broadcast_resubmits_dropped_tx() { ban_time: std::time::Duration::ZERO, }; - let (api, pool, client_mock, tx_api, _, mut pool_middleware) = setup_api(options); + let (api, pool, client_mock, tx_api, _, mut pool_middleware) = + setup_api(options, MAX_TX_PER_CONNECTION); let current_uxt = uxt(Alice, ALICE_NONCE); let current_xt = hex_string(¤t_uxt.encode()); @@ -518,3 +522,53 @@ async fn tx_broadcast_resubmits_dropped_tx() { // The dropped transaction was resubmitted. assert_eq!(events.get(&future_xt).unwrap(), &vec![TxStatusTypeTest::Ready]); } + +#[tokio::test] +async fn tx_broadcast_limit_reached() { + // One operation per connection. + let (api, _pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) = + setup_api(Default::default(), 1); + + // Start at block 1. + let block_1_header = api.push_block(1, vec![], true); + let uxt = uxt(Alice, ALICE_NONCE); + let xt = hex_string(&uxt.encode()); + + let operation_id: String = + tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); + + // Announce block 1 to `transaction_unstable_broadcast`. + client_mock.trigger_import_stream(block_1_header).await; + + // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool. + let event = get_next_event!(&mut pool_middleware); + assert_eq!( + event, + MiddlewarePoolEvent::TransactionStatus { + transaction: xt.clone(), + status: TxStatusTypeTest::Ready + } + ); + assert_eq!(1, exec_middleware.num_tasks()); + + let operation_id_limit_reached: Option = + tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); + assert!(operation_id_limit_reached.is_none(), "No operation ID => tx was rejected"); + + // We still have in flight one operation. + assert_eq!(1, exec_middleware.num_tasks()); + + // Force the future to exit by calling stop. + let _: () = tx_api + .call("transaction_unstable_stop", rpc_params![&operation_id]) + .await + .unwrap(); + + // Ensure the broadcast future finishes. + let _ = get_next_event!(&mut exec_middleware.recv); + assert_eq!(0, exec_middleware.num_tasks()); + + // Can resubmit again now. + let _operation_id: String = + tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); +} diff --git a/substrate/client/rpc-spec-v2/src/transaction/transaction.rs b/substrate/client/rpc-spec-v2/src/transaction/transaction.rs index 6a7c69b8f7d..723440d1b11 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/transaction.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/transaction.rs @@ -26,6 +26,7 @@ use crate::{ }, SubscriptionTaskExecutor, }; + use codec::Decode; use futures::{StreamExt, TryFutureExt}; use jsonrpsee::{core::async_trait, PendingSubscriptionSink}; diff --git a/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs b/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs index ef1a426865d..68c19010e31 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs @@ -18,11 +18,17 @@ //! API implementation for broadcasting transactions. 
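The cap exercised by the tests above follows a reserve-then-release pattern: a slot is taken from a per-connection budget before the broadcast future is spawned, and returned when the future finishes, so both `transaction_unstable_stop` and natural completion free capacity. A minimal self-contained sketch of that pattern (simplified and hypothetical; the real `RpcConnections` additionally records the operation identifiers held by each connection, which `stop_broadcast` uses to check ownership):

use std::{collections::HashMap, sync::{Arc, Mutex}};

/// Per-connection cap on concurrent operations.
#[derive(Clone)]
struct ConnectionLimits {
    max_per_connection: usize,
    active: Arc<Mutex<HashMap<u64, usize>>>, // connection id -> active operations
}

/// RAII guard: dropping it releases the reserved slot.
struct Reservation {
    conn_id: u64,
    active: Arc<Mutex<HashMap<u64, usize>>>,
}

impl ConnectionLimits {
    fn reserve(&self, conn_id: u64) -> Option<Reservation> {
        let mut active = self.active.lock().unwrap();
        let count = active.entry(conn_id).or_insert(0);
        if *count >= self.max_per_connection {
            return None; // limit reached; the RPC answers with `Ok(None)`
        }
        *count += 1;
        Some(Reservation { conn_id, active: self.active.clone() })
    }
}

impl Drop for Reservation {
    fn drop(&mut self) {
        // Runs when the broadcast future finishes or is stopped.
        if let Some(count) = self.active.lock().unwrap().get_mut(&self.conn_id) {
            *count = count.saturating_sub(1);
        }
    }
}

Because the guard travels into the spawned future (note the `drop(reserved_identifier)` in the implementation below), every exit path releases the slot, and a rejected broadcast surfaces as `Ok(None)` rather than an error, exactly as `tx_broadcast_limit_reached` asserts.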
-use crate::{transaction::api::TransactionBroadcastApiServer, SubscriptionTaskExecutor}; +use crate::{ + common::connections::RpcConnections, transaction::api::TransactionBroadcastApiServer, + SubscriptionTaskExecutor, +}; use codec::Decode; use futures::{FutureExt, Stream, StreamExt}; use futures_util::stream::AbortHandle; -use jsonrpsee::core::{async_trait, RpcResult}; +use jsonrpsee::{ + core::{async_trait, RpcResult}, + ConnectionDetails, +}; use parking_lot::RwLock; use rand::{distributions::Alphanumeric, Rng}; use sc_client_api::BlockchainEvents; @@ -46,6 +52,8 @@ pub struct TransactionBroadcast { executor: SubscriptionTaskExecutor, /// The broadcast operation IDs. broadcast_ids: Arc>>>, + /// Keep track of how many concurrent operations are active for each connection. + rpc_connections: RpcConnections, } /// The state of a broadcast operation. @@ -58,8 +66,19 @@ struct BroadcastState { impl TransactionBroadcast { /// Creates a new [`TransactionBroadcast`]. - pub fn new(client: Arc, pool: Arc, executor: SubscriptionTaskExecutor) -> Self { - TransactionBroadcast { client, pool, executor, broadcast_ids: Default::default() } + pub fn new( + client: Arc, + pool: Arc, + executor: SubscriptionTaskExecutor, + max_transactions_per_connection: usize, + ) -> Self { + TransactionBroadcast { + client, + pool, + executor, + broadcast_ids: Default::default(), + rpc_connections: RpcConnections::new(max_transactions_per_connection), + } } /// Generate an unique operation ID for the `transaction_broadcast` RPC method. @@ -102,12 +121,26 @@ where ::Hash: Unpin, Client: HeaderBackend + BlockchainEvents + Send + Sync + 'static, { - fn broadcast(&self, bytes: Bytes) -> RpcResult> { + async fn broadcast( + &self, + connection_details: ConnectionDetails, + bytes: Bytes, + ) -> RpcResult> { let pool = self.pool.clone(); // The unique ID of this operation. let id = self.generate_unique_id(); + // Ensure that the connection has not reached the maximum number of active operations. + let Some(reserved_connection) = self.rpc_connections.reserve_space(connection_details.id()) + else { + return Ok(None) + }; + let Some(reserved_identifier) = reserved_connection.register(id.clone()) else { + // This can only happen if the generated operation ID is not unique. + return Ok(None) + }; + // The JSON-RPC server might check whether the transaction is valid before broadcasting it. // If it does so and if the transaction is invalid, the server should silently do nothing // and the JSON-RPC client is not informed of the problem. Invalid transactions should still @@ -118,7 +151,11 @@ where // Save the tx hash to remove it later. let tx_hash = pool.hash_of(&decoded_extrinsic); - let mut best_block_import_stream = + // The compiler can no longer deduce the type of the stream and complains + // about `one type is more general than the other`. + let mut best_block_import_stream: std::pin::Pin< + Box::Hash> + Send>, + > = Box::pin(self.client.import_notification_stream().filter_map( |notification| async move { notification.is_new_best.then_some(notification.hash) }, )); @@ -180,6 +217,9 @@ where // The future expected by the executor must be `Future` instead of // `Future>`. let fut = fut.map(move |result| { + // Connection space is cleaned when this object is dropped. + drop(reserved_identifier); + // Remove the entry from the broadcast IDs map. 
 let Some(broadcast_state) = broadcast_ids.write().remove(&drop_id) else { return };
@@ -203,7 +243,16 @@ where
 Ok(Some(id))
 }

- fn stop_broadcast(&self, operation_id: String) -> Result<(), ErrorBroadcast> {
+ async fn stop_broadcast(
+ &self,
+ connection_details: ConnectionDetails,
+ operation_id: String,
+ ) -> Result<(), ErrorBroadcast> {
+ // The operation ID must correlate to the same connection ID.
+ if !self.rpc_connections.contains_identifier(connection_details.id(), &operation_id) {
+ return Err(ErrorBroadcast::InvalidOperationID)
+ }
+
 let mut broadcast_ids = self.broadcast_ids.write();

 let Some(broadcast_state) = broadcast_ids.remove(&operation_id) else {
diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs
index 830f9884719..d0d7cba3862 100644
--- a/substrate/client/service/src/builder.rs
+++ b/substrate/client/service/src/builder.rs
@@ -644,10 +644,13 @@ where
 (chain, state, child_state)
 };

+ const MAX_TRANSACTION_PER_CONNECTION: usize = 16;
+
 let transaction_broadcast_rpc_v2 = sc_rpc_spec_v2::transaction::TransactionBroadcast::new(
 client.clone(),
 transaction_pool.clone(),
 task_executor.clone(),
+ MAX_TRANSACTION_PER_CONNECTION,
 )
 .into_rpc();

-- GitLab

From 4f125d1928648c53db377d0ccc68fa2d705ed9ea Mon Sep 17 00:00:00 2001
From: Andrei Eres
Date: Fri, 19 Apr 2024 09:35:56 +0200
Subject: [PATCH 034/107] Update subsystem-benchmark params (#4201)

- Restored latency (results are more stable with it)
- Relaxed the threshold
- Increased the number of runs
---
 .../availability-distribution-regression-bench.rs | 14 ++++++--------
 .../availability-recovery-regression-bench.rs | 10 ++++------
 2 files changed, 10 insertions(+), 14 deletions(-)

diff --git a/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs
index 0d4f4f49e31..5e3072be3a8 100644
--- a/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs
+++ b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs
@@ -31,7 +31,7 @@ use polkadot_subsystem_bench::{
 };
 use std::io::Write;

-const BENCH_COUNT: usize = 5;
+const BENCH_COUNT: usize = 50;

 fn main() -> Result<(), String> {
 let mut messages = vec![];
 let mut config = TestConfiguration::default();
 config.n_cores = 10;
 config.n_validators = 500;
 config.num_blocks = 3;
- config.connectivity = 100;
- config.latency = None;
 config.generate_pov_sizes();
 let state = TestState::new(&config);
@@ -75,13 +73,13 @@ fn main() -> Result<(), String> {
 // We expect no variance for received and sent
 // but use 0.001 because we operate with floats
 messages.extend(average_usage.check_network_usage(&[
- ("Received from peers", 433.3, 0.001),
- ("Sent to peers", 18480.0, 0.001),
+ ("Received from peers", 433.3333, 0.001),
+ ("Sent to peers", 18479.9000, 0.001),
 ]));
 messages.extend(average_usage.check_cpu_usage(&[
- ("availability-distribution", 0.012, 0.05),
- ("availability-store", 0.153, 0.05),
- ("bitfield-distribution", 0.026, 0.05),
+ ("availability-distribution", 0.0123, 0.1),
+ ("availability-store", 0.1597, 0.1),
+ ("bitfield-distribution", 0.0223, 0.1),
 ]));

 if messages.is_empty() {
diff --git a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs
index
9be147bda93..d9bdc1a2d94 100644
--- a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs
+++ b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs
@@ -32,7 +32,7 @@ use polkadot_subsystem_bench::{
 };
 use std::io::Write;

-const BENCH_COUNT: usize = 5;
+const BENCH_COUNT: usize = 10;

 fn main() -> Result<(), String> {
 let mut messages = vec![];
 let options = DataAvailabilityReadOptions { fetch_from_backers: true };
 let mut config = TestConfiguration::default();
 config.num_blocks = 3;
- config.connectivity = 100;
- config.latency = None;
 config.generate_pov_sizes();
 let state = TestState::new(&config);
@@ -73,10 +71,10 @@ fn main() -> Result<(), String> {
 // We expect no variance for received and sent
 // but use 0.001 because we operate with floats
 messages.extend(average_usage.check_network_usage(&[
- ("Received from peers", 307200.000, 0.001),
- ("Sent to peers", 1.667, 0.001),
+ ("Received from peers", 307203.0000, 0.001),
+ ("Sent to peers", 1.6667, 0.001),
 ]));
- messages.extend(average_usage.check_cpu_usage(&[("availability-recovery", 11.500, 0.05)]));
+ messages.extend(average_usage.check_cpu_usage(&[("availability-recovery", 12.8338, 0.1)]));

 if messages.is_empty() {
 Ok(())

-- GitLab

From 04a9071e2a5ba903648f8db19066e671659850fb Mon Sep 17 00:00:00 2001
From: Andrei Sandu <54316454+sandreim@users.noreply.github.com>
Date: Fri, 19 Apr 2024 11:15:59 +0300
Subject: [PATCH 035/107] Use higher priority for PVF preparation in dispute/approval context (#4172)

Related to the discussion in https://github.com/paritytech/polkadot-sdk/issues/4126.

Currently all preparations have the same priority, which is not ideal in all cases. This change should improve the finality time in the context of on-demand parachains, and when `ExecutorParams` are updated on-chain and a rebuild of all artifacts is required.

The desired effect is to speed up approval and dispute PVF executions that require preparation, and to delay backing executions that require preparation.

---------

Signed-off-by: Andrei Sandu
---
 .../node/core/candidate-validation/src/lib.rs | 40 ++++++++++++++-----
 .../core/candidate-validation/src/tests.rs | 2 +
 polkadot/node/core/pvf/src/host.rs | 2 +-
 polkadot/node/core/pvf/src/priority.rs | 7 ++--
 4 files changed, 37 insertions(+), 14 deletions(-)

diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs
index ec24434db24..8663dc43835 100644
--- a/polkadot/node/core/candidate-validation/src/lib.rs
+++ b/polkadot/node/core/candidate-validation/src/lib.rs
@@ -657,7 +657,14 @@ async fn validate_candidate_exhaustive(
 PrepareJobKind::Compilation,
 );

- validation_backend.validate_candidate(pvf, exec_timeout, params.encode()).await
+ validation_backend
+ .validate_candidate(
+ pvf,
+ exec_timeout,
+ params.encode(),
+ polkadot_node_core_pvf::Priority::Normal,
+ )
+ .await
 },
 PvfExecKind::Approval =>
 validation_backend
@@ -667,6 +674,7 @@ async fn validate_candidate_exhaustive(
 params,
 executor_params,
 PVF_APPROVAL_EXECUTION_RETRY_DELAY,
+ polkadot_node_core_pvf::Priority::Critical,
 )
 .await,
 };
@@ -749,10 +757,15 @@ trait ValidationBackend {
 pvf: PvfPrepData,
 exec_timeout: Duration,
 encoded_params: Vec,
+ // The priority for the preparation job.
+ prepare_priority: polkadot_node_core_pvf::Priority,
 ) -> Result;

- /// Tries executing a PVF for the approval subsystem.
Will retry once if an error is encountered - /// that may have been transient. + /// Tries executing a PVF. Will retry once if an error is encountered that may have + /// been transient. + /// + /// The `prepare_priority` is relevant in the context of the caller. Currently we expect + /// that `approval` context has priority over `backing` context. /// /// NOTE: Should retry only on errors that are a result of execution itself, and not of /// preparation. @@ -763,6 +776,8 @@ trait ValidationBackend { params: ValidationParams, executor_params: ExecutorParams, retry_delay: Duration, + // The priority for the preparation job. + prepare_priority: polkadot_node_core_pvf::Priority, ) -> Result { let prep_timeout = pvf_prep_timeout(&executor_params, PvfPrepKind::Prepare); // Construct the PVF a single time, since it is an expensive operation. Cloning it is cheap. @@ -776,8 +791,10 @@ trait ValidationBackend { // long. let total_time_start = Instant::now(); - let mut validation_result = - self.validate_candidate(pvf.clone(), exec_timeout, params.encode()).await; + // Use `Priority::Critical` as finality trumps parachain liveliness. + let mut validation_result = self + .validate_candidate(pvf.clone(), exec_timeout, params.encode(), prepare_priority) + .await; if validation_result.is_ok() { return validation_result } @@ -851,8 +868,9 @@ trait ValidationBackend { // Encode the params again when re-trying. We expect the retry case to be relatively // rare, and we want to avoid unconditionally cloning data. - validation_result = - self.validate_candidate(pvf.clone(), new_timeout, params.encode()).await; + validation_result = self + .validate_candidate(pvf.clone(), new_timeout, params.encode(), prepare_priority) + .await; } } @@ -870,11 +888,13 @@ impl ValidationBackend for ValidationHost { pvf: PvfPrepData, exec_timeout: Duration, encoded_params: Vec, + // The priority for the preparation job. + prepare_priority: polkadot_node_core_pvf::Priority, ) -> Result { - let priority = polkadot_node_core_pvf::Priority::Normal; - let (tx, rx) = oneshot::channel(); - if let Err(err) = self.execute_pvf(pvf, exec_timeout, encoded_params, priority, tx).await { + if let Err(err) = + self.execute_pvf(pvf, exec_timeout, encoded_params, prepare_priority, tx).await + { return Err(InternalValidationError::HostCommunication(format!( "cannot send pvf to the validation host, it might have shut down: {:?}", err diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index f646f853549..e492d51e239 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -368,6 +368,7 @@ impl ValidationBackend for MockValidateCandidateBackend { _pvf: PvfPrepData, _timeout: Duration, _encoded_params: Vec, + _prepare_priority: polkadot_node_core_pvf::Priority, ) -> Result { // This is expected to panic if called more times than expected, indicating an error in the // test. 
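Condensed, the policy this commit introduces is a two-way mapping from execution context to preparation priority: backing keeps `Priority::Normal`, while approval (and dispute) work is prepared as `Priority::Critical`, because finality matters more than the liveness of an individual parachain. A self-contained sketch of that decision (the enums here are stand-ins mirroring the names in the diff above, not verbatim excerpts):

// Mirrors the execution contexts in candidate-validation.
enum PvfExecKind { Backing, Approval }
// Mirrors `polkadot_node_core_pvf::Priority`.
enum Priority { Normal, Critical }

fn preparation_priority(kind: PvfExecKind) -> Priority {
    match kind {
        // A delayed backing job only postpones one candidate.
        PvfExecKind::Backing => Priority::Normal,
        // Approval and dispute executions sit on the finality critical path.
        PvfExecKind::Approval => Priority::Critical,
    }
}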
@@ -1044,6 +1045,7 @@ impl ValidationBackend for MockPreCheckBackend { _pvf: PvfPrepData, _timeout: Duration, _encoded_params: Vec, + _prepare_priority: polkadot_node_core_pvf::Priority, ) -> Result { unreachable!() } diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index 59d5a7e20a8..247d753d7c4 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -197,7 +197,7 @@ impl Config { prepare_worker_program_path, prepare_worker_spawn_timeout: Duration::from_secs(3), prepare_workers_soft_max_num: 1, - prepare_workers_hard_max_num: 1, + prepare_workers_hard_max_num: 2, execute_worker_program_path, execute_worker_spawn_timeout: Duration::from_secs(3), diff --git a/polkadot/node/core/pvf/src/priority.rs b/polkadot/node/core/pvf/src/priority.rs index d1ef9c604b1..0d18d4b484c 100644 --- a/polkadot/node/core/pvf/src/priority.rs +++ b/polkadot/node/core/pvf/src/priority.rs @@ -14,17 +14,18 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -/// A priority assigned to execution of a PVF. +/// A priority assigned to preparation of a PVF. #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] pub enum Priority { /// Normal priority for things that do not require immediate response, but still need to be /// done pretty quick. /// - /// Approvals and disputes fall into this category. + /// Backing falls into this category. Normal, /// This priority is used for requests that are required to be processed as soon as possible. /// - /// For example, backing is on a critical path and requires execution as soon as possible. + /// Disputes and approvals are on a critical path and require execution as soon as + /// possible to not delay finality. Critical, } -- GitLab From 21308d893ef0594538aee73cbdc3905189be0b7b Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 19 Apr 2024 11:34:46 +0300 Subject: [PATCH 036/107] Fixed GrandpaConsensusLogReader::find_scheduled_change (#4208) --- bridges/primitives/header-chain/src/lib.rs | 60 +++++++++++++++++----- 1 file changed, 46 insertions(+), 14 deletions(-) diff --git a/bridges/primitives/header-chain/src/lib.rs b/bridges/primitives/header-chain/src/lib.rs index 98fb9ff83d8..ad496012c6a 100644 --- a/bridges/primitives/header-chain/src/lib.rs +++ b/bridges/primitives/header-chain/src/lib.rs @@ -32,7 +32,9 @@ use core::{clone::Clone, cmp::Eq, default::Default, fmt::Debug}; use frame_support::PalletError; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; -use sp_consensus_grandpa::{AuthorityList, ConsensusLog, SetId, GRANDPA_ENGINE_ID}; +use sp_consensus_grandpa::{ + AuthorityList, ConsensusLog, ScheduledChange, SetId, GRANDPA_ENGINE_ID, +}; use sp_runtime::{traits::Header as HeaderT, Digest, RuntimeDebug}; use sp_std::{boxed::Box, vec::Vec}; @@ -147,24 +149,23 @@ pub struct GrandpaConsensusLogReader(sp_std::marker::PhantomData impl GrandpaConsensusLogReader { /// Find and return scheduled (regular) change digest item. - pub fn find_scheduled_change( - digest: &Digest, - ) -> Option> { + pub fn find_scheduled_change(digest: &Digest) -> Option> { + use sp_runtime::generic::OpaqueDigestItemId; + let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); + + let filter_log = |log: ConsensusLog| match log { + ConsensusLog::ScheduledChange(change) => Some(change), + _ => None, + }; + // find the first consensus digest with the right ID which converts to // the right kind of consensus log. 
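The bug fixed just below sits in how the filter composes with the search: `convert_first` returns the result for the first digest item on which the closure yields `Some`. The old closure accepted any GRANDPA consensus log that decoded, and the `ScheduledChange` filter ran only afterwards, so a digest whose first GRANDPA log was, say, `OnDisabled` made the whole lookup return `None` even when a `ScheduledChange` followed. Moving the filter inside the closure lets the search skip non-matching logs. The same pitfall in plain iterator terms (illustrative only):

enum Log { OnDisabled(u64), ScheduledChange(u32) }

fn main() {
    let digest = [Log::OnDisabled(0), Log::ScheduledChange(5)];

    // Old shape: take the first log that decodes, then filter. The search
    // stops at `OnDisabled`, so the later `ScheduledChange` is never seen.
    let broken = digest
        .iter()
        .find_map(Some) // models "first GRANDPA log that decodes"
        .and_then(|log| match log {
            Log::ScheduledChange(delay) => Some(*delay),
            _ => None,
        });
    assert_eq!(broken, None);

    // New shape: filter while searching, so later matches are found.
    let fixed = digest.iter().find_map(|log| match log {
        Log::ScheduledChange(delay) => Some(*delay),
        _ => None,
    });
    assert_eq!(fixed, Some(5));
}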
- digest
- .convert_first(|log| log.consensus_try_to(&GRANDPA_ENGINE_ID))
- .and_then(|log| match log {
- ConsensusLog::ScheduledChange(change) => Some(change),
- _ => None,
- })
+ digest.convert_first(|l| l.try_to(id).and_then(filter_log))
 }

 /// Find and return forced change digest item. Or light client can't do anything
 /// with forced changes, so we can't accept header with the forced change digest.
- pub fn find_forced_change(
- digest: &Digest,
- ) -> Option<(Number, sp_consensus_grandpa::ScheduledChange)> {
+ pub fn find_forced_change(digest: &Digest) -> Option<(Number, ScheduledChange)> {
 // find the first consensus digest with the right ID which converts to
 // the right kind of consensus log.
 digest
@@ -346,7 +347,7 @@ mod tests {
 use super::*;
 use bp_runtime::ChainId;
 use frame_support::weights::Weight;
- use sp_runtime::{testing::H256, traits::BlakeTwo256, MultiSignature};
+ use sp_runtime::{testing::H256, traits::BlakeTwo256, DigestItem, MultiSignature};

 struct TestChain;
@@ -385,4 +386,35 @@ mod tests {
 max_expected_submit_finality_proof_arguments_size::(false, 100),
 );
 }
+
+ #[test]
+ fn find_scheduled_change_works() {
+ let scheduled_change = ScheduledChange { next_authorities: vec![], delay: 0 };
+
+ // first
+ let mut digest = Digest::default();
+ digest.push(DigestItem::Consensus(
+ GRANDPA_ENGINE_ID,
+ ConsensusLog::ScheduledChange(scheduled_change.clone()).encode(),
+ ));
+ assert_eq!(
+ GrandpaConsensusLogReader::find_scheduled_change(&digest),
+ Some(scheduled_change.clone())
+ );
+
+ // not first
+ let mut digest = Digest::default();
+ digest.push(DigestItem::Consensus(
+ GRANDPA_ENGINE_ID,
+ ConsensusLog::::OnDisabled(0).encode(),
+ ));
+ digest.push(DigestItem::Consensus(
+ GRANDPA_ENGINE_ID,
+ ConsensusLog::ScheduledChange(scheduled_change.clone()).encode(),
+ ));
+ assert_eq!(
+ GrandpaConsensusLogReader::find_scheduled_change(&digest),
+ Some(scheduled_change.clone())
+ );
+ }
 }

-- GitLab

From 69f4373178f33d702a7e02e71358eb826877d6f8 Mon Sep 17 00:00:00 2001
From: Bulat Saifullin
Date: Fri, 19 Apr 2024 12:04:03 +0300
Subject: [PATCH 037/107] Provide WSS bootnodes for Rococo and Westend parachains (#4161)

Some Rococo parachains lacked WS nodes. `ws/wss` endpoints are necessary for using light clients on the testnet.

Changes:
1. Add `ws/wss` endpoints to all testnet parachains.
2. Remove decommissioned nodes (`people-collator-node-2`, `people-collator-node-3`).
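The entries added below are libp2p multiaddresses of the shape `/dns/<host>/tcp/<port>/<transport>/p2p/<peer-id>`: no transport segment means plain TCP (usable by full nodes only), `ws` is plain WebSocket (port 30335 on these nodes), and `wss` is TLS WebSocket (port 443), the only form a browser-based light client can dial. A rough shape check in plain Rust (illustrative; real code would parse with a proper multiaddr library):

fn is_wss_bootnode(addr: &str) -> bool {
    let parts: Vec<&str> = addr.split('/').filter(|s| !s.is_empty()).collect();
    // Expected: ["dns", host, "tcp", port, "wss", "p2p", peer_id]
    matches!(parts.as_slice(), ["dns", _host, "tcp", _port, "wss", "p2p", _peer])
}

fn main() {
    assert!(is_wss_bootnode(
        "/dns/rococo-asset-hub-bootnode-0.polkadot.io/tcp/443/wss/p2p/12D3KooWRrZMndHAopzao34uGsN7srjS3gh9nAjTGKLSyJeU31Lg"
    ));
    assert!(!is_wss_bootnode(
        "/dns/rococo-asset-hub-bootnode-0.polkadot.io/tcp/30333/p2p/12D3KooWRrZMndHAopzao34uGsN7srjS3gh9nAjTGKLSyJeU31Lg"
    ));
}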
--- cumulus/parachains/chain-specs/asset-hub-rococo.json | 6 +++++- .../parachains/chain-specs/asset-hub-westend.json | 4 ++++ .../parachains/chain-specs/bridge-hub-rococo.json | 6 +++++- .../parachains/chain-specs/bridge-hub-westend.json | 4 ++++ .../parachains/chain-specs/collectives-westend.json | 2 ++ cumulus/parachains/chain-specs/contracts-rococo.json | 2 ++ cumulus/parachains/chain-specs/coretime-rococo.json | 8 ++++++-- cumulus/parachains/chain-specs/coretime-westend.json | 4 ++++ cumulus/parachains/chain-specs/people-rococo.json | 12 +++++------- cumulus/parachains/chain-specs/people-westend.json | 4 +++- 10 files changed, 40 insertions(+), 12 deletions(-) diff --git a/cumulus/parachains/chain-specs/asset-hub-rococo.json b/cumulus/parachains/chain-specs/asset-hub-rococo.json index 900d9f0ffb2..87ff2fb220a 100644 --- a/cumulus/parachains/chain-specs/asset-hub-rococo.json +++ b/cumulus/parachains/chain-specs/asset-hub-rococo.json @@ -4,7 +4,11 @@ "chainType": "Live", "bootNodes": [ "/dns/rococo-asset-hub-bootnode-0.polkadot.io/tcp/30333/p2p/12D3KooWRrZMndHAopzao34uGsN7srjS3gh9nAjTGKLSyJeU31Lg", - "/dns/rococo-asset-hub-bootnode-1.polkadot.io/tcp/30333/p2p/12D3KooWAewimoNJqMaiiV5pYiowA5hLuh5JS5QiRJCCyWVrrSTS" + "/dns/rococo-asset-hub-bootnode-1.polkadot.io/tcp/30333/p2p/12D3KooWAewimoNJqMaiiV5pYiowA5hLuh5JS5QiRJCCyWVrrSTS", + "/dns/rococo-asset-hub-bootnode-0.polkadot.io/tcp/30335/ws/p2p/12D3KooWRrZMndHAopzao34uGsN7srjS3gh9nAjTGKLSyJeU31Lg", + "/dns/rococo-asset-hub-bootnode-1.polkadot.io/tcp/30335/ws/p2p/12D3KooWAewimoNJqMaiiV5pYiowA5hLuh5JS5QiRJCCyWVrrSTS", + "/dns/rococo-asset-hub-bootnode-0.polkadot.io/tcp/443/wss/p2p/12D3KooWRrZMndHAopzao34uGsN7srjS3gh9nAjTGKLSyJeU31Lg", + "/dns/rococo-asset-hub-bootnode-1.polkadot.io/tcp/443/wss/p2p/12D3KooWAewimoNJqMaiiV5pYiowA5hLuh5JS5QiRJCCyWVrrSTS" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-westend.json b/cumulus/parachains/chain-specs/asset-hub-westend.json index 670935c9d24..3752213e702 100644 --- a/cumulus/parachains/chain-specs/asset-hub-westend.json +++ b/cumulus/parachains/chain-specs/asset-hub-westend.json @@ -5,6 +5,10 @@ "bootNodes": [ "/dns/westend-asset-hub-bootnode-0.polkadot.io/tcp/30333/p2p/12D3KooWJaAfPyiye7ZQBuHengTJJoMrcaz7Jj1UzHiKdNxA1Nkd", "/dns/westend-asset-hub-bootnode-1.polkadot.io/tcp/30333/p2p/12D3KooWGL3hpWycWyeqyL9gHNnmmsL474WkPZdqraBHu4L6fQrW", + "/dns/westend-asset-hub-bootnode-0.polkadot.io/tcp/30335/ws/p2p/12D3KooWJaAfPyiye7ZQBuHengTJJoMrcaz7Jj1UzHiKdNxA1Nkd", + "/dns/westend-asset-hub-bootnode-1.polkadot.io/tcp/30335/ws/p2p/12D3KooWGL3hpWycWyeqyL9gHNnmmsL474WkPZdqraBHu4L6fQrW", + "/dns/westend-asset-hub-connect-0.polkadot.io/tcp/443/wss/p2p/12D3KooWJaAfPyiye7ZQBuHengTJJoMrcaz7Jj1UzHiKdNxA1Nkd", + "/dns/westend-asset-hub-connect-1.polkadot.io/tcp/443/wss/p2p/12D3KooWGL3hpWycWyeqyL9gHNnmmsL474WkPZdqraBHu4L6fQrW", "/dns/boot.stake.plus/tcp/33333/p2p/12D3KooWNiB27rpXX7EYongoWWUeRKzLQxWGms6MQU2B9LX7Ztzo", "/dns/boot.stake.plus/tcp/33334/wss/p2p/12D3KooWNiB27rpXX7EYongoWWUeRKzLQxWGms6MQU2B9LX7Ztzo", "/dns/boot.metaspan.io/tcp/36052/p2p/12D3KooWBCqfNb6Y39DXTr4UBWXyjuS3hcZM1qTbHhDXxF6HkAJJ", diff --git a/cumulus/parachains/chain-specs/bridge-hub-rococo.json b/cumulus/parachains/chain-specs/bridge-hub-rococo.json index 6b430678a86..53aef58422d 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-rococo.json +++ b/cumulus/parachains/chain-specs/bridge-hub-rococo.json @@ -4,7 +4,11 @@ "chainType": "Live", "bootNodes": [ 
"/dns/rococo-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWJCFBJmFF65xz5xHeZQRSCf35BxfSEB3RHQFoLza28LWU", - "/dns/rococo-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWJzLd8skcAgA24EcJey7aJAhYctfUxWGjSP5Usk9wbpPZ" + "/dns/rococo-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWJzLd8skcAgA24EcJey7aJAhYctfUxWGjSP5Usk9wbpPZ", + "/dns/rococo-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWJCFBJmFF65xz5xHeZQRSCf35BxfSEB3RHQFoLza28LWU", + "/dns/rococo-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWJzLd8skcAgA24EcJey7aJAhYctfUxWGjSP5Usk9wbpPZ", + "/dns/rococo-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWJCFBJmFF65xz5xHeZQRSCf35BxfSEB3RHQFoLza28LWU", + "/dns/rococo-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWJzLd8skcAgA24EcJey7aJAhYctfUxWGjSP5Usk9wbpPZ" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-westend.json b/cumulus/parachains/chain-specs/bridge-hub-westend.json index 447207a5810..5140071ec44 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-westend.json +++ b/cumulus/parachains/chain-specs/bridge-hub-westend.json @@ -5,6 +5,10 @@ "bootNodes": [ "/dns/westend-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWKyEuqkkWvFSrwZWKWBAsHgLV3HGfHj7yH3LNJLAVhmxY", "/dns/westend-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWBpvudthz61XC4oP2YYFFJdhWohBeQ1ffn1BMSGWhapjd", + "/dns/westend-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWKyEuqkkWvFSrwZWKWBAsHgLV3HGfHj7yH3LNJLAVhmxY", + "/dns/westend-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWBpvudthz61XC4oP2YYFFJdhWohBeQ1ffn1BMSGWhapjd", + "/dns/westend-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWKyEuqkkWvFSrwZWKWBAsHgLV3HGfHj7yH3LNJLAVhmxY", + "/dns/westend-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWBpvudthz61XC4oP2YYFFJdhWohBeQ1ffn1BMSGWhapjd", "/dns/westend-bridge-hub-boot-ng.dwellir.com/tcp/30338/p2p/12D3KooWJWWRYTAwBLqYkh7iMBGDr5ouJ3MHj7M3fZ7zWS4zEk6F", "/dns/westend-bridge-hub-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWJWWRYTAwBLqYkh7iMBGDr5ouJ3MHj7M3fZ7zWS4zEk6F", "/dns/boot-cr.gatotech.network/tcp/33330/p2p/12D3KooWJHG6qznPzTSEbuujHNcvyzBZcR9zNRPFcXWUaoVWZBEw", diff --git a/cumulus/parachains/chain-specs/collectives-westend.json b/cumulus/parachains/chain-specs/collectives-westend.json index e459c631f8b..fdd6348f02a 100644 --- a/cumulus/parachains/chain-specs/collectives-westend.json +++ b/cumulus/parachains/chain-specs/collectives-westend.json @@ -5,6 +5,8 @@ "bootNodes": [ "/dns/westend-collectives-collator-node-0.parity-testnet.parity.io/tcp/30334/p2p/12D3KooWBMAuyzQu3yAf8YXyoyxsSzSsgoaqAepgnNyQcPaPjPXe", "/dns/westend-collectives-collator-node-1.parity-testnet.parity.io/tcp/30334/p2p/12D3KooWAujYtHbCs4MiDD57JNTntTJnYnikfnaPa7JdnMyAUrHB", + "/dns/westend-collectives-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWBMAuyzQu3yAf8YXyoyxsSzSsgoaqAepgnNyQcPaPjPXe", + "/dns/westend-collectives-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWAujYtHbCs4MiDD57JNTntTJnYnikfnaPa7JdnMyAUrHB", "/dns/westend-collectives-collator-0.polkadot.io/tcp/443/wss/p2p/12D3KooWBMAuyzQu3yAf8YXyoyxsSzSsgoaqAepgnNyQcPaPjPXe", 
"/dns/westend-collectives-collator-1.polkadot.io/tcp/443/wss/p2p/12D3KooWAujYtHbCs4MiDD57JNTntTJnYnikfnaPa7JdnMyAUrHB", "/dns/boot.stake.plus/tcp/38333/p2p/12D3KooWQoVsFCfgu21iu6kdtQsU9T6dPn1wsyLn1U34yPerR6zQ", diff --git a/cumulus/parachains/chain-specs/contracts-rococo.json b/cumulus/parachains/chain-specs/contracts-rococo.json index 422268a5efd..71783481e5c 100644 --- a/cumulus/parachains/chain-specs/contracts-rococo.json +++ b/cumulus/parachains/chain-specs/contracts-rococo.json @@ -5,6 +5,8 @@ "bootNodes": [ "/dns/rococo-contracts-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWKg3Rpxcr9oJ8n6khoxpGKWztCZydtUZk2cojHqnfLrpj", "/dns/rococo-contracts-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWPEXYrz8tHU3nDtPoPw4V7ou5dzMEWSTuUj7vaWiYVAVh", + "/dns/rococo-contracts-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWKg3Rpxcr9oJ8n6khoxpGKWztCZydtUZk2cojHqnfLrpj", + "/dns/rococo-contracts-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWPEXYrz8tHU3nDtPoPw4V7ou5dzMEWSTuUj7vaWiYVAVh", "/dns/rococo-contracts-collator-node-0.polkadot.io/tcp/443/wss/p2p/12D3KooWKg3Rpxcr9oJ8n6khoxpGKWztCZydtUZk2cojHqnfLrpj", "/dns/rococo-contracts-collator-node-1.polkadot.io/tcp/443/wss/p2p/12D3KooWPEXYrz8tHU3nDtPoPw4V7ou5dzMEWSTuUj7vaWiYVAVh" ], diff --git a/cumulus/parachains/chain-specs/coretime-rococo.json b/cumulus/parachains/chain-specs/coretime-rococo.json index 39506095bfe..082e7dd26a9 100644 --- a/cumulus/parachains/chain-specs/coretime-rococo.json +++ b/cumulus/parachains/chain-specs/coretime-rococo.json @@ -4,7 +4,11 @@ "chainType": "Live", "bootNodes": [ "/dns/rococo-coretime-collator-node-0.polkadot.io/tcp/30333/p2p/12D3KooWHBUH9wGBx1Yq1ZePov9VL3AzxRPv5DTR4KadiCU6VKxy", - "/dns/rococo-coretime-collator-node-1.polkadot.io/tcp/30333/p2p/12D3KooWB3SKxdj6kpwTkdMnHJi6YmadojCzmEqFkeFJjxN812XX" + "/dns/rococo-coretime-collator-node-1.polkadot.io/tcp/30333/p2p/12D3KooWB3SKxdj6kpwTkdMnHJi6YmadojCzmEqFkeFJjxN812XX", + "/dns/rococo-coretime-collator-node-0.polkadot.io/tcp/30335/ws/p2p/12D3KooWHBUH9wGBx1Yq1ZePov9VL3AzxRPv5DTR4KadiCU6VKxy", + "/dns/rococo-coretime-collator-node-1.polkadot.io/tcp/30335/ws/p2p/12D3KooWB3SKxdj6kpwTkdMnHJi6YmadojCzmEqFkeFJjxN812XX", + "/dns/rococo-coretime-collator-node-0.polkadot.io/tcp/443/wss/p2p/12D3KooWHBUH9wGBx1Yq1ZePov9VL3AzxRPv5DTR4KadiCU6VKxy", + "/dns/rococo-coretime-collator-node-1.polkadot.io/tcp/443/wss/p2p/12D3KooWB3SKxdj6kpwTkdMnHJi6YmadojCzmEqFkeFJjxN812XX" ], "telemetryEndpoints": null, "protocolId": null, @@ -67,4 +71,4 @@ "childrenDefault": {} } } -} \ No newline at end of file +} diff --git a/cumulus/parachains/chain-specs/coretime-westend.json b/cumulus/parachains/chain-specs/coretime-westend.json index 8f096fa6a96..85e129e6848 100644 --- a/cumulus/parachains/chain-specs/coretime-westend.json +++ b/cumulus/parachains/chain-specs/coretime-westend.json @@ -5,6 +5,10 @@ "bootNodes": [ "/dns/westend-coretime-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT", "/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH", + "/dns/westend-coretime-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT", + "/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH", + 
"/dns/westend-coretime-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT", + "/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH", "/dns/boot.metaspan.io/tcp/33019/p2p/12D3KooWCa1uNnEZqiqJY9jkKNQxwSLGPeZ5MjWHhjQMGwga9JMM", "/dns/boot-node.helikon.io/tcp/9420/p2p/12D3KooWFBPartM873MNm1AmVK3etUz34cAE9A9rwPztPno2epQ3", "/dns/boot-node.helikon.io/tcp/9422/wss/p2p/12D3KooWFBPartM873MNm1AmVK3etUz34cAE9A9rwPztPno2epQ3", diff --git a/cumulus/parachains/chain-specs/people-rococo.json b/cumulus/parachains/chain-specs/people-rococo.json index b2819157152..a4361b77df7 100644 --- a/cumulus/parachains/chain-specs/people-rococo.json +++ b/cumulus/parachains/chain-specs/people-rococo.json @@ -4,13 +4,11 @@ "chainType": "Live", "bootNodes": [ "/dns/rococo-people-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWDZg5jMYhKXTu6RU491V5sxsFnP4oaEmZJEUfcRkYzps5", - "/dns/rococo-people-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWDZg5jMYhKXTu6RU491V5sxsFnP4oaEmZJEUfcRkYzps5", "/dns/rococo-people-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWGGR5i6qQqfo7iDNp7vjDRKPWuDk53idGV6nFLwS12X5H", - "/dns/rococo-people-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWGGR5i6qQqfo7iDNp7vjDRKPWuDk53idGV6nFLwS12X5H", - "/dns/rococo-people-collator-node-2.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWBvA9BmBfrsVMcAcqVXGYFCpMTvkSk2igNXpmoareYbeT", - "/dns/rococo-people-collator-node-2.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWBvA9BmBfrsVMcAcqVXGYFCpMTvkSk2igNXpmoareYbeT", - "/dns/rococo-people-collator-node-3.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWQ7Q9jLcJTPXy7KEp5hSZ8YMY9pHx9CnQVz3T8TKQ81UG", - "/dns/rococo-people-collator-node-3.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWQ7Q9jLcJTPXy7KEp5hSZ8YMY9pHx9CnQVz3T8TKQ81UG" + "/dns/rococo-people-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWDZg5jMYhKXTu6RU491V5sxsFnP4oaEmZJEUfcRkYzps5", + "/dns/rococo-people-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWGGR5i6qQqfo7iDNp7vjDRKPWuDk53idGV6nFLwS12X5H", + "/dns/rococo-people-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWDZg5jMYhKXTu6RU491V5sxsFnP4oaEmZJEUfcRkYzps5", + "/dns/rococo-people-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWGGR5i6qQqfo7iDNp7vjDRKPWuDk53idGV6nFLwS12X5H" ], "telemetryEndpoints": null, "protocolId": null, @@ -79,4 +77,4 @@ "childrenDefault": {} } } -} \ No newline at end of file +} diff --git a/cumulus/parachains/chain-specs/people-westend.json b/cumulus/parachains/chain-specs/people-westend.json index 6dd8579cf25..93b8c064113 100644 --- a/cumulus/parachains/chain-specs/people-westend.json +++ b/cumulus/parachains/chain-specs/people-westend.json @@ -4,8 +4,10 @@ "chainType": "Live", "bootNodes": [ "/dns/westend-people-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWDcLjDLTu9fNhmas9DTWtqdv8eUbFMWQzVwvXRK7QcjHD", - "/dns/westend-people-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWDcLjDLTu9fNhmas9DTWtqdv8eUbFMWQzVwvXRK7QcjHD", "/dns/westend-people-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWM56JbKWAXsDyWh313z73aKYVMp1Hj2nSnAKY3q6MnoC9", + "/dns/westend-people-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWDcLjDLTu9fNhmas9DTWtqdv8eUbFMWQzVwvXRK7QcjHD", + 
"/dns/westend-people-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWM56JbKWAXsDyWh313z73aKYVMp1Hj2nSnAKY3q6MnoC9", + "/dns/westend-people-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWDcLjDLTu9fNhmas9DTWtqdv8eUbFMWQzVwvXRK7QcjHD", "/dns/westend-people-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWM56JbKWAXsDyWh313z73aKYVMp1Hj2nSnAKY3q6MnoC9", "/dns/identity-westend.bootnodes.polkadotters.com/tcp/30532/p2p/12D3KooWKr9San6KTM7REJ95cBaDoiciGcWnW8TTftEJgxGF5Ehb", "/dns/identity-westend.bootnodes.polkadotters.com/tcp/30534/wss/p2p/12D3KooWKr9San6KTM7REJ95cBaDoiciGcWnW8TTftEJgxGF5Ehb", -- GitLab From 148d942ec0f1de2f8ea9e524428000c8886c678c Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Fri, 19 Apr 2024 12:28:48 +0300 Subject: [PATCH 038/107] txBroadcast: Stabilize to version 1 (#4169) This PR stabilizes the txBroadcast API to version 1. Ideally needs: - https://github.com/paritytech/polkadot-sdk/pull/4050 - https://github.com/paritytech/polkadot-sdk/pull/3772 cc @paritytech/subxt-team --------- Signed-off-by: Alexandru Vasile --- prdoc/pr_4169.prdoc | 8 ++ .../client/rpc-spec-v2/src/transaction/api.rs | 5 +- .../tests/transaction_broadcast_tests.rs | 84 ++++++++----------- 3 files changed, 46 insertions(+), 51 deletions(-) create mode 100644 prdoc/pr_4169.prdoc diff --git a/prdoc/pr_4169.prdoc b/prdoc/pr_4169.prdoc new file mode 100644 index 00000000000..03f2f6e597e --- /dev/null +++ b/prdoc/pr_4169.prdoc @@ -0,0 +1,8 @@ +title: Stabilize transactionBroadcast RPC class to version 1 + +doc: + - audience: Node Dev + description: | + The transactionBroadcast RPC API is stabilized to version 1. + +crates: [ ] diff --git a/substrate/client/rpc-spec-v2/src/transaction/api.rs b/substrate/client/rpc-spec-v2/src/transaction/api.rs index 119bf270c63..c4a89219058 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/api.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/api.rs @@ -47,7 +47,8 @@ pub trait TransactionBroadcastApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "transaction_unstable_broadcast", raw_method)] + + #[method(name = "transaction_v1_broadcast", raw_method)] async fn broadcast(&self, bytes: Bytes) -> RpcResult>; /// Broadcast an extrinsic to the chain. @@ -55,6 +56,6 @@ pub trait TransactionBroadcastApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "transaction_unstable_stop", raw_method)] + #[method(name = "transaction_v1_stop", raw_method)] async fn stop_broadcast(&self, operation_id: String) -> Result<(), ErrorBroadcast>; } diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs index f4a69bd6ed4..efb3bd94ddb 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs @@ -46,12 +46,12 @@ async fn tx_broadcast_enters_pool() { let xt = hex_string(&uxt.encode()); let operation_id: String = - tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); + tx_api.call("transaction_v1_broadcast", rpc_params![&xt]).await.unwrap(); - // Announce block 1 to `transaction_unstable_broadcast`. + // Announce block 1 to `transaction_v1_broadcast`. 
client_mock.trigger_import_stream(block_1_header).await; - // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool. + // Ensure the tx propagated from `transaction_v1_broadcast` to the transaction pool. let event = get_next_event!(&mut pool_middleware); assert_eq!( event, @@ -84,10 +84,7 @@ async fn tx_broadcast_enters_pool() { // The future broadcast awaits for the finalized status to be reached. // Force the future to exit by calling stop. - let _: () = tx_api - .call("transaction_unstable_stop", rpc_params![&operation_id]) - .await - .unwrap(); + let _: () = tx_api.call("transaction_v1_stop", rpc_params![&operation_id]).await.unwrap(); // Ensure the broadcast future finishes. let _ = get_next_event!(&mut exec_middleware.recv); @@ -101,7 +98,7 @@ async fn tx_broadcast_invalid_tx() { // Invalid parameters. let err = tx_api - .call::<_, serde_json::Value>("transaction_unstable_broadcast", [1u8]) + .call::<_, serde_json::Value>("transaction_v1_broadcast", [1u8]) .await .unwrap_err(); assert_matches!(err, @@ -113,7 +110,7 @@ async fn tx_broadcast_invalid_tx() { // Invalid transaction that cannot be decoded. The broadcast silently exits. let xt = "0xdeadbeef"; let operation_id: String = - tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); + tx_api.call("transaction_v1_broadcast", rpc_params![&xt]).await.unwrap(); assert_eq!(0, pool.status().ready); @@ -124,7 +121,7 @@ async fn tx_broadcast_invalid_tx() { // When the operation is not active, either from the tx being finalized or a // terminal error; the stop method should return an error. let err = tx_api - .call::<_, serde_json::Value>("transaction_unstable_stop", rpc_params![&operation_id]) + .call::<_, serde_json::Value>("transaction_v1_stop", rpc_params![&operation_id]) .await .unwrap_err(); assert_matches!(err, @@ -138,7 +135,7 @@ async fn tx_stop_with_invalid_operation_id() { // Make an invalid stop call. let err = tx_api - .call::<_, serde_json::Value>("transaction_unstable_stop", ["invalid_operation_id"]) + .call::<_, serde_json::Value>("transaction_v1_stop", ["invalid_operation_id"]) .await .unwrap_err(); assert_matches!(err, @@ -161,15 +158,13 @@ async fn tx_broadcast_resubmits_future_nonce_tx() { let future_uxt = uxt(Alice, ALICE_NONCE + 1); let future_xt = hex_string(&future_uxt.encode()); - let future_operation_id: String = tx_api - .call("transaction_unstable_broadcast", rpc_params![&future_xt]) - .await - .unwrap(); + let future_operation_id: String = + tx_api.call("transaction_v1_broadcast", rpc_params![&future_xt]).await.unwrap(); - // Announce block 1 to `transaction_unstable_broadcast`. + // Announce block 1 to `transaction_v1_broadcast`. client_mock.trigger_import_stream(block_1_header).await; - // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool. + // Ensure the tx propagated from `transaction_v1_broadcast` to the transaction pool. let event = get_next_event!(&mut pool_middleware); assert_eq!( event, @@ -188,13 +183,11 @@ async fn tx_broadcast_resubmits_future_nonce_tx() { let block_2_header = api.push_block(2, vec![], true); let block_2 = block_2_header.hash(); - let operation_id: String = tx_api - .call("transaction_unstable_broadcast", rpc_params![¤t_xt]) - .await - .unwrap(); + let operation_id: String = + tx_api.call("transaction_v1_broadcast", rpc_params![¤t_xt]).await.unwrap(); assert_ne!(future_operation_id, operation_id); - // Announce block 2 to `transaction_unstable_broadcast`. 
+ // Announce block 2 to `transaction_v1_broadcast`. client_mock.trigger_import_stream(block_2_header).await; // Collect the events of both transactions. @@ -249,12 +242,12 @@ async fn tx_broadcast_stop_after_broadcast_finishes() { let xt = hex_string(&uxt.encode()); let operation_id: String = - tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); + tx_api.call("transaction_v1_broadcast", rpc_params![&xt]).await.unwrap(); - // Announce block 1 to `transaction_unstable_broadcast`. + // Announce block 1 to `transaction_v1_broadcast`. client_mock.trigger_import_stream(block_1_header).await; - // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction + // Ensure the tx propagated from `transaction_v1_broadcast` to the transaction // pool.inner_pool. let event = get_next_event!(&mut pool_middleware); assert_eq!( @@ -303,7 +296,7 @@ async fn tx_broadcast_stop_after_broadcast_finishes() { // The operation ID is no longer valid, check that the broadcast future // cleared out the inner state of the operation. let err = tx_api - .call::<_, serde_json::Value>("transaction_unstable_stop", rpc_params![&operation_id]) + .call::<_, serde_json::Value>("transaction_v1_stop", rpc_params![&operation_id]) .await .unwrap_err(); assert_matches!(err, @@ -328,14 +321,14 @@ async fn tx_broadcast_resubmits_invalid_tx() { let uxt = uxt(Alice, ALICE_NONCE); let xt = hex_string(&uxt.encode()); let _operation_id: String = - tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); + tx_api.call("transaction_v1_broadcast", rpc_params![&xt]).await.unwrap(); let block_1_header = api.push_block(1, vec![], true); let block_1 = block_1_header.hash(); - // Announce block 1 to `transaction_unstable_broadcast`. + // Announce block 1 to `transaction_v1_broadcast`. client_mock.trigger_import_stream(block_1_header).await; - // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool. + // Ensure the tx propagated from `transaction_v1_broadcast` to the transaction pool. let event = get_next_event!(&mut pool_middleware); assert_eq!( event, @@ -355,7 +348,7 @@ async fn tx_broadcast_resubmits_invalid_tx() { pool.inner_pool.maintain(event).await; assert_eq!(1, pool.inner_pool.status().ready); - // Ensure the `transaction_unstable_broadcast` is aware of the invalid transaction. + // Ensure the `transaction_v1_broadcast` is aware of the invalid transaction. let event = get_next_event!(&mut pool_middleware); // Because we have received an `Invalid` status, we try to broadcast the transaction with the // next announced block. @@ -388,7 +381,7 @@ async fn tx_broadcast_resubmits_invalid_tx() { pool.inner_pool.maintain(event).await; assert_eq!(0, pool.inner_pool.status().ready); - // Announce block to `transaction_unstable_broadcast`. + // Announce block to `transaction_v1_broadcast`. client_mock.trigger_import_stream(block_3_header).await; let event = get_next_event!(&mut pool_middleware); @@ -456,12 +449,10 @@ async fn tx_broadcast_resubmits_dropped_tx() { // are immediately dropped. api.set_priority(¤t_uxt, 10); - let current_operation_id: String = tx_api - .call("transaction_unstable_broadcast", rpc_params![¤t_xt]) - .await - .unwrap(); + let current_operation_id: String = + tx_api.call("transaction_v1_broadcast", rpc_params![¤t_xt]).await.unwrap(); - // Announce block 1 to `transaction_unstable_broadcast`. + // Announce block 1 to `transaction_v1_broadcast`. 
let block_1_header = api.push_block(1, vec![], true);
 	let event =
 		ChainEvent::Finalized { hash: block_1_header.hash(), tree_route: Arc::from(vec![]) };
@@ -480,10 +471,8 @@ async fn tx_broadcast_resubmits_dropped_tx() {
 	// The future tx has priority 2, smaller than the current 10.
 	api.set_priority(&future_uxt, 2);
 
-	let future_operation_id: String = tx_api
-		.call("transaction_unstable_broadcast", rpc_params![&future_xt])
-		.await
-		.unwrap();
+	let future_operation_id: String =
+		tx_api.call("transaction_v1_broadcast", rpc_params![&future_xt]).await.unwrap();
 	assert_ne!(current_operation_id, future_operation_id);
 
 	let block_2_header = api.push_block(2, vec![], true);
@@ -535,12 +524,12 @@ async fn tx_broadcast_limit_reached() {
 	let xt = hex_string(&uxt.encode());
 
 	let operation_id: String =
-		tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap();
+		tx_api.call("transaction_v1_broadcast", rpc_params![&xt]).await.unwrap();
 
-	// Announce block 1 to `transaction_unstable_broadcast`.
+	// Announce block 1 to `transaction_v1_broadcast`.
 	client_mock.trigger_import_stream(block_1_header).await;
 
-	// Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool.
+	// Ensure the tx propagated from `transaction_v1_broadcast` to the transaction pool.
 	let event = get_next_event!(&mut pool_middleware);
 	assert_eq!(
 		event,
@@ -552,17 +541,14 @@ async fn tx_broadcast_limit_reached() {
 	assert_eq!(1, exec_middleware.num_tasks());
 
 	let operation_id_limit_reached: Option<String> =
-		tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap();
+		tx_api.call("transaction_v1_broadcast", rpc_params![&xt]).await.unwrap();
 	assert!(operation_id_limit_reached.is_none(), "No operation ID => tx was rejected");
 
 	// We still have in flight one operation.
 	assert_eq!(1, exec_middleware.num_tasks());
 
 	// Force the future to exit by calling stop.
-	let _: () = tx_api
-		.call("transaction_unstable_stop", rpc_params![&operation_id])
-		.await
-		.unwrap();
+	let _: () = tx_api.call("transaction_v1_stop", rpc_params![&operation_id]).await.unwrap();
 
 	// Ensure the broadcast future finishes.
 	let _ = get_next_event!(&mut exec_middleware.recv);
@@ -570,5 +556,5 @@ async fn tx_broadcast_limit_reached() {
 
 	// Can resubmit again now.
 	let _operation_id: String =
-		tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap();
+		tx_api.call("transaction_v1_broadcast", rpc_params![&xt]).await.unwrap();
 }
-- 
GitLab


From eba3deca3e61855c237a33013e8a5e82c479e958 Mon Sep 17 00:00:00 2001
From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Date: Fri, 19 Apr 2024 12:48:44 +0300
Subject: [PATCH 039/107] txWatch: Stabilize txWatch to version 1 (#4171)

This PR stabilizes the transactionWatch API to version 1.
Needs from spec:
- https://github.com/paritytech/json-rpc-interface-spec/pull/153
- https://github.com/paritytech/json-rpc-interface-spec/pull/154

cc @paritytech/subxt-team

---------

Signed-off-by: Alexandru Vasile
---
 prdoc/pr_4171.prdoc                                 | 8 ++++++++
 substrate/client/rpc-spec-v2/src/transaction/api.rs | 4 ++--
 .../src/transaction/tests/transaction_tests.rs      | 6 +++---
 3 files changed, 13 insertions(+), 5 deletions(-)
 create mode 100644 prdoc/pr_4171.prdoc

diff --git a/prdoc/pr_4171.prdoc b/prdoc/pr_4171.prdoc
new file mode 100644
index 00000000000..eef45ba922c
--- /dev/null
+++ b/prdoc/pr_4171.prdoc
@@ -0,0 +1,8 @@
+title: Stabilize transactionWatch RPC class to version 1
+
+doc:
+  - audience: Node Dev
+    description: |
+      The transactionWatch RPC API is stabilized to version 1.
+
+crates: [ ]
diff --git a/substrate/client/rpc-spec-v2/src/transaction/api.rs b/substrate/client/rpc-spec-v2/src/transaction/api.rs
index c4a89219058..ed358922d53 100644
--- a/substrate/client/rpc-spec-v2/src/transaction/api.rs
+++ b/substrate/client/rpc-spec-v2/src/transaction/api.rs
@@ -33,8 +33,8 @@ pub trait TransactionApi<Hash: Clone> {
 	///
 	/// This method is unstable and subject to change in the future.
 	#[subscription(
-		name = "transactionWatch_unstable_submitAndWatch" => "transactionWatch_unstable_watchEvent",
-		unsubscribe = "transactionWatch_unstable_unwatch",
+		name = "transactionWatch_v1_submitAndWatch" => "transactionWatch_v1_watchEvent",
+		unsubscribe = "transactionWatch_v1_unwatch",
 		item = TransactionEvent<Hash>,
 	)]
 	fn submit_and_watch(&self, bytes: Bytes);
diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs
index c83bc948c43..7ce85b9feaf 100644
--- a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs
+++ b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs
@@ -38,7 +38,7 @@ async fn tx_invalid_bytes() {
 	// This should not rely on the tx pool state.
let mut sub = tx_api - .subscribe_unbounded("transactionWatch_unstable_submitAndWatch", rpc_params![&"0xdeadbeef"]) + .subscribe_unbounded("transactionWatch_v1_submitAndWatch", rpc_params![&"0xdeadbeef"]) .await .unwrap(); @@ -56,7 +56,7 @@ async fn tx_in_finalized() { let xt = hex_string(&uxt.encode()); let mut sub = tx_api - .subscribe_unbounded("transactionWatch_unstable_submitAndWatch", rpc_params![&xt]) + .subscribe_unbounded("transactionWatch_v1_submitAndWatch", rpc_params![&xt]) .await .unwrap(); @@ -95,7 +95,7 @@ async fn tx_with_pruned_best_block() { let xt = hex_string(&uxt.encode()); let mut sub = tx_api - .subscribe_unbounded("transactionWatch_unstable_submitAndWatch", rpc_params![&xt]) + .subscribe_unbounded("transactionWatch_v1_submitAndWatch", rpc_params![&xt]) .await .unwrap(); -- GitLab From 4eabe5e0dddc4cd31ad9dab5645350360d4d36a5 Mon Sep 17 00:00:00 2001 From: maksimryndin Date: Fri, 19 Apr 2024 15:36:36 +0200 Subject: [PATCH 040/107] Pvf refactor execute worker errors follow up (#4071) follow up of https://github.com/paritytech/polkadot-sdk/pull/2604 closes https://github.com/paritytech/polkadot-sdk/pull/2604 - [x] take relevant changes from Marcin's PR - [x] extract common duplicate code for workers (low-hanging fruits) ~Some unpassed ci problems are more general and should be fixed in master (see https://github.com/paritytech/polkadot-sdk/pull/4074)~ Proposed labels: **T0-node**, **R0-silent**, **I4-refactor** ----- kusama address: FZXVQLqLbFV2otNXs6BMnNch54CFJ1idpWwjMb3Z8fTLQC6 --------- Co-authored-by: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> --- Cargo.lock | 6 +- polkadot/node/core/pvf/Cargo.toml | 22 ++- polkadot/node/core/pvf/common/Cargo.toml | 7 +- polkadot/node/core/pvf/common/src/error.rs | 3 + polkadot/node/core/pvf/common/src/execute.rs | 36 ++-- polkadot/node/core/pvf/common/src/lib.rs | 1 + polkadot/node/core/pvf/common/src/pvf.rs | 7 +- .../node/core/pvf/common/src/worker/mod.rs | 84 +++++++- .../node/core/pvf/execute-worker/src/lib.rs | 180 +++++++++--------- .../node/core/pvf/prepare-worker/src/lib.rs | 51 +---- polkadot/node/core/pvf/src/execute/queue.rs | 55 ++++-- .../core/pvf/src/execute/worker_interface.rs | 163 ++++++++-------- polkadot/node/core/pvf/src/host.rs | 5 +- 13 files changed, 333 insertions(+), 287 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7bf5215b6de..951f2548d34 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7575,9 +7575,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.152" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libflate" @@ -13303,7 +13303,6 @@ dependencies = [ "slotmap", "sp-core", "sp-maybe-compressed-blob", - "sp-wasm-interface 20.0.0", "tempfile", "test-parachain-adder", "test-parachain-halt", @@ -13340,7 +13339,6 @@ name = "polkadot-node-core-pvf-common" version = "7.0.0" dependencies = [ "assert_matches", - "cfg-if", "cpu-time", "futures", "landlock", diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index a0233d6b751..8bfe2baa42f 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -17,8 +17,7 @@ cfg-if = "1.0" futures = "0.3.30" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } -is_executable = 
"1.0.1" -libc = "0.2.152" +is_executable = { version = "1.0.1", optional = true } pin-project = "1.0.9" rand = "0.8.5" slotmap = "1.0" @@ -26,7 +25,9 @@ tempfile = "3.3.0" thiserror = { workspace = true } tokio = { version = "1.24.2", features = ["fs", "process"] } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.1", default-features = false, features = [ + "derive", +] } polkadot-parachain-primitives = { path = "../../../parachain" } polkadot-core-primitives = { path = "../../../core-primitives" } @@ -37,14 +38,16 @@ polkadot-node-subsystem = { path = "../../subsystem" } polkadot-primitives = { path = "../../../primitives" } sp-core = { path = "../../../../substrate/primitives/core" } -sp-wasm-interface = { path = "../../../../substrate/primitives/wasm-interface" } -sp-maybe-compressed-blob = { path = "../../../../substrate/primitives/maybe-compressed-blob" } +sp-maybe-compressed-blob = { path = "../../../../substrate/primitives/maybe-compressed-blob", optional = true } polkadot-node-core-pvf-prepare-worker = { path = "prepare-worker", optional = true } polkadot-node-core-pvf-execute-worker = { path = "execute-worker", optional = true } [dev-dependencies] assert_matches = "1.4.0" -criterion = { version = "0.4.0", default-features = false, features = ["async_tokio", "cargo_bench_support"] } +criterion = { version = "0.4.0", default-features = false, features = [ + "async_tokio", + "cargo_bench_support", +] } hex-literal = "0.4.1" polkadot-node-core-pvf-common = { path = "common", features = ["test-utils"] } @@ -57,6 +60,7 @@ adder = { package = "test-parachain-adder", path = "../../../parachain/test-para halt = { package = "test-parachain-halt", path = "../../../parachain/test-parachains/halt" } [target.'cfg(target_os = "linux")'.dev-dependencies] +libc = "0.2.153" procfs = "0.16.0" rusty-fork = "0.3.0" sc-sysinfo = { path = "../../../../substrate/client/sysinfo" } @@ -70,6 +74,8 @@ ci-only-tests = [] jemalloc-allocator = ["polkadot-node-core-pvf-common/jemalloc-allocator"] # This feature is used to export test code to other crates without putting it in the production build. 
test-utils = [ - "polkadot-node-core-pvf-execute-worker", - "polkadot-node-core-pvf-prepare-worker", + "dep:is_executable", + "dep:polkadot-node-core-pvf-execute-worker", + "dep:polkadot-node-core-pvf-prepare-worker", + "dep:sp-maybe-compressed-blob", ] diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml index f3eb9d919aa..e1ce6e79cb9 100644 --- a/polkadot/node/core/pvf/common/Cargo.toml +++ b/polkadot/node/core/pvf/common/Cargo.toml @@ -10,14 +10,16 @@ license.workspace = true workspace = true [dependencies] -cfg-if = "1.0" cpu-time = "1.0.0" futures = "0.3.30" gum = { package = "tracing-gum", path = "../../../gum" } libc = "0.2.152" +nix = { version = "0.27.1", features = ["resource", "sched"] } thiserror = { workspace = true } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.1", default-features = false, features = [ + "derive", +] } polkadot-parachain-primitives = { path = "../../../../parachain" } polkadot-primitives = { path = "../../../../primitives" } @@ -34,7 +36,6 @@ sp-tracing = { path = "../../../../../substrate/primitives/tracing" } [target.'cfg(target_os = "linux")'.dependencies] landlock = "0.3.0" -nix = { version = "0.27.1", features = ["sched"] } [target.'cfg(all(target_os = "linux", target_arch = "x86_64"))'.dependencies] seccompiler = "0.4.0" diff --git a/polkadot/node/core/pvf/common/src/error.rs b/polkadot/node/core/pvf/common/src/error.rs index cf274044456..adeb40c0b19 100644 --- a/polkadot/node/core/pvf/common/src/error.rs +++ b/polkadot/node/core/pvf/common/src/error.rs @@ -136,6 +136,9 @@ pub enum InternalValidationError { /// Could not find or open compiled artifact file. #[error("validation: could not find or open compiled artifact file: {0}")] CouldNotOpenFile(String), + /// Could not create a pipe between the worker and a child process. + #[error("validation: could not create pipe: {0}")] + CouldNotCreatePipe(String), /// Host could not clear the worker cache after a job. #[error("validation: host could not clear the worker cache ({path:?}) after a job: {err}")] CouldNotClearWorkerDir { diff --git a/polkadot/node/core/pvf/common/src/execute.rs b/polkadot/node/core/pvf/common/src/execute.rs index 18c97b03cbc..ae6096cacec 100644 --- a/polkadot/node/core/pvf/common/src/execute.rs +++ b/polkadot/node/core/pvf/common/src/execute.rs @@ -30,35 +30,36 @@ pub struct Handshake { /// The response from the execution worker. #[derive(Debug, Encode, Decode)] -pub enum WorkerResponse { - /// The job completed successfully. - Ok { - /// The result of parachain validation. - result_descriptor: ValidationResult, - /// The amount of CPU time taken by the job. - duration: Duration, - }, - /// The candidate is invalid. - InvalidCandidate(String), - /// Instantiation of the WASM module instance failed during an execution. - /// Possibly related to local issues or dirty node update. May be retried with re-preparation. - RuntimeConstruction(String), +pub struct WorkerResponse { + /// The response from the execute job process. + pub job_response: JobResponse, + /// The amount of CPU time taken by the job. + pub duration: Duration, +} + +/// An error occurred in the worker process. +#[derive(thiserror::Error, Debug, Clone, Encode, Decode)] +pub enum WorkerError { /// The job timed out. + #[error("The job timed out")] JobTimedOut, /// The job process has died. We must kill the worker just in case. 
/// /// We cannot treat this as an internal error because malicious code may have killed the job. /// We still retry it, because in the non-malicious case it is likely spurious. + #[error("The job process (pid {job_pid}) has died: {err}")] JobDied { err: String, job_pid: i32 }, /// An unexpected error occurred in the job process, e.g. failing to spawn a thread, panic, /// etc. /// /// Because malicious code can cause a job error, we must not treat it as an internal error. We /// still retry it, because in the non-malicious case it is likely spurious. - JobError(String), + #[error("An unexpected error occurred in the job process: {0}")] + JobError(#[from] JobError), /// Some internal error occurred. - InternalError(InternalValidationError), + #[error("An internal error occurred: {0}")] + InternalError(#[from] InternalValidationError), } /// The result of a job on the execution worker. @@ -101,7 +102,7 @@ impl JobResponse { /// An unexpected error occurred in the execution job process. Because this comes from the job, /// which executes untrusted code, this error must likewise be treated as untrusted. That is, we /// cannot raise an internal error based on this. -#[derive(thiserror::Error, Debug, Encode, Decode)] +#[derive(thiserror::Error, Clone, Debug, Encode, Decode)] pub enum JobError { #[error("The job timed out")] TimedOut, @@ -114,4 +115,7 @@ pub enum JobError { CouldNotSpawnThread(String), #[error("An error occurred in the CPU time monitor thread: {0}")] CpuTimeMonitorThread(String), + /// Since the job can return any exit status it wants, we have to treat this as untrusted. + #[error("Unexpected exit status: {0}")] + UnexpectedExitStatus(i32), } diff --git a/polkadot/node/core/pvf/common/src/lib.rs b/polkadot/node/core/pvf/common/src/lib.rs index 15097dbd3af..0cd92820163 100644 --- a/polkadot/node/core/pvf/common/src/lib.rs +++ b/polkadot/node/core/pvf/common/src/lib.rs @@ -15,6 +15,7 @@ // along with Polkadot. If not, see . //! Contains functionality related to PVFs that is shared by the PVF host and the PVF workers. +#![deny(unused_crate_dependencies)] pub mod error; pub mod execute; diff --git a/polkadot/node/core/pvf/common/src/pvf.rs b/polkadot/node/core/pvf/common/src/pvf.rs index 340dffe07c3..5f248f49b9a 100644 --- a/polkadot/node/core/pvf/common/src/pvf.rs +++ b/polkadot/node/core/pvf/common/src/pvf.rs @@ -18,12 +18,7 @@ use crate::prepare::PrepareJobKind; use parity_scale_codec::{Decode, Encode}; use polkadot_parachain_primitives::primitives::ValidationCodeHash; use polkadot_primitives::ExecutorParams; -use std::{ - cmp::{Eq, PartialEq}, - fmt, - sync::Arc, - time::Duration, -}; +use std::{fmt, sync::Arc, time::Duration}; /// A struct that carries the exhaustive set of data to prepare an artifact out of plain /// Wasm binary diff --git a/polkadot/node/core/pvf/common/src/worker/mod.rs b/polkadot/node/core/pvf/common/src/worker/mod.rs index d7c95d9e704..67e7bece407 100644 --- a/polkadot/node/core/pvf/common/src/worker/mod.rs +++ b/polkadot/node/core/pvf/common/src/worker/mod.rs @@ -18,10 +18,13 @@ pub mod security; -use crate::{framed_recv_blocking, SecurityStatus, WorkerHandshake, LOG_TARGET}; +use crate::{ + framed_recv_blocking, framed_send_blocking, SecurityStatus, WorkerHandshake, LOG_TARGET, +}; use cpu_time::ProcessTime; use futures::never::Never; -use parity_scale_codec::Decode; +use nix::{errno::Errno, sys::resource::Usage}; +use parity_scale_codec::{Decode, Encode}; use std::{ any::Any, fmt::{self}, @@ -58,8 +61,6 @@ macro_rules! 
decl_worker_main {
 			$crate::sp_tracing::try_init_simple();
 
-			let worker_pid = std::process::id();
-
 			let args = std::env::args().collect::<Vec<_>>();
 			if args.len() == 1 {
 				print_help($expected_command);
@@ -548,6 +549,81 @@ fn recv_worker_handshake(stream: &mut UnixStream) -> io::Result<WorkerHandshake> {
 	Ok(worker_handshake)
 }
 
+/// Calculate the total CPU time from the given `usage` structure, returned from
+/// [`nix::sys::resource::getrusage`], and calculates the total CPU time spent, including both user
+/// and system time.
+///
+/// # Arguments
+///
+/// - `rusage`: Contains resource usage information.
+///
+/// # Returns
+///
+/// Returns a `Duration` representing the total CPU time.
+pub fn get_total_cpu_usage(rusage: Usage) -> Duration {
+	let micros = (((rusage.user_time().tv_sec() + rusage.system_time().tv_sec()) * 1_000_000) +
+		(rusage.system_time().tv_usec() + rusage.user_time().tv_usec()) as i64) as u64;
+
+	return Duration::from_micros(micros)
+}
+
+/// Get a job response.
+pub fn recv_child_response<T>(
+	received_data: &mut io::BufReader<&[u8]>,
+	context: &'static str,
+) -> io::Result<T>
+where
+	T: Decode,
+{
+	let response_bytes = framed_recv_blocking(received_data)?;
+	T::decode(&mut response_bytes.as_slice()).map_err(|e| {
+		io::Error::new(
+			io::ErrorKind::Other,
+			format!("{} pvf recv_child_response: decode error: {}", context, e),
+		)
+	})
+}
+
+pub fn send_result<T, E>(
+	stream: &mut UnixStream,
+	result: Result<T, E>,
+	worker_info: &WorkerInfo,
+) -> io::Result<()>
+where
+	T: std::fmt::Debug,
+	E: std::fmt::Debug + std::fmt::Display,
+	Result<T, E>: Encode,
+{
+	if let Err(ref err) = result {
+		gum::warn!(
+			target: LOG_TARGET,
+			?worker_info,
+			"worker: error occurred: {}",
+			err
+		);
+	}
+	gum::trace!(
+		target: LOG_TARGET,
+		?worker_info,
+		"worker: sending result to host: {:?}",
+		result
+	);
+
+	framed_send_blocking(stream, &result.encode()).map_err(|err| {
+		gum::warn!(
+			target: LOG_TARGET,
+			?worker_info,
+			"worker: error occurred sending result to host: {}",
+			err
+		);
+		err
+	})
+}
+
+pub fn stringify_errno(context: &'static str, errno: Errno) -> String {
+	format!("{}: {}: {}", context, errno, io::Error::last_os_error())
+}
+
 /// Functionality related to threads spawned by the workers.
 ///
 /// The motivation for this module is to coordinate worker threads without using async Rust.
diff --git a/polkadot/node/core/pvf/execute-worker/src/lib.rs b/polkadot/node/core/pvf/execute-worker/src/lib.rs
index bd7e76010a6..55f5290bd87 100644
--- a/polkadot/node/core/pvf/execute-worker/src/lib.rs
+++ b/polkadot/node/core/pvf/execute-worker/src/lib.rs
@@ -16,6 +16,9 @@
 //! Contains the logic for executing PVFs. Used by the polkadot-execute-worker binary.
+#![deny(unused_crate_dependencies)]
+#![warn(missing_docs)]
+
 pub use polkadot_node_core_pvf_common::{
 	error::ExecuteError, executor_interface::execute_artifact,
 };
@@ -36,11 +39,12 @@ use nix::{
 use parity_scale_codec::{Decode, Encode};
 use polkadot_node_core_pvf_common::{
 	error::InternalValidationError,
-	execute::{Handshake, JobError, JobResponse, JobResult, WorkerResponse},
+	execute::{Handshake, JobError, JobResponse, JobResult, WorkerError, WorkerResponse},
 	executor_interface::params_to_wasmtime_semantics,
 	framed_recv_blocking, framed_send_blocking,
 	worker::{
-		cpu_time_monitor_loop, pipe2_cloexec, run_worker, stringify_panic_payload,
+		cpu_time_monitor_loop, get_total_cpu_usage, pipe2_cloexec, recv_child_response, run_worker,
+		send_result, stringify_errno, stringify_panic_payload,
 		thread::{self, WaitOutcome},
 		PipeFd, WorkerInfo, WorkerKind,
 	},
@@ -93,8 +97,14 @@ fn recv_request(stream: &mut UnixStream) -> io::Result<(Vec<u8>, Duration)> {
 	Ok((params, execution_timeout))
 }
 
-fn send_response(stream: &mut UnixStream, response: WorkerResponse) -> io::Result<()> {
-	framed_send_blocking(stream, &response.encode())
+/// Sends an error to the host and returns the original error wrapped in `io::Error`.
+macro_rules! map_and_send_err {
+	($error:expr, $err_constructor:expr, $stream:expr, $worker_info:expr) => {{
+		let err: WorkerError = $err_constructor($error.to_string()).into();
+		let io_err = io::Error::new(io::ErrorKind::Other, err.to_string());
+		let _ = send_result::<WorkerResponse, WorkerError>($stream, Err(err), $worker_info);
+		io_err
+	}};
 }
 
 /// The entrypoint that the spawned execute worker should start with.
@@ -110,8 +120,6 @@ fn send_response(stream: &mut UnixStream, response: WorkerResponse) -> io::Result<()> {
 /// check is not necessary.
 ///
 /// - `worker_version`: see above
-///
-/// - `security_status`: contains the detected status of security features.
 pub fn worker_entrypoint(
 	socket_path: PathBuf,
 	worker_dir_path: PathBuf,
@@ -127,13 +135,28 @@ pub fn worker_entrypoint(
 		|mut stream, worker_info, security_status| {
 			let artifact_path = worker_dir::execute_artifact(&worker_info.worker_dir_path);
 
-			let Handshake { executor_params } = recv_execute_handshake(&mut stream)?;
+			let Handshake { executor_params } =
+				recv_execute_handshake(&mut stream).map_err(|e| {
+					map_and_send_err!(
+						e,
+						InternalValidationError::HostCommunication,
+						&mut stream,
+						worker_info
+					)
+				})?;
 
 			let executor_params: Arc<ExecutorParams> = Arc::new(executor_params);
 			let execute_thread_stack_size = max_stack_size(&executor_params);
 
 			loop {
-				let (params, execution_timeout) = recv_request(&mut stream)?;
+				let (params, execution_timeout) = recv_request(&mut stream).map_err(|e| {
+					map_and_send_err!(
+						e,
+						InternalValidationError::HostCommunication,
+						&mut stream,
+						worker_info
+					)
+				})?;
 				gum::debug!(
 					target: LOG_TARGET,
 					?worker_info,
@@ -143,27 +166,34 @@ pub fn worker_entrypoint(
 				);
 
 				// Get the artifact bytes.
-				let compiled_artifact_blob = match std::fs::read(&artifact_path) {
-					Ok(bytes) => bytes,
-					Err(err) => {
-						let response = WorkerResponse::InternalError(
-							InternalValidationError::CouldNotOpenFile(err.to_string()),
-						);
-						send_response(&mut stream, response)?;
-						continue
-					},
-				};
-
-				let (pipe_read_fd, pipe_write_fd) = pipe2_cloexec()?;
-
-				let usage_before = match nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) {
-					Ok(usage) => usage,
-					Err(errno) => {
-						let response = internal_error_from_errno("getrusage before", errno);
-						send_response(&mut stream, response)?;
-						continue
-					},
-				};
+				let compiled_artifact_blob = std::fs::read(&artifact_path).map_err(|e| {
+					map_and_send_err!(
+						e,
+						InternalValidationError::CouldNotOpenFile,
+						&mut stream,
+						worker_info
+					)
+				})?;
+
+				let (pipe_read_fd, pipe_write_fd) = pipe2_cloexec().map_err(|e| {
+					map_and_send_err!(
+						e,
+						InternalValidationError::CouldNotCreatePipe,
+						&mut stream,
+						worker_info
+					)
+				})?;
+
+				let usage_before = nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN)
+					.map_err(|errno| {
+						let e = stringify_errno("getrusage before", errno);
+						map_and_send_err!(
+							e,
+							InternalValidationError::Kernel,
+							&mut stream,
+							worker_info
+						)
+					})?;
 
 				let stream_fd = stream.as_raw_fd();
 
 				let compiled_artifact_blob = Arc::new(compiled_artifact_blob);
@@ -222,7 +252,7 @@
 						"worker: sending result to host: {:?}",
 						result
 					);
-					send_response(&mut stream, result)?;
+					send_result(&mut stream, result, worker_info)?;
 				}
 			},
 		);
@@ -270,7 +300,7 @@ fn handle_clone(
 	worker_info: &WorkerInfo,
 	have_unshare_newuser: bool,
 	usage_before: Usage,
-) -> io::Result<WorkerResponse> {
+) -> io::Result<Result<WorkerResponse, WorkerError>> {
 	use polkadot_node_core_pvf_common::worker::security;
 
 	// SAFETY: new process is spawned within a single threaded process. This invariant
@@ -301,7 +331,8 @@ fn handle_clone(
 			usage_before,
 			execution_timeout,
 		),
-		Err(security::clone::Error::Clone(errno)) => Ok(internal_error_from_errno("clone", errno)),
+		Err(security::clone::Error::Clone(errno)) =>
+			Ok(Err(internal_error_from_errno("clone", errno))),
 	}
 }
 
@@ -316,7 +347,7 @@ fn handle_fork(
 	execute_worker_stack_size: usize,
 	worker_info: &WorkerInfo,
 	usage_before: Usage,
-) -> io::Result<WorkerResponse> {
+) -> io::Result<Result<WorkerResponse, WorkerError>> {
 	// SAFETY: new process is spawned within a single threaded process. This invariant
 	// is enforced by tests.
 	match unsafe { nix::unistd::fork() } {
@@ -338,7 +369,7 @@ fn handle_fork(
 			usage_before,
 			execution_timeout,
 		),
-		Err(errno) => Ok(internal_error_from_errno("fork", errno)),
+		Err(errno) => Ok(Err(internal_error_from_errno("fork", errno))),
 	}
 }
 
@@ -483,11 +514,11 @@ fn handle_parent_process(
 	job_pid: Pid,
 	usage_before: Usage,
 	timeout: Duration,
-) -> io::Result<WorkerResponse> {
+) -> io::Result<Result<WorkerResponse, WorkerError>> {
 	// the read end will wait until all write ends have been closed,
 	// this drop is necessary to avoid deadlock
 	if let Err(errno) = nix::unistd::close(pipe_write_fd) {
-		return Ok(internal_error_from_errno("closing pipe write fd", errno));
+		return Ok(Err(internal_error_from_errno("closing pipe write fd", errno)));
 	};
 
 	// SAFETY: pipe_read_fd is an open and owned file descriptor at this point.
@@ -512,7 +543,7 @@ fn handle_parent_process( let usage_after = match nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) { Ok(usage) => usage, - Err(errno) => return Ok(internal_error_from_errno("getrusage after", errno)), + Err(errno) => return Ok(Err(internal_error_from_errno("getrusage after", errno))), }; // Using `getrusage` is needed to check whether child has timedout since we cannot rely on @@ -530,32 +561,25 @@ fn handle_parent_process( cpu_tv.as_millis(), timeout.as_millis(), ); - return Ok(WorkerResponse::JobTimedOut) + return Ok(Err(WorkerError::JobTimedOut)) } match status { Ok(WaitStatus::Exited(_, exit_status)) => { let mut reader = io::BufReader::new(received_data.as_slice()); - let result = match recv_child_response(&mut reader) { - Ok(result) => result, - Err(err) => return Ok(WorkerResponse::JobError(err.to_string())), - }; + let result = recv_child_response(&mut reader, "execute")?; match result { - Ok(JobResponse::Ok { result_descriptor }) => { + Ok(job_response) => { // The exit status should have been zero if no error occurred. if exit_status != 0 { - return Ok(WorkerResponse::JobError(format!( - "unexpected exit status: {}", - exit_status - ))) + return Ok(Err(WorkerError::JobError(JobError::UnexpectedExitStatus( + exit_status, + )))); } - Ok(WorkerResponse::Ok { result_descriptor, duration: cpu_tv }) + Ok(Ok(WorkerResponse { job_response, duration: cpu_tv })) }, - Ok(JobResponse::InvalidCandidate(err)) => Ok(WorkerResponse::InvalidCandidate(err)), - Ok(JobResponse::RuntimeConstruction(err)) => - Ok(WorkerResponse::RuntimeConstruction(err)), Err(job_error) => { gum::warn!( target: LOG_TARGET, @@ -565,9 +589,9 @@ fn handle_parent_process( job_error, ); if matches!(job_error, JobError::TimedOut) { - Ok(WorkerResponse::JobTimedOut) + Ok(Err(WorkerError::JobTimedOut)) } else { - Ok(WorkerResponse::JobError(job_error.to_string())) + Ok(Err(WorkerError::JobError(job_error.into()))) } }, } @@ -576,50 +600,21 @@ fn handle_parent_process( // // The job gets SIGSYS on seccomp violations, but this signal may have been sent for some // other reason, so we still need to check for seccomp violations elsewhere. - Ok(WaitStatus::Signaled(_pid, signal, _core_dump)) => Ok(WorkerResponse::JobDied { + Ok(WaitStatus::Signaled(_pid, signal, _core_dump)) => Ok(Err(WorkerError::JobDied { err: format!("received signal: {signal:?}"), job_pid: job_pid.as_raw(), - }), - Err(errno) => Ok(internal_error_from_errno("waitpid", errno)), + })), + Err(errno) => Ok(Err(internal_error_from_errno("waitpid", errno))), // It is within an attacker's power to send an unexpected exit status. So we cannot treat // this as an internal error (which would make us abstain), but must vote against. - Ok(unexpected_wait_status) => Ok(WorkerResponse::JobDied { + Ok(unexpected_wait_status) => Ok(Err(WorkerError::JobDied { err: format!("unexpected status from wait: {unexpected_wait_status:?}"), job_pid: job_pid.as_raw(), - }), + })), } } -/// Calculate the total CPU time from the given `usage` structure, returned from -/// [`nix::sys::resource::getrusage`], and calculates the total CPU time spent, including both user -/// and system time. -/// -/// # Arguments -/// -/// - `rusage`: Contains resource usage information. -/// -/// # Returns -/// -/// Returns a `Duration` representing the total CPU time. 
-fn get_total_cpu_usage(rusage: Usage) -> Duration {
-	let micros = (((rusage.user_time().tv_sec() + rusage.system_time().tv_sec()) * 1_000_000) +
-		(rusage.system_time().tv_usec() + rusage.user_time().tv_usec()) as i64) as u64;
-
-	return Duration::from_micros(micros)
-}
-
-/// Get a job response.
-fn recv_child_response(received_data: &mut io::BufReader<&[u8]>) -> io::Result<JobResult> {
-	let response_bytes = framed_recv_blocking(received_data)?;
-	JobResult::decode(&mut response_bytes.as_slice()).map_err(|e| {
-		io::Error::new(
-			io::ErrorKind::Other,
-			format!("execute pvf recv_child_response: decode error: {}", e),
-		)
-	})
-}
-
 /// Write a job response to the pipe and exit process after.
 ///
 /// # Arguments
@@ -638,15 +633,10 @@ fn send_child_response(pipe_write: &mut PipeFd, response: JobResult) -> ! {
 	}
 }
 
-fn internal_error_from_errno(context: &'static str, errno: Errno) -> WorkerResponse {
-	WorkerResponse::InternalError(InternalValidationError::Kernel(format!(
-		"{}: {}: {}",
-		context,
-		errno,
-		io::Error::last_os_error()
-	)))
+fn internal_error_from_errno(context: &'static str, errno: Errno) -> WorkerError {
+	WorkerError::InternalError(InternalValidationError::Kernel(stringify_errno(context, errno)))
 }
 
 fn job_error_from_errno(context: &'static str, errno: Errno) -> JobResult {
-	Err(JobError::Kernel(format!("{}: {}: {}", context, errno, io::Error::last_os_error())))
+	Err(JobError::Kernel(stringify_errno(context, errno)))
 }
diff --git a/polkadot/node/core/pvf/prepare-worker/src/lib.rs b/polkadot/node/core/pvf/prepare-worker/src/lib.rs
index 82a56107ef5..d1b218f48ae 100644
--- a/polkadot/node/core/pvf/prepare-worker/src/lib.rs
+++ b/polkadot/node/core/pvf/prepare-worker/src/lib.rs
@@ -26,7 +26,6 @@ const LOG_TARGET: &str = "parachain::pvf-prepare-worker";
 use crate::memory_stats::max_rss_stat::{extract_max_rss_stat, get_max_rss_thread};
 #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))]
 use crate::memory_stats::memory_tracker::{get_memory_tracker_loop_stats, memory_tracker_loop};
-use libc;
 use nix::{
 	errno::Errno,
 	sys::{
@@ -48,7 +47,8 @@ use polkadot_node_core_pvf_common::{
 	prepare::{MemoryStats, PrepareJobKind, PrepareStats, PrepareWorkerSuccess},
 	pvf::PvfPrepData,
 	worker::{
-		cpu_time_monitor_loop, run_worker, stringify_panic_payload,
+		cpu_time_monitor_loop, get_total_cpu_usage, recv_child_response, run_worker, send_result,
+		stringify_errno, stringify_panic_payload,
 		thread::{self, spawn_worker_thread, WaitOutcome},
 		WorkerKind,
 	},
@@ -117,11 +117,6 @@ fn recv_request(stream: &mut UnixStream) -> io::Result<PvfPrepData> {
 	Ok(pvf)
 }
 
-/// Send a worker response.
-fn send_response(stream: &mut UnixStream, result: PrepareWorkerResult) -> io::Result<()> {
-	framed_send_blocking(stream, &result.encode())
-}
-
 fn start_memory_tracking(fd: RawFd, limit: Option<isize>) {
 	unsafe {
 		// SAFETY: Inside the failure handler, the allocator is locked and no allocations or
@@ -178,8 +173,6 @@ fn end_memory_tracking() -> isize {
 ///
 /// - `worker_version`: see above
 ///
-/// - `security_status`: contains the detected status of security features.
-///
 /// # Flow
 ///
 /// This runs the following in a loop:
@@ -233,8 +226,9 @@ pub fn worker_entrypoint(
 				let usage_before = match nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) {
 					Ok(usage) => usage,
 					Err(errno) => {
-						let result = Err(error_from_errno("getrusage before", errno));
-						send_response(&mut stream, result)?;
+						let result: PrepareWorkerResult =
+							Err(error_from_errno("getrusage before", errno));
+						send_result(&mut stream, result, worker_info)?;
 						continue
 					},
 				};
@@ -294,7 +288,7 @@ pub fn worker_entrypoint(
 						"worker: sending result to host: {:?}",
 						result
 					);
-					send_response(&mut stream, result)?;
+					send_result(&mut stream, result, worker_info)?;
 				}
 			},
 		);
@@ -666,7 +660,7 @@ fn handle_parent_process(
 	match status {
 		Ok(WaitStatus::Exited(_pid, exit_status)) => {
 			let mut reader = io::BufReader::new(received_data.as_slice());
-			let result = recv_child_response(&mut reader)
+			let result = recv_child_response(&mut reader, "prepare")
 				.map_err(|err| PrepareError::JobError(err.to_string()))?;
 
 			match result {
@@ -726,35 +720,6 @@ fn handle_parent_process(
 	}
 }
 
-/// Calculate the total CPU time from the given `usage` structure, returned from
-/// [`nix::sys::resource::getrusage`], and calculates the total CPU time spent, including both user
-/// and system time.
-///
-/// # Arguments
-///
-/// - `rusage`: Contains resource usage information.
-///
-/// # Returns
-///
-/// Returns a `Duration` representing the total CPU time.
-fn get_total_cpu_usage(rusage: Usage) -> Duration {
-	let micros = (((rusage.user_time().tv_sec() + rusage.system_time().tv_sec()) * 1_000_000) +
-		(rusage.system_time().tv_usec() + rusage.user_time().tv_usec()) as i64) as u64;
-
-	return Duration::from_micros(micros)
-}
-
-/// Get a job response.
-fn recv_child_response(received_data: &mut io::BufReader<&[u8]>) -> io::Result<JobResult> {
-	let response_bytes = framed_recv_blocking(received_data)?;
-	JobResult::decode(&mut response_bytes.as_slice()).map_err(|e| {
-		io::Error::new(
-			io::ErrorKind::Other,
-			format!("prepare pvf recv_child_response: decode error: {:?}", e),
-		)
-	})
-}
-
 /// Write a job response to the pipe and exit process after.
 ///
 /// # Arguments
@@ -774,7 +739,7 @@ fn send_child_response(pipe_write: &mut PipeFd, response: JobResult) -> ! {
 }
 
 fn error_from_errno(context: &'static str, errno: Errno) -> PrepareError {
-	PrepareError::Kernel(format!("{}: {}: {}", context, errno, io::Error::last_os_error()))
+	PrepareError::Kernel(stringify_errno(context, errno))
 }
 
 type JobResult = Result<PrepareWorkerSuccess, PrepareError>;
diff --git a/polkadot/node/core/pvf/src/execute/queue.rs b/polkadot/node/core/pvf/src/execute/queue.rs
index bdc3c7327b0..af147a2ba22 100644
--- a/polkadot/node/core/pvf/src/execute/queue.rs
+++ b/polkadot/node/core/pvf/src/execute/queue.rs
@@ -16,7 +16,7 @@
 //! A queue that handles requests for PVF execution.
-use super::worker_interface::Outcome;
+use super::worker_interface::{Error as WorkerInterfaceError, Response as WorkerInterfaceResponse};
 use crate::{
 	artifacts::{ArtifactId, ArtifactPathId},
 	host::ResultSender,
@@ -30,7 +30,10 @@ use futures::{
 	stream::{FuturesUnordered, StreamExt as _},
 	Future, FutureExt,
 };
-use polkadot_node_core_pvf_common::SecurityStatus;
+use polkadot_node_core_pvf_common::{
+	execute::{JobResponse, WorkerError, WorkerResponse},
+	SecurityStatus,
+};
 use polkadot_primitives::{ExecutorParams, ExecutorParamsHash};
 use slotmap::HopSlotMap;
 use std::{
@@ -133,7 +136,12 @@ impl Workers {
 enum QueueEvent {
 	Spawn(IdleWorker, WorkerHandle, ExecuteJob),
-	StartWork(Worker, Outcome, ArtifactId, ResultSender),
+	StartWork(
+		Worker,
+		Result<WorkerInterfaceResponse, WorkerInterfaceError>,
+		ArtifactId,
+		ResultSender,
+	),
 }
 
 type Mux = FuturesUnordered<BoxFuture<'static, QueueEvent>>;
@@ -340,23 +348,34 @@ fn handle_worker_spawned(
 async fn handle_job_finish(
 	queue: &mut Queue,
 	worker: Worker,
-	outcome: Outcome,
+	worker_result: Result<WorkerInterfaceResponse, WorkerInterfaceError>,
 	artifact_id: ArtifactId,
 	result_tx: ResultSender,
 ) {
-	let (idle_worker, result, duration, sync_channel) = match outcome {
-		Outcome::Ok { result_descriptor, duration, idle_worker } => {
+	let (idle_worker, result, duration, sync_channel) = match worker_result {
+		Ok(WorkerInterfaceResponse {
+			worker_response:
+				WorkerResponse { job_response: JobResponse::Ok { result_descriptor }, duration },
+			idle_worker,
+		}) => {
 			// TODO: propagate the soft timeout
 
 			(Some(idle_worker), Ok(result_descriptor), Some(duration), None)
 		},
-		Outcome::InvalidCandidate { err, idle_worker } => (
+		Ok(WorkerInterfaceResponse {
+			worker_response: WorkerResponse { job_response: JobResponse::InvalidCandidate(err), .. },
+			idle_worker,
+		}) => (
 			Some(idle_worker),
 			Err(ValidationError::Invalid(InvalidCandidate::WorkerReportedInvalid(err))),
 			None,
 			None,
 		),
-		Outcome::RuntimeConstruction { err, idle_worker } => {
+		Ok(WorkerInterfaceResponse {
+			worker_response:
+				WorkerResponse { job_response: JobResponse::RuntimeConstruction(err), .. },
+			idle_worker,
+		}) => {
 			// The task for artifact removal is executed concurrently with
 			// the message to the host on the execution result.
 			let (result_tx, result_rx) = oneshot::channel();
@@ -376,27 +395,31 @@ async fn handle_job_finish(
 				Some(result_rx),
 			)
 		},
-		Outcome::InternalError { err } => (None, Err(ValidationError::Internal(err)), None, None),
+
+		Err(WorkerInterfaceError::InternalError(err)) |
+		Err(WorkerInterfaceError::WorkerError(WorkerError::InternalError(err))) =>
+			(None, Err(ValidationError::Internal(err)), None, None),
 		// Either the worker or the job timed out. Kill the worker in either case. Treated as
 		// definitely-invalid, because if we timed out, there's no time left for a retry.
-		Outcome::HardTimeout =>
+		Err(WorkerInterfaceError::HardTimeout) |
+		Err(WorkerInterfaceError::WorkerError(WorkerError::JobTimedOut)) =>
			(None, Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)), None, None),
 		// "Maybe invalid" errors (will retry).
-		Outcome::WorkerIntfErr => (
+		Err(WorkerInterfaceError::CommunicationErr(_err)) => (
 			None,
 			Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)),
 			None,
 			None,
 		),
-		Outcome::JobDied { err } => (
+		Err(WorkerInterfaceError::WorkerError(WorkerError::JobDied { err, ..
})) => ( None, Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousJobDeath(err))), None, None, ), - Outcome::JobError { err } => ( + Err(WorkerInterfaceError::WorkerError(WorkerError::JobError(err))) => ( None, - Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(err))), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(err.to_string()))), None, None, ), @@ -543,14 +566,14 @@ fn assign(queue: &mut Queue, worker: Worker, job: ExecuteJob) { queue.mux.push( async move { let _timer = execution_timer; - let outcome = super::worker_interface::start_work( + let result = super::worker_interface::start_work( idle, job.artifact.clone(), job.exec_timeout, job.params, ) .await; - QueueEvent::StartWork(worker, outcome, job.artifact.id, job.result_tx) + QueueEvent::StartWork(worker, result, job.artifact.id, job.result_tx) } .boxed(), ); diff --git a/polkadot/node/core/pvf/src/execute/worker_interface.rs b/polkadot/node/core/pvf/src/execute/worker_interface.rs index db81da118d7..9dcadfb4c2a 100644 --- a/polkadot/node/core/pvf/src/execute/worker_interface.rs +++ b/polkadot/node/core/pvf/src/execute/worker_interface.rs @@ -29,10 +29,9 @@ use futures_timer::Delay; use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ error::InternalValidationError, - execute::{Handshake, WorkerResponse}, + execute::{Handshake, WorkerError, WorkerResponse}, worker_dir, SecurityStatus, }; -use polkadot_parachain_primitives::primitives::ValidationResult; use polkadot_primitives::ExecutorParams; use std::{path::Path, time::Duration}; use tokio::{io, net::UnixStream}; @@ -69,7 +68,8 @@ pub async fn spawn( gum::warn!( target: LOG_TARGET, worker_pid = %idle_worker.pid, - %err + "failed to send a handshake to the spawned worker: {}", + error ); err })?; @@ -78,39 +78,40 @@ pub async fn spawn( /// Outcome of PVF execution. /// -/// If the idle worker token is not returned, it means the worker must be terminated. -pub enum Outcome { - /// PVF execution completed successfully and the result is returned. The worker is ready for - /// another job. - Ok { result_descriptor: ValidationResult, duration: Duration, idle_worker: IdleWorker }, - /// The candidate validation failed. It may be for example because the wasm execution triggered - /// a trap. Errors related to the preparation process are not expected to be encountered by the - /// execution workers. - InvalidCandidate { err: String, idle_worker: IdleWorker }, - /// The error is probably transient. It may be for example - /// because the artifact was prepared with a Wasmtime version different from the version - /// in the current execution environment. - RuntimeConstruction { err: String, idle_worker: IdleWorker }, +/// PVF execution completed and the result is returned. The worker is ready for +/// another job. +pub struct Response { + /// The response (valid/invalid) from the worker. + pub worker_response: WorkerResponse, + /// Returning the idle worker token means the worker can be reused. + pub idle_worker: IdleWorker, +} +/// The idle worker token is not returned for any of these cases, meaning the worker must be +/// terminated. +/// +/// NOTE: Errors related to the preparation process are not expected to be encountered by the +/// execution workers. +#[derive(thiserror::Error, Debug)] +pub enum Error { /// The execution time exceeded the hard limit. The worker is terminated. 
+ #[error("The communication with the worker exceeded the hard limit")] HardTimeout, /// An I/O error happened during communication with the worker. This may mean that the worker /// process already died. The token is not returned in any case. - WorkerIntfErr, - /// The job process has died. We must kill the worker just in case. - /// - /// We cannot treat this as an internal error because malicious code may have caused this. - JobDied { err: String }, - /// An unexpected error occurred in the job process. - /// - /// Because malicious code can cause a job error, we must not treat it as an internal error. - JobError { err: String }, + #[error("An I/O error happened during communication with the worker: {0}")] + CommunicationErr(#[from] io::Error), + /// The worker reported an error (can be from itself or from the job). The worker should not be + /// reused. + #[error("The worker reported an error: {0}")] + WorkerError(#[from] WorkerError), /// An internal error happened during the validation. Such an error is most likely related to /// some transient glitch. /// /// Should only ever be used for errors independent of the candidate and PVF. Therefore it may /// be a problem with the worker, so we terminate it. - InternalError { err: InternalValidationError }, + #[error("An internal error occurred: {0}")] + InternalError(#[from] InternalValidationError), } /// Given the idle token of a worker and parameters of work, communicates with the worker and @@ -123,7 +124,7 @@ pub async fn start_work( artifact: ArtifactPathId, execution_timeout: Duration, validation_params: Vec, -) -> Outcome { +) -> Result { let IdleWorker { mut stream, pid, worker_dir } = worker; gum::debug!( @@ -136,16 +137,18 @@ pub async fn start_work( ); with_worker_dir_setup(worker_dir, pid, &artifact.path, |worker_dir| async move { - if let Err(error) = send_request(&mut stream, &validation_params, execution_timeout).await { - gum::warn!( - target: LOG_TARGET, - worker_pid = %pid, - validation_code_hash = ?artifact.id.code_hash, - ?error, - "failed to send an execute request", - ); - return Outcome::WorkerIntfErr - } + send_request(&mut stream, &validation_params, execution_timeout).await.map_err( + |error| { + gum::warn!( + target: LOG_TARGET, + worker_pid = %pid, + validation_code_hash = ?artifact.id.code_hash, + "failed to send an execute request: {}", + error, + ); + Error::InternalError(InternalValidationError::HostCommunication(error.to_string())) + }, + )?; // We use a generous timeout here. This is in addition to the one in the child process, in // case the child stalls. We have a wall clock timeout here in the host, but a CPU timeout @@ -153,12 +156,12 @@ pub async fn start_work( // load, but the CPU resources of the child can only be measured from the parent after the // child process terminates. let timeout = execution_timeout * JOB_TIMEOUT_WALL_CLOCK_FACTOR; - let response = futures::select! { - response = recv_response(&mut stream).fuse() => { - match response { - Ok(response) => - handle_response( - response, + let worker_result = futures::select! 
{ + worker_result = recv_result(&mut stream).fuse() => { + match worker_result { + Ok(result) => + handle_result( + result, pid, execution_timeout, ) @@ -168,11 +171,11 @@ pub async fn start_work( target: LOG_TARGET, worker_pid = %pid, validation_code_hash = ?artifact.id.code_hash, - ?error, - "failed to recv an execute response", + "failed to recv an execute result: {}", + error, ); - return Outcome::WorkerIntfErr + return Err(Error::CommunicationErr(error)) }, } }, @@ -183,29 +186,16 @@ pub async fn start_work( validation_code_hash = ?artifact.id.code_hash, "execution worker exceeded lenient timeout for execution, child worker likely stalled", ); - WorkerResponse::JobTimedOut + return Err(Error::HardTimeout) }, }; - match response { - WorkerResponse::Ok { result_descriptor, duration } => Outcome::Ok { - result_descriptor, - duration, - idle_worker: IdleWorker { stream, pid, worker_dir }, - }, - WorkerResponse::InvalidCandidate(err) => Outcome::InvalidCandidate { - err, - idle_worker: IdleWorker { stream, pid, worker_dir }, - }, - WorkerResponse::RuntimeConstruction(err) => Outcome::RuntimeConstruction { - err, + match worker_result { + Ok(worker_response) => Ok(Response { + worker_response, idle_worker: IdleWorker { stream, pid, worker_dir }, - }, - WorkerResponse::JobTimedOut => Outcome::HardTimeout, - WorkerResponse::JobDied { err, job_pid: _ } => Outcome::JobDied { err }, - WorkerResponse::JobError(err) => Outcome::JobError { err }, - - WorkerResponse::InternalError(err) => Outcome::InternalError { err }, + }), + Err(worker_error) => Err(worker_error.into()), } }) .await @@ -215,12 +205,12 @@ pub async fn start_work( /// /// Here we know the artifact exists, but is still located in a temporary file which will be cleared /// by [`with_worker_dir_setup`]. -async fn handle_response( - response: WorkerResponse, +async fn handle_result( + worker_result: Result, worker_pid: u32, execution_timeout: Duration, -) -> WorkerResponse { - if let WorkerResponse::Ok { duration, .. } = response { +) -> Result { + if let Ok(WorkerResponse { duration, .. }) = worker_result { if duration > execution_timeout { // The job didn't complete within the timeout. gum::warn!( @@ -232,11 +222,11 @@ async fn handle_response( ); // Return a timeout error. - return WorkerResponse::JobTimedOut + return Err(WorkerError::JobTimedOut) } } - response + worker_result } /// Create a temporary file for an artifact in the worker cache, execute the given future/closure @@ -249,9 +239,9 @@ async fn with_worker_dir_setup( pid: u32, artifact_path: &Path, f: F, -) -> Outcome +) -> Result where - Fut: futures::Future, + Fut: futures::Future>, F: FnOnce(WorkerDir) -> Fut, { // Cheaply create a hard link to the artifact. The artifact is always at a known location in the @@ -263,16 +253,14 @@ where target: LOG_TARGET, worker_pid = %pid, ?worker_dir, - "failed to clear worker cache after the job: {:?}", + "failed to clear worker cache after the job: {}", err, ); - return Outcome::InternalError { - err: InternalValidationError::CouldNotCreateLink(format!("{:?}", err)), - } + return Err(InternalValidationError::CouldNotCreateLink(format!("{:?}", err)).into()); } let worker_dir_path = worker_dir.path().to_owned(); - let outcome = f(worker_dir).await; + let result = f(worker_dir).await; // Try to clear the worker dir. 
if let Err(err) = clear_worker_dir_path(&worker_dir_path) { @@ -283,15 +271,14 @@ where "failed to clear worker cache after the job: {:?}", err, ); - return Outcome::InternalError { - err: InternalValidationError::CouldNotClearWorkerDir { - err: format!("{:?}", err), - path: worker_dir_path.to_str().map(String::from), - }, + return Err(InternalValidationError::CouldNotClearWorkerDir { + err: format!("{:?}", err), + path: worker_dir_path.to_str().map(String::from), } + .into()) } - outcome + result } /// Sends a handshake with information specific to the execute worker. @@ -308,12 +295,12 @@ async fn send_request( framed_send(stream, &execution_timeout.encode()).await } -async fn recv_response(stream: &mut UnixStream) -> io::Result { - let response_bytes = framed_recv(stream).await?; - WorkerResponse::decode(&mut response_bytes.as_slice()).map_err(|e| { +async fn recv_result(stream: &mut UnixStream) -> io::Result> { + let result_bytes = framed_recv(stream).await?; + Result::::decode(&mut result_bytes.as_slice()).map_err(|e| { io::Error::new( io::ErrorKind::Other, - format!("execute pvf recv_response: decode error: {:?}", e), + format!("execute pvf recv_result: decode error: {:?}", e), ) }) } diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index 247d753d7c4..2d180fc5929 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -959,10 +959,7 @@ pub(crate) mod tests { use crate::{artifacts::generate_artifact_path, PossiblyInvalidError}; use assert_matches::assert_matches; use futures::future::BoxFuture; - use polkadot_node_core_pvf_common::{ - error::PrepareError, - prepare::{PrepareStats, PrepareSuccess}, - }; + use polkadot_node_core_pvf_common::prepare::PrepareStats; const TEST_EXECUTION_TIMEOUT: Duration = Duration::from_secs(3); pub(crate) const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(30); -- GitLab From e504c41a5adbd5e6d9a7764c07f6dcf47b2dae77 Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Sat, 20 Apr 2024 02:05:34 +0200 Subject: [PATCH 041/107] Allow privileged virtual bond in Staking pallet (#3889) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is the first PR in preparation for https://github.com/paritytech/polkadot-sdk/issues/454. ## Follow-ups: - https://github.com/paritytech/polkadot-sdk/pull/3904. - https://github.com/paritytech/polkadot-sdk/pull/3905. Overall changes are documented here (a lot more visual 😍): https://hackmd.io/@ak0n/454-np-governance [Maybe followup](https://github.com/paritytech/polkadot-sdk/issues/4217) with migration of storage item `VirtualStakers` as a bool or enum in `Ledger`. ## Context We want to achieve a way for a user (`Delegator`) to delegate their funds to another account (`Agent`). Delegation implies that the funds stay locked in the delegator's own account, while the agent can act on the delegator's behalf to stake directly on the Staking pallet. The delegation feature is added to Staking via another pallet, `delegated-staking`, worked on [here](https://github.com/paritytech/polkadot-sdk/pull/3904). ## Introduces: ### StakingUnchecked Trait As the name implies, this trait allows unchecked (non-locked) mutation of the staking ledger. These APIs are only meant to be used by other pallets in the runtime and should not be exposed directly to user code paths. Also related: https://github.com/paritytech/polkadot-sdk/issues/3888. 
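To make the intended call pattern concrete, here is a minimal sketch of how a runtime pallet could drive the new trait. This is hypothetical helper code, not part of this patch: the `delegate_and_stake` name, its parameters, and the choice of nomination targets are illustrative assumptions; only the `virtual_bond` and `nominate` trait methods come from the API introduced below.

```rust
use frame_support::dispatch::DispatchResult;
use sp_staking::{StakingInterface, StakingUnchecked};
use sp_std::vec::Vec;

/// Hypothetical helper in a pallet that custodies the agent's funds itself.
fn delegate_and_stake<Staking: StakingUnchecked>(
	agent: &Staking::AccountId,
	reward_acc: &Staking::AccountId,
	amount: Staking::Balance,
	targets: Vec<Staking::AccountId>,
) -> DispatchResult {
	// The calling pallet must have locked `amount` on its side already:
	// `virtual_bond` only book-keeps the ledger and applies no lock.
	// Note that `reward_acc` must differ from `agent` (no compounding).
	Staking::virtual_bond(agent, amount, reward_acc)?;
	// Once bonded, the ordinary `StakingInterface` methods apply.
	Staking::nominate(agent, targets)
}
```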
### Virtual Bond Allows other pallets to stake via the staking pallet while managing the locks on these accounts themselves. Introduces another storage item, `VirtualStakers`, that whitelists these accounts. We also forbid virtual stakers from setting the reward account to themselves. Since the account has no locks, we cannot support compounding of rewards. Conservatively, we require them to set a separate reward account, different from the staker. Since these accounts are code-managed, it should be easy for another pallet to redistribute rewards and re-bond them. ### Slashes Since there is no actual lock maintained by the staking pallet for virtual stakers, this pallet does not apply any slashes. It is therefore important for pallets managing virtual stakers to listen to slashing events and apply the necessary slashes. --- prdoc/pr_3889.prdoc | 14 ++ substrate/frame/nomination-pools/src/mock.rs | 8 + substrate/frame/staking/src/ledger.rs | 43 ++-- substrate/frame/staking/src/mock.rs | 23 +- substrate/frame/staking/src/pallet/impls.rs | 155 +++++++++++- substrate/frame/staking/src/pallet/mod.rs | 39 ++- substrate/frame/staking/src/slashing.rs | 22 +- substrate/frame/staking/src/tests.rs | 243 ++++++++++++++++++- substrate/primitives/staking/src/lib.rs | 38 ++- 9 files changed, 513 insertions(+), 72 deletions(-) create mode 100644 prdoc/pr_3889.prdoc diff --git a/prdoc/pr_3889.prdoc b/prdoc/pr_3889.prdoc new file mode 100644 index 00000000000..b32ffcc214c --- /dev/null +++ b/prdoc/pr_3889.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Allow privileged virtual bond into pallet Staking + +doc: + - audience: Runtime Dev + description: | + Introduces a new low level API to allow privileged virtual bond into pallet Staking. This allows other pallets + to stake funds into staking pallet while managing the fund lock and unlocking process themselves. 
+ +crates: + - name: pallet-staking + diff --git a/substrate/frame/nomination-pools/src/mock.rs b/substrate/frame/nomination-pools/src/mock.rs index b9301a40095..686402b8434 100644 --- a/substrate/frame/nomination-pools/src/mock.rs +++ b/substrate/frame/nomination-pools/src/mock.rs @@ -131,6 +131,10 @@ impl sp_staking::StakingInterface for StakingMock { Ok(()) } + fn update_payee(_stash: &Self::AccountId, _reward_acc: &Self::AccountId) -> DispatchResult { + unimplemented!("method currently not used in testing") + } + fn chill(_: &Self::AccountId) -> sp_runtime::DispatchResult { Ok(()) } @@ -223,6 +227,10 @@ impl sp_staking::StakingInterface for StakingMock { fn max_exposure_page_size() -> sp_staking::Page { unimplemented!("method currently not used in testing") } + + fn slash_reward_fraction() -> Perbill { + unimplemented!("method currently not used in testing") + } } #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] diff --git a/substrate/frame/staking/src/ledger.rs b/substrate/frame/staking/src/ledger.rs index 9461daefed6..67a86b86226 100644 --- a/substrate/frame/staking/src/ledger.rs +++ b/substrate/frame/staking/src/ledger.rs @@ -33,13 +33,14 @@ use frame_support::{ defensive, ensure, - traits::{Defensive, LockableCurrency, WithdrawReasons}, + traits::{Defensive, LockableCurrency}, }; use sp_staking::StakingAccount; use sp_std::prelude::*; use crate::{ - BalanceOf, Bonded, Config, Error, Ledger, Payee, RewardDestination, StakingLedger, STAKING_ID, + BalanceOf, Bonded, Config, Error, Ledger, Pallet, Payee, RewardDestination, StakingLedger, + VirtualStakers, STAKING_ID, }; #[cfg(any(feature = "runtime-benchmarks", test))] @@ -187,7 +188,17 @@ impl StakingLedger { return Err(Error::::NotStash) } - T::Currency::set_lock(STAKING_ID, &self.stash, self.total, WithdrawReasons::all()); + // We skip locking virtual stakers. + if !Pallet::::is_virtual_staker(&self.stash) { + // for direct stakers, update lock on stash based on ledger. + T::Currency::set_lock( + STAKING_ID, + &self.stash, + self.total, + frame_support::traits::WithdrawReasons::all(), + ); + } + Ledger::::insert( &self.controller().ok_or_else(|| { defensive!("update called on a ledger that is not bonded."); @@ -204,22 +215,22 @@ impl StakingLedger { /// It sets the reward preferences for the bonded stash. pub(crate) fn bond(self, payee: RewardDestination) -> Result<(), Error> { if >::contains_key(&self.stash) { - Err(Error::::AlreadyBonded) - } else { - >::insert(&self.stash, payee); - >::insert(&self.stash, &self.stash); - self.update() + return Err(Error::::AlreadyBonded) } + + >::insert(&self.stash, payee); + >::insert(&self.stash, &self.stash); + self.update() } /// Sets the ledger Payee. pub(crate) fn set_payee(self, payee: RewardDestination) -> Result<(), Error> { if !>::contains_key(&self.stash) { - Err(Error::::NotStash) - } else { - >::insert(&self.stash, payee); - Ok(()) + return Err(Error::::NotStash) } + + >::insert(&self.stash, payee); + Ok(()) } /// Sets the ledger controller to its stash. @@ -252,12 +263,16 @@ impl StakingLedger { let controller = >::get(stash).ok_or(Error::::NotStash)?; >::get(&controller).ok_or(Error::::NotController).map(|ledger| { - T::Currency::remove_lock(STAKING_ID, &ledger.stash); Ledger::::remove(controller); - >::remove(&stash); >::remove(&stash); + // kill virtual staker if it exists. + if >::take(&stash).is_none() { + // if not virtual staker, clear locks. + T::Currency::remove_lock(STAKING_ID, &ledger.stash); + } + Ok(()) })? 
} diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index 6db462c1a70..b46b863c016 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -249,17 +249,21 @@ parameter_types! { pub static LedgerSlashPerEra: (BalanceOf, BTreeMap>) = (Zero::zero(), BTreeMap::new()); + pub static SlashObserver: BTreeMap> = BTreeMap::new(); } pub struct EventListenerMock; impl OnStakingUpdate for EventListenerMock { fn on_slash( - _pool_account: &AccountId, + pool_account: &AccountId, slashed_bonded: Balance, slashed_chunks: &BTreeMap, - _total_slashed: Balance, + total_slashed: Balance, ) { LedgerSlashPerEra::set((slashed_bonded, slashed_chunks.clone())); + SlashObserver::mutate(|map| { + map.insert(*pool_account, map.get(pool_account).unwrap_or(&0) + total_slashed) + }); } } @@ -598,6 +602,21 @@ pub(crate) fn bond_nominator(who: AccountId, val: Balance, target: Vec, +) { + // In a real scenario, `who` is a keyless account managed by another pallet which provides for + // it. + System::inc_providers(&who); + + // Bond who virtually. + assert_ok!(::virtual_bond(&who, val, &payee)); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(who), target)); +} + /// Progress to the given block, triggering session and era changes as we progress. /// /// This will finalize the previous block, initialize up to the given block, essentially simulating diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index 2f43e4847e4..0c0ef0dbf46 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -28,15 +28,18 @@ use frame_support::{ pallet_prelude::*, traits::{ Currency, Defensive, DefensiveSaturating, EstimateNextNewSession, Get, Imbalance, - InspectLockableCurrency, Len, OnUnbalanced, TryCollect, UnixTime, + InspectLockableCurrency, Len, LockableCurrency, OnUnbalanced, TryCollect, UnixTime, }, weights::Weight, }; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use pallet_session::historical; use sp_runtime::{ - traits::{Bounded, Convert, One, SaturatedConversion, Saturating, StaticLookup, Zero}, - Perbill, Percent, + traits::{ + Bounded, CheckedAdd, CheckedSub, Convert, One, SaturatedConversion, Saturating, + StaticLookup, Zero, + }, + ArithmeticError, Perbill, Percent, }; use sp_staking::{ currency_to_vote::CurrencyToVote, @@ -149,6 +152,39 @@ impl Pallet { Self::slashable_balance_of_vote_weight(who, issuance) } + pub(super) fn do_bond_extra(stash: &T::AccountId, additional: BalanceOf) -> DispatchResult { + let mut ledger = Self::ledger(StakingAccount::Stash(stash.clone()))?; + + // for virtual stakers, we don't need to check the balance. Since they are only accessed + // via low level apis, we can assume that the caller has done the due diligence. + let extra = if Self::is_virtual_staker(stash) { + additional + } else { + // additional amount or actual balance of stash whichever is lower. + additional.min( + T::Currency::free_balance(stash) + .checked_sub(&ledger.total) + .ok_or(ArithmeticError::Overflow)?, + ) + }; + + ledger.total = ledger.total.checked_add(&extra).ok_or(ArithmeticError::Overflow)?; + ledger.active = ledger.active.checked_add(&extra).ok_or(ArithmeticError::Overflow)?; + // last check: the new active amount of ledger must be more than ED. + ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientBond); + + // NOTE: ledger must be updated prior to calling `Self::weight_of`. 
+ ledger.update()?; + // update this staker in the sorted list, if they exist in it. + if T::VoterList::contains(stash) { + let _ = T::VoterList::on_update(&stash, Self::weight_of(stash)).defensive(); + } + + Self::deposit_event(Event::::Bonded { stash: stash.clone(), amount: extra }); + + Ok(()) + } + pub(super) fn do_withdraw_unbonded( controller: &T::AccountId, num_slashing_spans: u32, @@ -1132,6 +1168,11 @@ impl Pallet { ) -> Exposure> { EraInfo::::get_full_exposure(era, account) } + + /// Whether `who` is a virtual staker whose funds are managed by another pallet. + pub(crate) fn is_virtual_staker(who: &T::AccountId) -> bool { + VirtualStakers::::contains_key(who) + } } impl Pallet { @@ -1748,6 +1789,23 @@ impl StakingInterface for Pallet { .map(|_| ()) } + fn update_payee(stash: &Self::AccountId, reward_acc: &Self::AccountId) -> DispatchResult { + // Since virtual stakers are not allowed to compound their rewards as this pallet does not + // manage their locks, we do not allow reward account to be set same as stash. For + // external pallets that manage the virtual bond, they can claim rewards and re-bond them. + ensure!( + !Self::is_virtual_staker(stash) || stash != reward_acc, + Error::::RewardDestinationRestricted + ); + + // since controller is deprecated and this function is never used for old ledgers with + // distinct controllers, we can safely assume that stash is the controller. + Self::set_payee( + RawOrigin::Signed(stash.clone()).into(), + RewardDestination::Account(reward_acc.clone()), + ) + } + fn chill(who: &Self::AccountId) -> DispatchResult { // defensive-only: any account bonded via this interface has the stash set as the // controller, but we have to be sure. Same comment anywhere else that we read this. @@ -1832,6 +1890,10 @@ impl StakingInterface for Pallet { } } + fn slash_reward_fraction() -> Perbill { + SlashRewardFraction::::get() + } + sp_staking::runtime_benchmarks_enabled! { fn nominations(who: &Self::AccountId) -> Option> { Nominators::::get(who).map(|n| n.targets.into_inner()) @@ -1860,6 +1922,55 @@ impl StakingInterface for Pallet { } } +impl sp_staking::StakingUnchecked for Pallet { + fn migrate_to_virtual_staker(who: &Self::AccountId) { + T::Currency::remove_lock(crate::STAKING_ID, who); + VirtualStakers::::insert(who, ()); + } + + /// Virtually bonds `keyless_who` to `payee` with `value`. + /// + /// The payee must not be the same as the `keyless_who`. + fn virtual_bond( + keyless_who: &Self::AccountId, + value: Self::Balance, + payee: &Self::AccountId, + ) -> DispatchResult { + if StakingLedger::::is_bonded(StakingAccount::Stash(keyless_who.clone())) { + return Err(Error::::AlreadyBonded.into()) + } + + // check if payee not same as who. + ensure!(keyless_who != payee, Error::::RewardDestinationRestricted); + + // mark this pallet as consumer of `who`. + frame_system::Pallet::::inc_consumers(&keyless_who).map_err(|_| Error::::BadState)?; + + // mark who as a virtual staker. 
+ VirtualStakers::::insert(keyless_who, ()); + + Self::deposit_event(Event::::Bonded { stash: keyless_who.clone(), amount: value }); + let ledger = StakingLedger::::new(keyless_who.clone(), value); + + ledger.bond(RewardDestination::Account(payee.clone()))?; + + Ok(()) + } + + #[cfg(feature = "runtime-benchmarks")] + fn migrate_to_direct_staker(who: &Self::AccountId) { + assert!(VirtualStakers::::contains_key(who)); + let ledger = StakingLedger::::get(Stash(who.clone())).unwrap(); + T::Currency::set_lock( + crate::STAKING_ID, + who, + ledger.total, + frame_support::traits::WithdrawReasons::all(), + ); + VirtualStakers::::remove(who); + } +} + #[cfg(any(test, feature = "try-runtime"))] impl Pallet { pub(crate) fn do_try_state(_: BlockNumberFor) -> Result<(), TryRuntimeError> { @@ -1980,16 +2091,44 @@ impl Pallet { /// Invariants: /// * Stake consistency: ledger.total == ledger.active + sum(ledger.unlocking). /// * The ledger's controller and stash matches the associated `Bonded` tuple. - /// * Staking locked funds for every bonded stash should be the same as its ledger's total. + /// * Staking locked funds for every bonded stash (non virtual stakers) should be the same as + /// its ledger's total. + /// * For virtual stakers, locked funds should be zero and payee should be non-stash account. /// * Staking ledger and bond are not corrupted. fn check_ledgers() -> Result<(), TryRuntimeError> { Bonded::::iter() .map(|(stash, ctrl)| { // ensure locks consistency. - ensure!( - Self::inspect_bond_state(&stash) == Ok(LedgerIntegrityState::Ok), - "bond, ledger and/or staking lock inconsistent for a bonded stash." - ); + if VirtualStakers::::contains_key(stash.clone()) { + ensure!( + T::Currency::balance_locked(crate::STAKING_ID, &stash) == Zero::zero(), + "virtual stakers should not have any locked balance" + ); + ensure!( + >::get(stash.clone()).unwrap() == stash.clone(), + "stash and controller should be same" + ); + ensure!( + Ledger::::get(stash.clone()).unwrap().stash == stash, + "ledger corrupted for virtual staker" + ); + let reward_destination = >::get(stash.clone()).unwrap(); + if let RewardDestination::Account(payee) = reward_destination { + ensure!( + payee != stash.clone(), + "reward destination should not be same as stash for virtual staker" + ); + } else { + return Err(DispatchError::Other( + "reward destination must be of account variant for virtual staker", + )); + } + } else { + ensure!( + Self::inspect_bond_state(&stash) == Ok(LedgerIntegrityState::Ok), + "bond, ledger and/or staking lock inconsistent for a bonded stash." + ); + } // ensure ledger consistency. Self::ensure_ledger_consistent(ctrl) diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index 2e5b3aa7b87..76ddad6f135 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -32,7 +32,7 @@ use frame_support::{ }; use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; use sp_runtime::{ - traits::{CheckedSub, SaturatedConversion, StaticLookup, Zero}, + traits::{SaturatedConversion, StaticLookup, Zero}, ArithmeticError, Perbill, Percent, }; @@ -379,6 +379,15 @@ pub mod pallet { pub type Nominators = CountedStorageMap<_, Twox64Concat, T::AccountId, Nominations>; + /// Stakers whose funds are managed by other pallets. + /// + /// This pallet does not apply any locks on them, therefore they are only virtually bonded. 
They + /// are expected to be keyless accounts and hence should not be allowed to mutate their ledger + /// directly via this pallet. Instead, these accounts are managed by other pallets and accessed + /// via low level apis. We keep track of them to do minimal integrity checks. + #[pallet::storage] + pub type VirtualStakers = CountedStorageMap<_, Twox64Concat, T::AccountId, ()>; + /// The maximum nominator count before we stop allowing new validators to join. /// /// When this value is not set, no limits are enforced. @@ -858,6 +867,10 @@ pub mod pallet { ControllerDeprecated, /// Cannot reset a ledger. CannotRestoreLedger, + /// Provided reward destination is not allowed. + RewardDestinationRestricted, + /// Not enough funds available to withdraw. + NotEnoughFunds, } #[pallet::hooks] @@ -985,29 +998,7 @@ pub mod pallet { #[pallet::compact] max_additional: BalanceOf, ) -> DispatchResult { let stash = ensure_signed(origin)?; - let mut ledger = Self::ledger(StakingAccount::Stash(stash.clone()))?; - - let stash_balance = T::Currency::free_balance(&stash); - if let Some(extra) = stash_balance.checked_sub(&ledger.total) { - let extra = extra.min(max_additional); - ledger.total += extra; - ledger.active += extra; - // Last check: the new active amount of ledger must be more than ED. - ensure!( - ledger.active >= T::Currency::minimum_balance(), - Error::::InsufficientBond - ); - - // NOTE: ledger must be updated prior to calling `Self::weight_of`. - ledger.update()?; - // update this staker in the sorted list, if they exist in it. - if T::VoterList::contains(&stash) { - let _ = T::VoterList::on_update(&stash, Self::weight_of(&stash)).defensive(); - } - - Self::deposit_event(Event::::Bonded { stash, amount: extra }); - } - Ok(()) + Self::do_bond_extra(&stash, max_additional) } /// Schedule a portion of the stash to be unlocked ready for transfer out after the bond diff --git a/substrate/frame/staking/src/slashing.rs b/substrate/frame/staking/src/slashing.rs index 709fd1441ec..2011e9eb830 100644 --- a/substrate/frame/staking/src/slashing.rs +++ b/substrate/frame/staking/src/slashing.rs @@ -609,8 +609,13 @@ pub fn do_slash( }; let value = ledger.slash(value, T::Currency::minimum_balance(), slash_era); + if value.is_zero() { + // nothing to do + return + } - if !value.is_zero() { + // Skip slashing for virtual stakers. The pallets managing them should handle the slashing. + if !Pallet::::is_virtual_staker(stash) { let (imbalance, missing) = T::Currency::slash(stash, value); slashed_imbalance.subsume(imbalance); @@ -618,17 +623,14 @@ pub fn do_slash( // deduct overslash from the reward payout *reward_payout = reward_payout.saturating_sub(missing); } + } - let _ = ledger - .update() - .defensive_proof("ledger fetched from storage so it exists in storage; qed."); + let _ = ledger + .update() + .defensive_proof("ledger fetched from storage so it exists in storage; qed."); - // trigger the event - >::deposit_event(super::Event::::Slashed { - staker: stash.clone(), - amount: value, - }); - } + // trigger the event + >::deposit_event(super::Event::::Slashed { staker: stash.clone(), amount: value }); } /// Apply a previously-unapplied slash. 
diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index a5c9abe2f17..87f6fd424bd 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -27,7 +27,7 @@ use frame_support::{ assert_noop, assert_ok, assert_storage_noop, dispatch::{extract_actual_weight, GetDispatchInfo, WithPostDispatchInfo}, pallet_prelude::*, - traits::{Currency, Get, ReservableCurrency}, + traits::{Currency, Get, InspectLockableCurrency, ReservableCurrency}, }; use mock::*; @@ -623,12 +623,8 @@ fn nominating_and_rewards_should_work() { )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(1), vec![11, 21, 31])); - assert_ok!(Staking::bond( - RuntimeOrigin::signed(3), - 1000, - RewardDestination::Account(3) - )); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(3), vec![11, 21, 41])); + // the second nominator is virtual. + bond_virtual_nominator(3, 333, 1000, vec![11, 21, 41]); // the total reward for era 0 let total_payout_0 = current_total_payout_for_duration(reward_time_per_era()); @@ -694,10 +690,12 @@ fn nominating_and_rewards_should_work() { ); // Nominator 3: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 21]'s reward. ==> // 2/9 + 3/11 + assert_eq!(Balances::total_balance(&3), initial_balance); + // 333 is the reward destination for 3. assert_eq_error_rate!( - Balances::total_balance(&3), - initial_balance + (2 * payout_for_11 / 9 + 3 * payout_for_21 / 11), - 2, + Balances::total_balance(&333), + 2 * payout_for_11 / 9 + 3 * payout_for_21 / 11, + 2 ); // Validator 11: got 800 / 1800 external stake => 8/18 =? 4/9 => Validator's share = 5/9 @@ -1893,7 +1891,7 @@ fn reap_stash_works() { .balance_factor(10) .build_and_execute(|| { // given - assert_eq!(Balances::free_balance(11), 10 * 1000); + assert_eq!(Balances::balance_locked(STAKING_ID, &11), 10 * 1000); assert_eq!(Staking::bonded(&11), Some(11)); assert!(>::contains_key(&11)); @@ -1919,6 +1917,8 @@ fn reap_stash_works() { assert!(!>::contains_key(&11)); assert!(!>::contains_key(&11)); assert!(!>::contains_key(&11)); + // lock is removed. + assert_eq!(Balances::balance_locked(STAKING_ID, &11), 0); }); } @@ -6849,6 +6849,226 @@ mod staking_interface { } } +mod staking_unchecked { + use sp_staking::{Stake, StakingInterface, StakingUnchecked}; + + use super::*; + + #[test] + fn virtual_bond_does_not_lock() { + ExtBuilder::default().build_and_execute(|| { + mock::start_active_era(1); + assert_eq!(Balances::free_balance(10), 1); + // 10 can bond more than its balance amount since we do not require lock for virtual + // bonding. + assert_ok!(::virtual_bond(&10, 100, &15)); + // nothing is locked on 10. + assert_eq!(Balances::balance_locked(STAKING_ID, &10), 0); + // adding more balance does not lock anything as well. + assert_ok!(::bond_extra(&10, 1000)); + // but ledger is updated correctly. + assert_eq!( + ::stake(&10), + Ok(Stake { total: 1100, active: 1100 }) + ); + + // lets try unbonding some amount. + assert_ok!(::unbond(&10, 200)); + assert_eq!( + Staking::ledger(10.into()).unwrap(), + StakingLedgerInspect { + stash: 10, + total: 1100, + active: 1100 - 200, + unlocking: bounded_vec![UnlockChunk { value: 200, era: 1 + 3 }], + legacy_claimed_rewards: bounded_vec![], + } + ); + + assert_eq!( + ::stake(&10), + Ok(Stake { total: 1100, active: 900 }) + ); + // still no locks. + assert_eq!(Balances::balance_locked(STAKING_ID, &10), 0); + + mock::start_active_era(2); + // cannot withdraw without waiting for unbonding period. 
+ assert_ok!(::withdraw_unbonded(10, 0)); + assert_eq!( + ::stake(&10), + Ok(Stake { total: 1100, active: 900 }) + ); + + // in era 4, 10 can withdraw unlocking amount. + mock::start_active_era(4); + assert_ok!(::withdraw_unbonded(10, 0)); + assert_eq!( + ::stake(&10), + Ok(Stake { total: 900, active: 900 }) + ); + + // unbond all. + assert_ok!(::unbond(&10, 900)); + assert_eq!( + ::stake(&10), + Ok(Stake { total: 900, active: 0 }) + ); + mock::start_active_era(7); + assert_ok!(::withdraw_unbonded(10, 0)); + + // ensure withdrawing all amount cleans up storage. + assert_eq!(Staking::ledger(10.into()), Err(Error::::NotStash)); + assert_eq!(VirtualStakers::::contains_key(10), false); + }) + } + + #[test] + fn virtual_staker_cannot_pay_reward_to_self_account() { + ExtBuilder::default().build_and_execute(|| { + // cannot set payee to self + assert_noop!( + ::virtual_bond(&10, 100, &10), + Error::::RewardDestinationRestricted + ); + + // to another account works + assert_ok!(::virtual_bond(&10, 100, &11)); + + // cannot set via set_payee as well. + assert_noop!( + ::update_payee(&10, &10), + Error::::RewardDestinationRestricted + ); + }); + } + + #[test] + fn virtual_staker_cannot_bond_again() { + ExtBuilder::default().build_and_execute(|| { + // 200 virtual bonds + bond_virtual_nominator(200, 201, 500, vec![11, 21]); + + // Tries bonding again + assert_noop!( + ::virtual_bond(&200, 200, &201), + Error::::AlreadyBonded + ); + + // And again with a different reward destination. + assert_noop!( + ::virtual_bond(&200, 200, &202), + Error::::AlreadyBonded + ); + + // Direct bond is not allowed as well. + assert_noop!( + ::bond(&200, 200, &202), + Error::::AlreadyBonded + ); + }); + } + + #[test] + fn normal_staker_cannot_virtual_bond() { + ExtBuilder::default().build_and_execute(|| { + // 101 is a nominator trying to virtual bond + assert_noop!( + ::virtual_bond(&101, 200, &102), + Error::::AlreadyBonded + ); + + // validator 21 tries to virtual bond + assert_noop!( + ::virtual_bond(&21, 200, &22), + Error::::AlreadyBonded + ); + }); + } + + #[test] + fn migrate_virtual_staker() { + ExtBuilder::default().build_and_execute(|| { + // give some balance to 200 + Balances::make_free_balance_be(&200, 2000); + + // stake + assert_ok!(Staking::bond(RuntimeOrigin::signed(200), 1000, RewardDestination::Staked)); + assert_eq!(Balances::balance_locked(crate::STAKING_ID, &200), 1000); + + // migrate them to virtual staker + ::migrate_to_virtual_staker(&200); + // payee needs to be updated to a non-stash account. + assert_ok!(::update_payee(&200, &201)); + + // ensure the balance is not locked anymore + assert_eq!(Balances::balance_locked(crate::STAKING_ID, &200), 0); + + // and they are marked as virtual stakers + assert_eq!(Pallet::::is_virtual_staker(&200), true); + }); + } + + #[test] + fn virtual_nominators_are_lazily_slashed() { + ExtBuilder::default().build_and_execute(|| { + mock::start_active_era(1); + let slash_percent = Perbill::from_percent(5); + let initial_exposure = Staking::eras_stakers(active_era(), &11); + // 101 is a nominator for 11 + assert_eq!(initial_exposure.others.first().unwrap().who, 101); + // make 101 a virtual nominator + ::migrate_to_virtual_staker(&101); + // set payee different to self. 
+ assert_ok!(::update_payee(&101, &102)); + + // cache values + let nominator_stake = Staking::ledger(101.into()).unwrap().active; + let nominator_balance = balances(&101).0; + let validator_stake = Staking::ledger(11.into()).unwrap().active; + let validator_balance = balances(&11).0; + let exposed_stake = initial_exposure.total; + let exposed_validator = initial_exposure.own; + let exposed_nominator = initial_exposure.others.first().unwrap().value; + + // 11 goes offline + on_offence_now( + &[OffenceDetails { offender: (11, initial_exposure.clone()), reporters: vec![] }], + &[slash_percent], + ); + + let slash_amount = slash_percent * exposed_stake; + let validator_share = + Perbill::from_rational(exposed_validator, exposed_stake) * slash_amount; + let nominator_share = + Perbill::from_rational(exposed_nominator, exposed_stake) * slash_amount; + + // both slash amounts need to be positive for the test to make sense. + assert!(validator_share > 0); + assert!(nominator_share > 0); + + // both stakes must have been decreased pro-rata. + assert_eq!( + Staking::ledger(101.into()).unwrap().active, + nominator_stake - nominator_share + ); + assert_eq!( + Staking::ledger(11.into()).unwrap().active, + validator_stake - validator_share + ); + + // validator balance is slashed as usual + assert_eq!(balances(&11).0, validator_balance - validator_share); + // Because slashing happened. + assert!(is_disabled(11)); + + // but virtual nominator's balance is not slashed. + assert_eq!(Balances::free_balance(&101), nominator_balance); + // but slash is broadcasted to slash observers. + assert_eq!(SlashObserver::get().get(&101).unwrap(), &nominator_share); + }) + } +} mod ledger { use super::*; @@ -7327,7 +7547,6 @@ mod ledger { mod ledger_recovery { use super::*; - use frame_support::traits::InspectLockableCurrency; #[test] fn inspect_recovery_ledger_simple_works() { diff --git a/substrate/primitives/staking/src/lib.rs b/substrate/primitives/staking/src/lib.rs index 11b7ef41b9a..ad6cc6e2f4f 100644 --- a/substrate/primitives/staking/src/lib.rs +++ b/substrate/primitives/staking/src/lib.rs @@ -29,7 +29,7 @@ use core::ops::Sub; use scale_info::TypeInfo; use sp_runtime::{ traits::{AtLeast32BitUnsigned, Zero}, - DispatchError, DispatchResult, RuntimeDebug, Saturating, + DispatchError, DispatchResult, Perbill, RuntimeDebug, Saturating, }; pub mod offence; @@ -254,6 +254,9 @@ pub trait StakingInterface { /// schedules have reached their unlocking era should allow more calls to this function. fn unbond(stash: &Self::AccountId, value: Self::Balance) -> DispatchResult; + /// Update the reward destination for the ledger associated with the stash. + fn update_payee(stash: &Self::AccountId, reward_acc: &Self::AccountId) -> DispatchResult; + /// Unlock any funds schedule to unlock before or at the current era. /// /// Returns whether the stash was killed because of this withdraw or not. @@ -274,7 +277,7 @@ pub trait StakingInterface { /// Checks whether an account `staker` has been exposed in an era. fn is_exposed_in_era(who: &Self::AccountId, era: &EraIndex) -> bool; - /// Return the status of the given staker, `None` if not staked at all. + /// Return the status of the given staker, `Err` if not staked at all. fn status(who: &Self::AccountId) -> Result, DispatchError>; /// Checks whether or not this is a validator account. @@ -290,6 +293,9 @@ pub trait StakingInterface { } } + /// Returns the fraction of the slash to be rewarded to reporter. 
+ fn slash_reward_fraction() -> Perbill; + #[cfg(feature = "runtime-benchmarks")] fn max_exposure_page_size() -> Page; @@ -304,6 +310,34 @@ pub trait StakingInterface { fn set_current_era(era: EraIndex); } +/// Set of low level apis to manipulate staking ledger. +/// +/// These apis bypass some or all safety checks and should only be used if you know what you are +/// doing. +pub trait StakingUnchecked: StakingInterface { + /// Migrate an existing staker to a virtual staker. + /// + /// It would release all funds held by the implementation pallet. + fn migrate_to_virtual_staker(who: &Self::AccountId); + + /// Book-keep a new bond for `keyless_who` without applying any locks (hence virtual). + /// + /// It is important that `keyless_who` is a keyless account and therefore cannot interact with + /// staking pallet directly. Caller is responsible for ensuring the passed amount is locked and + /// valid. + fn virtual_bond( + keyless_who: &Self::AccountId, + value: Self::Balance, + payee: &Self::AccountId, + ) -> DispatchResult; + + /// Migrate a virtual staker to a direct staker. + /// + /// Only used for testing. + #[cfg(feature = "runtime-benchmarks")] + fn migrate_to_direct_staker(who: &Self::AccountId); +} + /// The amount of exposure for an era that an individual nominator has (susceptible to slashing). #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct IndividualExposure { -- GitLab From f3c3ebb6a99295816ac4ee0a26364d736094c147 Mon Sep 17 00:00:00 2001 From: gui Date: Sat, 20 Apr 2024 17:20:35 +0900 Subject: [PATCH 042/107] Fix case in type in macro generation (#4223) The generated type is not camel case, which generates warnings in the IDE. Label should be R0. --- substrate/frame/support/procedural/src/runtime/expand/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/frame/support/procedural/src/runtime/expand/mod.rs b/substrate/frame/support/procedural/src/runtime/expand/mod.rs index 93c88fce94b..011f69f3714 100644 --- a/substrate/frame/support/procedural/src/runtime/expand/mod.rs +++ b/substrate/frame/support/procedural/src/runtime/expand/mod.rs @@ -244,7 +244,7 @@ fn construct_runtime_final_expansion( // Prevent UncheckedExtrinsic to print unused warning. 
const _: () = { #[allow(unused)] - type __hidden_use_of_unchecked_extrinsic = #unchecked_extrinsic; + type __HiddenUseOfUncheckedExtrinsic = #unchecked_extrinsic; }; #[derive( -- GitLab From 253778c94dd64e6bc174ed1e03ac7e0b43990129 Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Mon, 22 Apr 2024 15:08:38 +1000 Subject: [PATCH 043/107] ci: disallow westend migration failure (#4205) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Bastian Köcher --- .gitlab/pipeline/check.yml | 1 - polkadot/runtime/westend/src/lib.rs | 30 +---------------------------- 2 files changed, 1 insertion(+), 30 deletions(-) diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index 89b2c00db9b..6fb8a97fe95 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -132,7 +132,6 @@ check-runtime-migration-westend: WASM: "westend_runtime.compact.compressed.wasm" URI: "wss://westend-try-runtime-node.parity-chains.parity.io:443" SUBCOMMAND_EXTRA_ARGS: "--no-weight-warnings" - allow_failure: true check-runtime-migration-rococo: stage: check diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index a06a1e1f7fc..02933efff94 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1646,36 +1646,8 @@ pub mod migrations { } } - // We don't have a limit in the Relay Chain. - const IDENTITY_MIGRATION_KEY_LIMIT: u64 = u64::MAX; - /// Unreleased migrations. Add new ones here: - pub type Unreleased = ( - parachains_configuration::migration::v7::MigrateToV7, - pallet_staking::migrations::v14::MigrateToV14, - assigned_slots::migration::v1::MigrateToV1, - parachains_scheduler::migration::MigrateV1ToV2, - parachains_configuration::migration::v8::MigrateToV8, - parachains_configuration::migration::v9::MigrateToV9, - paras_registrar::migration::MigrateToV1, - pallet_referenda::migration::v1::MigrateV0ToV1, - pallet_grandpa::migrations::MigrateV4ToV5, - parachains_configuration::migration::v10::MigrateToV10, - pallet_nomination_pools::migration::unversioned::TotalValueLockedSync, - // Migrate Identity pallet for Usernames - pallet_identity::migration::versioned::V0ToV1, - parachains_configuration::migration::v11::MigrateToV11, - parachains_configuration::migration::v12::MigrateToV12, - // permanent - pallet_xcm::migration::MigrateToLatestXcmVersion, - // Migrate from legacy lease to coretime. Needs to run after configuration v11 - coretime::migration::MigrateToCoretime< - Runtime, - crate::xcm_config::XcmRouter, - GetLegacyLeaseImpl, - >, - parachains_inclusion::migration::MigrateToV1, - ); + pub type Unreleased = (); } /// Unchecked extrinsic type as expected by this runtime. -- GitLab From e0202ece6390ad216e0bec455ee8ab47925d9caf Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Mon, 22 Apr 2024 13:26:32 +0200 Subject: [PATCH 044/107] [xcm] Assets: sort after `prepend_with` (#4235) Adds sorting to the XCM Assets' `prepend_with`, which could modify the order of `AssetId` locations. 
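To see why the re-sort is needed, here is a condensed form of the regression test this patch adds (`prepend_preserves_sorting`, shown in full in the diff below). The asset values are taken from that test; the `codec`/`xcm` crate aliases and the `main` wrapper are assumptions made so the sketch stands alone.

```rust
use codec::{Decode, Encode}; // parity-scale-codec, as used by the XCM crates
use xcm::v4::prelude::*; // assuming the usual `xcm = staging-xcm` crate alias

fn main() {
	let prefix = Location::new(0, [Parachain(1000)]);

	// Sorted on construction: parents 0 < 1 < 2.
	let a1: Asset = (Location::new(0, [PalletInstance(50), GeneralIndex(1)]), 10).into();
	let a2: Asset = (Location::new(1, [PalletInstance(50), GeneralIndex(1)]), 10).into();
	let a3: Asset = (Location::new(2, [PalletInstance(50), GeneralIndex(1)]), 10).into();
	let mut assets: Assets = vec![a1, a2, a3].into();

	// Prepending rewrites every id: `a1` gains interior junctions while `a2`
	// and `a3` each lose a parent, so the old order is no longer sorted.
	// Without the `sort()` added by this patch, re-decoding the encoded set
	// could fail its sorted-and-deduplicated invariant.
	assets.prepend_with(&prefix).unwrap();
	assert!(assets.using_encoded(|mut enc| Assets::decode(&mut enc).map(|_| ())).is_ok());
}
```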
Relates to: https://github.com/paritytech/polkadot-sdk/pull/4186 (the same fix for `reanchored`) Part of: https://github.com/paritytech/polkadot-sdk/pull/2129 --- polkadot/xcm/src/v3/multiasset.rs | 71 ++++++++++++++++++++++++++++++- polkadot/xcm/src/v4/asset.rs | 59 ++++++++++++++++++++++++- 2 files changed, 126 insertions(+), 4 deletions(-) diff --git a/polkadot/xcm/src/v3/multiasset.rs b/polkadot/xcm/src/v3/multiasset.rs index f9041ecd81b..0662077b19d 100644 --- a/polkadot/xcm/src/v3/multiasset.rs +++ b/polkadot/xcm/src/v3/multiasset.rs @@ -825,7 +825,9 @@ impl MultiAssets { /// Prepend a `MultiLocation` to any concrete asset items, giving it a new root location. pub fn prepend_with(&mut self, prefix: &MultiLocation) -> Result<(), ()> { - self.0.iter_mut().try_for_each(|i| i.prepend_with(prefix)) + self.0.iter_mut().try_for_each(|i| i.prepend_with(prefix))?; + self.0.sort(); + Ok(()) } /// Mutate the location of the asset identifier if concrete, giving it the same location @@ -1213,8 +1215,73 @@ mod tests { vec![asset_1.clone(), asset_2.clone(), asset_3.clone()].into(); assert_eq!(assets.clone(), vec![asset_1.clone(), asset_2.clone(), asset_3.clone()].into()); + // decoding respects limits and sorting + assert!(assets + .using_encoded(|mut enc| MultiAssets::decode(&mut enc).map(|_| ())) + .is_ok()); + assert!(assets.reanchor(&dest, reanchor_context).is_ok()); - assert_eq!(assets, vec![asset_2_reanchored, asset_3_reanchored, asset_1_reanchored].into()); + assert_eq!(assets.0, vec![asset_2_reanchored, asset_3_reanchored, asset_1_reanchored]); + + // decoding respects limits and sorting + assert!(assets + .using_encoded(|mut enc| MultiAssets::decode(&mut enc).map(|_| ())) + .is_ok()); + } + + #[test] + fn prepend_preserves_sorting() { + use super::*; + use alloc::vec; + + let prefix = MultiLocation::new(0, X1(Parachain(1000))); + + let asset_1: MultiAsset = + (MultiLocation::new(0, X2(PalletInstance(50), GeneralIndex(1))), 10).into(); + let mut asset_1_prepended = asset_1.clone(); + assert!(asset_1_prepended.prepend_with(&prefix).is_ok()); + // changes interior X2->X3 + assert_eq!( + asset_1_prepended, + (MultiLocation::new(0, X3(Parachain(1000), PalletInstance(50), GeneralIndex(1))), 10) + .into() + ); + + let asset_2: MultiAsset = + (MultiLocation::new(1, X2(PalletInstance(50), GeneralIndex(1))), 10).into(); + let mut asset_2_prepended = asset_2.clone(); + assert!(asset_2_prepended.prepend_with(&prefix).is_ok()); + // changes parent + assert_eq!( + asset_2_prepended, + (MultiLocation::new(0, X2(PalletInstance(50), GeneralIndex(1))), 10).into() + ); + + let asset_3: MultiAsset = + (MultiLocation::new(2, X2(PalletInstance(50), GeneralIndex(1))), 10).into(); + let mut asset_3_prepended = asset_3.clone(); + assert!(asset_3_prepended.prepend_with(&prefix).is_ok()); + // changes parent + assert_eq!( + asset_3_prepended, + (MultiLocation::new(1, X2(PalletInstance(50), GeneralIndex(1))), 10).into() + ); + + // `From` impl does sorting. 
+ let mut assets: MultiAssets = vec![asset_1, asset_2, asset_3].into(); + // decoding respects limits and sorting + assert!(assets + .using_encoded(|mut enc| MultiAssets::decode(&mut enc).map(|_| ())) + .is_ok()); + + // let's do `prepend_with` + assert!(assets.prepend_with(&prefix).is_ok()); + assert_eq!(assets.0, vec![asset_2_prepended, asset_1_prepended, asset_3_prepended]); + + // decoding respects limits and sorting + assert!(assets + .using_encoded(|mut enc| MultiAssets::decode(&mut enc).map(|_| ())) + .is_ok()); } #[test] diff --git a/polkadot/xcm/src/v4/asset.rs b/polkadot/xcm/src/v4/asset.rs index bdff0c27230..8abd8f9f8fd 100644 --- a/polkadot/xcm/src/v4/asset.rs +++ b/polkadot/xcm/src/v4/asset.rs @@ -723,7 +723,9 @@ impl Assets { /// Prepend a `Location` to any concrete asset items, giving it a new root location. pub fn prepend_with(&mut self, prefix: &Location) -> Result<(), ()> { - self.0.iter_mut().try_for_each(|i| i.prepend_with(prefix)) + self.0.iter_mut().try_for_each(|i| i.prepend_with(prefix))?; + self.0.sort(); + Ok(()) } /// Return a reference to an item at a specific index or `None` if it doesn't exist. @@ -1035,8 +1037,61 @@ mod tests { let mut assets: Assets = vec![asset_1.clone(), asset_2.clone(), asset_3.clone()].into(); assert_eq!(assets.clone(), vec![asset_1.clone(), asset_2.clone(), asset_3.clone()].into()); + // decoding respects limits and sorting + assert!(assets.using_encoded(|mut enc| Assets::decode(&mut enc).map(|_| ())).is_ok()); + assert!(assets.reanchor(&dest, &reanchor_context).is_ok()); - assert_eq!(assets, vec![asset_2_reanchored, asset_3_reanchored, asset_1_reanchored].into()); + assert_eq!(assets.0, vec![asset_2_reanchored, asset_3_reanchored, asset_1_reanchored]); + + // decoding respects limits and sorting + assert!(assets.using_encoded(|mut enc| Assets::decode(&mut enc).map(|_| ())).is_ok()); + } + + #[test] + fn prepend_preserves_sorting() { + use super::*; + use alloc::vec; + + let prefix = Location::new(0, [Parachain(1000)]); + + let asset_1: Asset = (Location::new(0, [PalletInstance(50), GeneralIndex(1)]), 10).into(); + let mut asset_1_prepended = asset_1.clone(); + assert!(asset_1_prepended.prepend_with(&prefix).is_ok()); + // changes interior X2->X3 + assert_eq!( + asset_1_prepended, + (Location::new(0, [Parachain(1000), PalletInstance(50), GeneralIndex(1)]), 10).into() + ); + + let asset_2: Asset = (Location::new(1, [PalletInstance(50), GeneralIndex(1)]), 10).into(); + let mut asset_2_prepended = asset_2.clone(); + assert!(asset_2_prepended.prepend_with(&prefix).is_ok()); + // changes parent + assert_eq!( + asset_2_prepended, + (Location::new(0, [PalletInstance(50), GeneralIndex(1)]), 10).into() + ); + + let asset_3: Asset = (Location::new(2, [PalletInstance(50), GeneralIndex(1)]), 10).into(); + let mut asset_3_prepended = asset_3.clone(); + assert!(asset_3_prepended.prepend_with(&prefix).is_ok()); + // changes parent + assert_eq!( + asset_3_prepended, + (Location::new(1, [PalletInstance(50), GeneralIndex(1)]), 10).into() + ); + + // `From` impl does sorting. 
+ let mut assets: Assets = vec![asset_1, asset_2, asset_3].into(); + // decoding respects limits and sorting + assert!(assets.using_encoded(|mut enc| Assets::decode(&mut enc).map(|_| ())).is_ok()); + + // let's do `prepend_with` + assert!(assets.prepend_with(&prefix).is_ok()); + assert_eq!(assets.0, vec![asset_2_prepended, asset_1_prepended, asset_3_prepended]); + + // decoding respects limits and sorting + assert!(assets.using_encoded(|mut enc| Assets::decode(&mut enc).map(|_| ())).is_ok()); } #[test] -- GitLab From ff7e2c88a460a0f9df26eb3f33d1c37f72508580 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Mon, 22 Apr 2024 13:34:04 +0200 Subject: [PATCH 045/107] Sanitize `UniversalLocation` witth `GlobalConsensus` + XCM small nits and improvements (#4238) This PR: - sanitizes all `UniversalLocation`s with `GlobalConsensus` (when possible) - addressing [comment](https://github.com/paritytech/polkadot-sdk/pull/4025#discussion_r1557361473) - adds `DefaultConfig` for `pallet-xcm-benchmarks` for `system` --- Cargo.lock | 1 - .../contracts-rococo/src/xcm_config.rs | 4 +-- .../glutton/glutton-westend/src/xcm_config.rs | 4 +-- .../runtimes/starters/shell/src/xcm_config.rs | 4 +-- .../testing/rococo-parachain/src/lib.rs | 4 +-- polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml | 1 - .../src/fungible/mock.rs | 32 +------------------ .../pallet-xcm-benchmarks/src/generic/mock.rs | 31 +----------------- .../xcm/pallet-xcm-benchmarks/src/mock.rs | 2 +- polkadot/xcm/pallet-xcm/src/mock.rs | 2 +- .../xcm/xcm-builder/src/universal_exports.rs | 4 +-- polkadot/xcm/xcm-builder/tests/mock/mod.rs | 2 +- .../xcm-simulator/example/src/parachain.rs | 2 +- .../xcm-simulator/example/src/relay_chain.rs | 2 +- .../xcm/xcm-simulator/fuzzer/src/parachain.rs | 2 +- .../xcm-simulator/fuzzer/src/relay_chain.rs | 2 +- .../contracts/mock-network/src/parachain.rs | 2 +- .../contracts/mock-network/src/relay_chain.rs | 2 +- .../runtime/src/configs/xcm_config.rs | 2 ++ 19 files changed, 23 insertions(+), 82 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 951f2548d34..c932927a905 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11777,7 +11777,6 @@ dependencies = [ "polkadot-primitives", "polkadot-runtime-common", "scale-info", - "sp-core", "sp-io", "sp-runtime", "sp-std 14.0.0", diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs index 46fcbc6319c..9132b4e1760 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs @@ -51,9 +51,9 @@ use xcm_executor::XcmExecutor; parameter_types! 
{ pub const RelayLocation: Location = Location::parent(); - pub const RelayNetwork: Option = None; + pub const RelayNetwork: NetworkId = NetworkId::Rococo; pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); - pub UniversalLocation: InteriorLocation = Parachain(ParachainInfo::parachain_id().into()).into(); + pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(ParachainInfo::parachain_id().into())].into(); pub const ExecutiveBody: BodyId = BodyId::Executive; pub TreasuryAccount: AccountId = TREASURY_PALLET_ID.into_account_truncating(); pub RelayTreasuryLocation: Location = (Parent, PalletInstance(rococo_runtime_constants::TREASURY_PALLET_ID)).into(); diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs index 15bb519e115..9d438a41f8f 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs @@ -30,8 +30,8 @@ use xcm_builder::{ parameter_types! { pub const WestendLocation: Location = Location::parent(); - pub const WestendNetwork: Option = Some(NetworkId::Westend); - pub UniversalLocation: InteriorLocation = [Parachain(ParachainInfo::parachain_id().into())].into(); + pub const WestendNetwork: NetworkId = NetworkId::Westend; + pub UniversalLocation: InteriorLocation = [GlobalConsensus(WestendNetwork::get()), Parachain(ParachainInfo::parachain_id().into())].into(); } /// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, diff --git a/cumulus/parachains/runtimes/starters/shell/src/xcm_config.rs b/cumulus/parachains/runtimes/starters/shell/src/xcm_config.rs index df89158729c..7f9de0f64b3 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/starters/shell/src/xcm_config.rs @@ -30,8 +30,8 @@ use xcm_builder::{ parameter_types! { pub const RococoLocation: Location = Location::parent(); - pub const RococoNetwork: Option = Some(NetworkId::Rococo); - pub UniversalLocation: InteriorLocation = [Parachain(ParachainInfo::parachain_id().into())].into(); + pub const RococoNetwork: NetworkId = NetworkId::Rococo; + pub UniversalLocation: InteriorLocation = [GlobalConsensus(RococoNetwork::get()), Parachain(ParachainInfo::parachain_id().into())].into(); } /// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index df335368be1..0ae93d1577c 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -327,9 +327,9 @@ impl cumulus_pallet_aura_ext::Config for Runtime {} parameter_types! 
{ pub const RocLocation: Location = Location::parent(); - pub const RococoNetwork: Option<NetworkId> = Some(NetworkId::Rococo); + pub const RococoNetwork: NetworkId = NetworkId::Rococo; pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); - pub UniversalLocation: InteriorLocation = [Parachain(ParachainInfo::parachain_id().into())].into(); + pub UniversalLocation: InteriorLocation = [GlobalConsensus(RococoNetwork::get()), Parachain(ParachainInfo::parachain_id().into())].into(); pub CheckingAccount: AccountId = PolkadotXcm::check_account(); } diff --git a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml index 8c71426a6fa..9691ddd4816 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml +++ b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml @@ -29,7 +29,6 @@ log = { workspace = true, default-features = true } [dev-dependencies] pallet-balances = { path = "../../../substrate/frame/balances" } pallet-assets = { path = "../../../substrate/frame/assets" } -sp-core = { path = "../../../substrate/primitives/core" } sp-tracing = { path = "../../../substrate/primitives/tracing" } xcm = { package = "staging-xcm", path = ".." } # temp diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs index c831cd02465..d11f64e7494 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs @@ -20,11 +20,8 @@ use crate::{fungible as xcm_balances_benchmark, mock::*}; use frame_benchmarking::BenchmarkError; use frame_support::{ derive_impl, parameter_types, - traits::{ConstU32, Everything, Nothing}, - weights::Weight, + traits::{Everything, Nothing}, }; -use sp_core::H256; -use sp_runtime::traits::{BlakeTwo256, IdentityLookup}; use xcm::latest::prelude::*; use xcm_builder::{AllowUnpaidExecutionFrom, FrameTransactionalProcessor, MintLocation}; @@ -40,37 +37,10 @@ frame_support::construct_runtime!( } ); -parameter_types! { - pub const BlockHashCount: u64 = 250; - pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(Weight::from_parts(1024, u64::MAX)); -} - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type Hash = H256; - type RuntimeCall = RuntimeCall; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup<u64>; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData<Balance>; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } parameter_types!
{ diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs index 534f7d85ea2..f41df017b9d 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs @@ -21,10 +21,8 @@ use codec::Decode; use frame_support::{ derive_impl, parameter_types, traits::{Contains, Everything, OriginTrait}, - weights::Weight, }; -use sp_core::H256; -use sp_runtime::traits::{BlakeTwo256, IdentityLookup, TrailingZeroInput}; +use sp_runtime::traits::TrailingZeroInput; use xcm_builder::{ test_utils::{ AssetsInHolding, TestAssetExchanger, TestAssetLocker, TestAssetTrap, @@ -45,37 +43,10 @@ frame_support::construct_runtime!( } ); -parameter_types! { - pub const BlockHashCount: u64 = 250; - pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(Weight::from_parts(1024, u64::MAX)); -} - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type Hash = H256; - type RuntimeCall = RuntimeCall; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup<u64>; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData<Balance>; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } /// The benchmarks in this pallet should never need an asset transactor to begin with. diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/mock.rs index 78a9e5f8a01..be3af5d4a3f 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/mock.rs @@ -58,7 +58,7 @@ impl xcm_executor::traits::ConvertLocation<u64> for AccountIdConverter { } parameter_types! { - pub UniversalLocation: InteriorLocation = Junction::Parachain(101).into(); + pub UniversalLocation: InteriorLocation = [GlobalConsensus(ByGenesis([1; 32])), Junction::Parachain(101)].into(); pub UnitWeightCost: Weight = Weight::from_parts(10, 10); pub WeightPrice: (AssetId, u128, u128) = (AssetId(Here.into()), 1_000_000, 1024); } diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index 2cc228476ba..e3680c530e2 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -413,7 +413,7 @@ parameter_types! { )), }; pub const AnyNetwork: Option<NetworkId> = None; - pub UniversalLocation: InteriorLocation = Here; + pub UniversalLocation: InteriorLocation = GlobalConsensus(ByGenesis([0; 32])).into(); pub UnitWeightCost: u64 = 1_000; pub CheckingAccount: AccountId = XcmPallet::check_account(); } diff --git a/polkadot/xcm/xcm-builder/src/universal_exports.rs b/polkadot/xcm/xcm-builder/src/universal_exports.rs index 6e031cdbc27..d0e3ef3032e 100644 --- a/polkadot/xcm/xcm-builder/src/universal_exports.rs +++ b/polkadot/xcm/xcm-builder/src/universal_exports.rs @@ -187,7 +187,7 @@ pub fn forward_id_for(original_id: &XcmHash) -> XcmHash { /// end with the `SetTopic` instruction.
/// /// In the case that the message ends with a `SetTopic(T)` (as should be the case if the top-level -/// router is `EnsureUniqueTopic`), then the forwarding message (i.e. the one carrying the +/// router is `WithUniqueTopic`), then the forwarding message (i.e. the one carrying the /// export instruction *to* the bridge in local consensus) will also end with a `SetTopic` whose /// inner is `forward_id_for(T)`. If this is not the case then the onward message will not be given /// the `SetTopic` afterword. @@ -254,7 +254,7 @@ impl = None; - pub UniversalLocation: InteriorLocation = Here; + pub UniversalLocation: InteriorLocation = RelayNetwork::get().into(); pub UnitWeightCost: u64 = 1_000; } diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs index cadfc1e7200..d8d65fbf0ce 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs @@ -101,7 +101,7 @@ parameter_types! { parameter_types! { pub const KsmLocation: Location = Location::parent(); pub const RelayNetwork: NetworkId = NetworkId::Kusama; - pub UniversalLocation: InteriorLocation = Parachain(MsgQueue::parachain_id().into()).into(); + pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(MsgQueue::parachain_id().into())].into(); } pub type LocationToAccountId = ( diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs index 6790b535d16..47209b765d1 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs @@ -104,7 +104,7 @@ parameter_types! { pub const TokenLocation: Location = Here.into_location(); pub const ThisNetwork: NetworkId = NetworkId::ByGenesis([0; 32]); pub const AnyNetwork: Option<NetworkId> = None; - pub const UniversalLocation: InteriorLocation = Here; + pub UniversalLocation: InteriorLocation = ThisNetwork::get().into(); } pub type SovereignAccountOf = diff --git a/substrate/frame/contracts/mock-network/src/parachain.rs b/substrate/frame/contracts/mock-network/src/parachain.rs index d4ad47581d1..843efab1502 100644 --- a/substrate/frame/contracts/mock-network/src/parachain.rs +++ b/substrate/frame/contracts/mock-network/src/parachain.rs @@ -144,7 +144,7 @@ parameter_types! { pub const KsmLocation: Location = Location::parent(); pub const TokenLocation: Location = Here.into_location(); pub const RelayNetwork: NetworkId = ByGenesis([0; 32]); - pub UniversalLocation: InteriorLocation = Parachain(MsgQueue::parachain_id().into()).into(); + pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(MsgQueue::parachain_id().into())].into(); } pub type XcmOriginToCallOrigin = ( diff --git a/substrate/frame/contracts/mock-network/src/relay_chain.rs b/substrate/frame/contracts/mock-network/src/relay_chain.rs index 470304ed357..d5e0ec9c83f 100644 --- a/substrate/frame/contracts/mock-network/src/relay_chain.rs +++ b/substrate/frame/contracts/mock-network/src/relay_chain.rs @@ -107,7 +107,7 @@ impl configuration::Config for Runtime { parameter_types!
{ pub RelayNetwork: NetworkId = ByGenesis([0; 32]); pub const TokenLocation: Location = Here.into_location(); - pub UniversalLocation: InteriorLocation = Here; + pub UniversalLocation: InteriorLocation = RelayNetwork::get().into(); pub UnitWeightCost: u64 = 1_000; } diff --git a/templates/parachain/runtime/src/configs/xcm_config.rs b/templates/parachain/runtime/src/configs/xcm_config.rs index 13da2363b05..c6b6e8da1b8 100644 --- a/templates/parachain/runtime/src/configs/xcm_config.rs +++ b/templates/parachain/runtime/src/configs/xcm_config.rs @@ -26,6 +26,8 @@ parameter_types! { pub const RelayLocation: Location = Location::parent(); pub const RelayNetwork: Option<NetworkId> = None; pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); + // For the real deployment, it is recommended to set `RelayNetwork` according to the relay chain + // and prepend `UniversalLocation` with `GlobalConsensus(RelayNetwork::get())`. pub UniversalLocation: InteriorLocation = Parachain(ParachainInfo::parachain_id().into()).into(); } -- GitLab From 921265ca7889b9c9bc615af0eced9c6918c8af9f Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 22 Apr 2024 15:06:16 +0300 Subject: [PATCH 046/107] Added prdoc for 4208 (#4239) --- prdoc/pr_4208.prdoc | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 prdoc/pr_4208.prdoc diff --git a/prdoc/pr_4208.prdoc b/prdoc/pr_4208.prdoc new file mode 100644 index 00000000000..be2a1b084a5 --- /dev/null +++ b/prdoc/pr_4208.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fixed GrandpaConsensusLogReader::find_scheduled_change + +doc: + - audience: Runtime Dev + description: | + This PR fixes an issue with the authority set change digest item search + in the bridges code. The issue happens when there are multiple consensus + digest items in the same header digest.
+ +crates: + - name: bp-header-chain -- GitLab From a2a049db2bd669a88f6ab410b22b780ebcc8baee Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Mon, 22 Apr 2024 14:45:54 +0200 Subject: [PATCH 047/107] [subsystem-benchmark] Add approval-voting benchmark to CI (#4216) Co-authored-by: alvicsam --- .gitlab/pipeline/publish.yml | 4 + .gitlab/pipeline/test.yml | 30 +++--- Cargo.lock | 1 + polkadot/node/core/approval-voting/Cargo.toml | 11 +++ .../approval-voting-regression-bench.rs | 94 +++++++++++++++++++ 5 files changed, 125 insertions(+), 15 deletions(-) create mode 100644 polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml index d8f5d583229..68712610ad2 100644 --- a/.gitlab/pipeline/publish.yml +++ b/.gitlab/pipeline/publish.yml @@ -74,6 +74,8 @@ publish-subsystem-benchmarks: artifacts: true - job: subsystem-benchmark-availability-distribution artifacts: true + - job: subsystem-benchmark-approval-voting + artifacts: true - job: publish-rustdoc artifacts: false script: @@ -115,6 +117,8 @@ trigger_workflow: artifacts: true - job: subsystem-benchmark-availability-distribution artifacts: true + - job: subsystem-benchmark-approval-voting + artifacts: true script: - echo "Triggering workflow" - > diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 1d6efd7b9fd..c17a3ce35ea 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -511,7 +511,7 @@ test-syscalls: fi allow_failure: false # this rarely triggers in practice -subsystem-benchmark-availability-recovery: +.subsystem-benchmark-template: stage: test artifacts: name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" @@ -523,26 +523,26 @@ subsystem-benchmark-availability-recovery: - .docker-env - .common-refs - .run-immediately - script: - - cargo bench -p polkadot-availability-recovery --bench availability-recovery-regression-bench --features subsystem-benchmarks tags: - benchmark + +subsystem-benchmark-availability-recovery: + extends: + - .subsystem-benchmark-template + script: + - cargo bench -p polkadot-availability-recovery --bench availability-recovery-regression-bench --features subsystem-benchmarks allow_failure: true subsystem-benchmark-availability-distribution: - stage: test - artifacts: - name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" - when: always - expire_in: 1 hour - paths: - - charts/ extends: - - .docker-env - - .common-refs - - .run-immediately + - .subsystem-benchmark-template script: - cargo bench -p polkadot-availability-distribution --bench availability-distribution-regression-bench --features subsystem-benchmarks - tags: - - benchmark + allow_failure: true + +subsystem-benchmark-approval-voting: + extends: + - .subsystem-benchmark-template + script: + - cargo bench -p polkadot-node-core-approval-voting --bench approval-voting-regression-bench --features subsystem-benchmarks allow_failure: true diff --git a/Cargo.lock b/Cargo.lock index c932927a905..fa5c42c1fa3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13017,6 +13017,7 @@ dependencies = [ "polkadot-overseer", "polkadot-primitives", "polkadot-primitives-test-helpers", + "polkadot-subsystem-bench", "rand 0.8.5", "rand_chacha 0.3.1", "rand_core 0.6.4", diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml index ced7706c40a..473bc67923b 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -53,3 +53,14 @@ kvdb-memorydb = "0.13.0" 
test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } log = { workspace = true, default-features = true } env_logger = "0.11" + +polkadot-subsystem-bench = { path = "../../subsystem-bench" } + +[[bench]] +name = "approval-voting-regression-bench" +path = "benches/approval-voting-regression-bench.rs" +harness = false +required-features = ["subsystem-benchmarks"] + +[features] +subsystem-benchmarks = [] diff --git a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs new file mode 100644 index 00000000000..cad45dc64d2 --- /dev/null +++ b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs @@ -0,0 +1,94 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! approval-voting throughput test +//! +//! Approval Voting benchmark based on Kusama parameters and scale. +//! +//! Subsystems involved: +//! - approval-distribution +//! - approval-voting + +use polkadot_subsystem_bench::{ + self, + approval::{bench_approvals, prepare_test, ApprovalsOptions}, + configuration::TestConfiguration, + usage::BenchmarkUsage, + utils::save_to_file, +}; +use std::io::Write; + +const BENCH_COUNT: usize = 10; + +fn main() -> Result<(), String> { + let mut messages = vec![]; + let mut config = TestConfiguration::default(); + config.n_cores = 100; + config.n_validators = 500; + config.num_blocks = 10; + config.peer_bandwidth = 524288000000; + config.bandwidth = 524288000000; + config.latency = None; + config.connectivity = 100; + config.generate_pov_sizes(); + let options = ApprovalsOptions { + last_considered_tranche: 89, + coalesce_mean: 3.0, + coalesce_std_dev: 1.0, + coalesce_tranche_diff: 12, + enable_assignments_v2: true, + stop_when_approved: false, + workdir_prefix: "/tmp".to_string(), + num_no_shows_per_candidate: 0, + }; + + println!("Benchmarking..."); + let usages: Vec<BenchmarkUsage> = (0..BENCH_COUNT) + .map(|n| { + print!("\r[{}{}]", "#".repeat(n), "_".repeat(BENCH_COUNT - n)); + std::io::stdout().flush().unwrap(); + let (mut env, state) = prepare_test(config.clone(), options.clone(), false); + env.runtime().block_on(bench_approvals("approvals_throughput", &mut env, state)) + }) + .collect(); + println!("\rDone!{}", " ".repeat(BENCH_COUNT)); + + let average_usage = BenchmarkUsage::average(&usages); + save_to_file( + "charts/availability-distribution-regression-bench.json", + average_usage.to_chart_json().map_err(|e| e.to_string())?, + ) + .map_err(|e| e.to_string())?; + println!("{}", average_usage); + + // We expect no variance for received and sent + // but use 0.001 because we operate with floats + messages.extend(average_usage.check_network_usage(&[ + ("Received from peers", 52944.7000, 0.001), + ("Sent to peers", 63532.2000, 0.001), + ])); + messages.extend(average_usage.check_cpu_usage(&[
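+ // Unlike the exact message counts above, CPU time varies slightly between
+ // runs, so these reference values are checked with a wider 0.1 tolerance.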
("approval-distribution", 7.7883, 0.1), + ("approval-voting", 10.4655, 0.1), + ])); + + if messages.is_empty() { + Ok(()) + } else { + eprintln!("{}", messages.join("\n")); + Err("Regressions found".to_string()) + } +} -- GitLab From fb8b64e32b0d59e0e50265fd213837a971895d37 Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Mon, 22 Apr 2024 18:16:42 +0200 Subject: [PATCH 048/107] [subsystem-benchmark] Fix results filename for approval-voting benches (#4243) --- .../approval-voting/benches/approval-voting-regression-bench.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs index cad45dc64d2..7157362a79c 100644 --- a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs +++ b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs @@ -68,7 +68,7 @@ fn main() -> Result<(), String> { let average_usage = BenchmarkUsage::average(&usages); save_to_file( - "charts/availability-distribution-regression-bench.json", + "charts/approval-voting-regression-bench.json", average_usage.to_chart_json().map_err(|e| e.to_string())?, ) .map_err(|e| e.to_string())?; -- GitLab From 3380e21cd92690c2066f686164a954ba7cd17244 Mon Sep 17 00:00:00 2001 From: Przemek Rzad Date: Mon, 22 Apr 2024 18:34:29 +0200 Subject: [PATCH 049/107] Use default branch of `psvm` when synchronizing templates (#4240) We cannot lock to a specific version of `psvm`, because we will need to keep it up-to-date - each release currently requires a change in `psvm` such as [this one](https://github.com/paritytech/psvm/pull/2/files). There is no `stable` branch in `psvm` repo or anything so using the default branch. --- .github/workflows/sync-templates.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sync-templates.yml b/.github/workflows/sync-templates.yml index 511c9d0e8cd..3617d6c34a3 100644 --- a/.github/workflows/sync-templates.yml +++ b/.github/workflows/sync-templates.yml @@ -61,7 +61,7 @@ jobs: - name: Install toml-cli run: cargo install --git https://github.com/gnprice/toml-cli --rev ea69e9d2ca4f0f858110dc7a5ae28bcb918c07fb # v0.2.3 - name: Install Polkadot SDK Version Manager - run: cargo install --git https://github.com/paritytech/psvm --rev c41261ffb52ab0c115adbbdb17e2cb7900d2bdfd psvm # master + run: cargo install --git https://github.com/paritytech/psvm psvm - name: Rust compilation prerequisites run: | sudo apt update -- GitLab From bd9287f766bded2022036a63d12fb86a2f7174a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 22 Apr 2024 21:28:27 +0200 Subject: [PATCH 050/107] wasm-builder: Make it easier to build a WASM binary (#4177) Basically combines all the recommended calls into one `build_using_defaults()` call or `init_with_defaults()` when there are some custom changes required. 
--- .../runtimes/assets/asset-hub-rococo/build.rs | 6 +--- .../assets/asset-hub-westend/build.rs | 6 +--- .../collectives/collectives-westend/build.rs | 6 +--- .../contracts/contracts-rococo/build.rs | 6 +--- .../runtimes/glutton/glutton-westend/build.rs | 6 +--- .../runtimes/testing/penpal/build.rs | 6 +--- .../testing/rococo-parachain/build.rs | 6 +--- cumulus/test/runtime/build.rs | 10 ++---- polkadot/runtime/rococo/build.rs | 11 ++----- polkadot/runtime/test-runtime/build.rs | 6 +--- polkadot/runtime/westend/build.rs | 6 +--- prdoc/pr_4177.prdoc | 12 +++++++ substrate/utils/wasm-builder/src/builder.rs | 33 +++++++++++++++++++ substrate/utils/wasm-builder/src/lib.rs | 12 ++----- templates/minimal/runtime/build.rs | 6 +--- templates/parachain/runtime/build.rs | 6 +--- templates/solochain/runtime/build.rs | 6 +--- 17 files changed, 64 insertions(+), 86 deletions(-) create mode 100644 prdoc/pr_4177.prdoc diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs index 60f8a125129..239ccac19ec 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs @@ -15,11 +15,7 @@ #[cfg(feature = "std")] fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() + substrate_wasm_builder::WasmBuilder::build_using_defaults(); } #[cfg(not(feature = "std"))] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/build.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/build.rs index 60f8a125129..239ccac19ec 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/build.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/build.rs @@ -15,11 +15,7 @@ #[cfg(feature = "std")] fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() + substrate_wasm_builder::WasmBuilder::build_using_defaults(); } #[cfg(not(feature = "std"))] diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/build.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/build.rs index 60f8a125129..239ccac19ec 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/build.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/build.rs @@ -15,11 +15,7 @@ #[cfg(feature = "std")] fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() + substrate_wasm_builder::WasmBuilder::build_using_defaults(); } #[cfg(not(feature = "std"))] diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/build.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/build.rs index 60f8a125129..239ccac19ec 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/build.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/build.rs @@ -15,11 +15,7 @@ #[cfg(feature = "std")] fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() + substrate_wasm_builder::WasmBuilder::build_using_defaults(); } #[cfg(not(feature = "std"))] diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/build.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/build.rs index 1580e6f07be..2f311357403 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/build.rs +++ 
b/cumulus/parachains/runtimes/glutton/glutton-westend/build.rs @@ -16,9 +16,5 @@ use substrate_wasm_builder::WasmBuilder; fn main() { - WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() + WasmBuilder::build_using_defaults(); } diff --git a/cumulus/parachains/runtimes/testing/penpal/build.rs b/cumulus/parachains/runtimes/testing/penpal/build.rs index 9c9cde9a25a..c2fa89aa702 100644 --- a/cumulus/parachains/runtimes/testing/penpal/build.rs +++ b/cumulus/parachains/runtimes/testing/penpal/build.rs @@ -16,11 +16,7 @@ #[cfg(feature = "std")] fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() + substrate_wasm_builder::WasmBuilder::build_using_defaults(); } #[cfg(not(feature = "std"))] diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/build.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/build.rs index 60f8a125129..239ccac19ec 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/build.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/build.rs @@ -15,11 +15,7 @@ #[cfg(feature = "std")] fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() + substrate_wasm_builder::WasmBuilder::build_using_defaults(); } #[cfg(not(feature = "std"))] diff --git a/cumulus/test/runtime/build.rs b/cumulus/test/runtime/build.rs index 5e5f6a35a50..ebd5c178cba 100644 --- a/cumulus/test/runtime/build.rs +++ b/cumulus/test/runtime/build.rs @@ -18,16 +18,10 @@ fn main() { use substrate_wasm_builder::WasmBuilder; - WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build(); + WasmBuilder::build_using_defaults(); - WasmBuilder::new() - .with_current_project() + WasmBuilder::init_with_defaults() .enable_feature("increment-spec-version") - .import_memory() .set_file_name("wasm_binary_spec_version_incremented.rs") .build(); } diff --git a/polkadot/runtime/rococo/build.rs b/polkadot/runtime/rococo/build.rs index 0b7ee77b0d0..403c31ff21c 100644 --- a/polkadot/runtime/rococo/build.rs +++ b/polkadot/runtime/rococo/build.rs @@ -16,18 +16,11 @@ #[cfg(feature = "std")] fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .import_memory() - .export_heap_base() - .build(); + substrate_wasm_builder::WasmBuilder::build_using_defaults(); - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() + substrate_wasm_builder::WasmBuilder::init_with_defaults() .set_file_name("fast_runtime_binary.rs") .enable_feature("fast-runtime") - .import_memory() - .export_heap_base() .build(); } diff --git a/polkadot/runtime/test-runtime/build.rs b/polkadot/runtime/test-runtime/build.rs index 404ba3f2fdb..caf24317d0b 100644 --- a/polkadot/runtime/test-runtime/build.rs +++ b/polkadot/runtime/test-runtime/build.rs @@ -17,9 +17,5 @@ use substrate_wasm_builder::WasmBuilder; fn main() { - WasmBuilder::new() - .with_current_project() - .import_memory() - .export_heap_base() - .build() + WasmBuilder::build_using_defaults(); } diff --git a/polkadot/runtime/westend/build.rs b/polkadot/runtime/westend/build.rs index 428c971bc13..0b3e12c78c7 100644 --- a/polkadot/runtime/westend/build.rs +++ b/polkadot/runtime/westend/build.rs @@ -17,9 +17,5 @@ use substrate_wasm_builder::WasmBuilder; fn main() { - WasmBuilder::new() - .with_current_project() - .import_memory() - .export_heap_base() - .build() + WasmBuilder::build_using_defaults(); 
} diff --git a/prdoc/pr_4177.prdoc b/prdoc/pr_4177.prdoc new file mode 100644 index 00000000000..29d011c9351 --- /dev/null +++ b/prdoc/pr_4177.prdoc @@ -0,0 +1,12 @@ +title: "wasm-builder: Make it easier to build a WASM binary" + +doc: + - audience: [Runtime Dev, Node Dev] + description: | + Combines all the recommended calls of the `WasmBuilder` into + `build_using_defaults()` or `init_with_defaults()` if more changes are required. + Otherwise the interface doesn't change and users can still continue to use + the "old" interface. + +crates: + - name: substrate-wasm-builder diff --git a/substrate/utils/wasm-builder/src/builder.rs b/substrate/utils/wasm-builder/src/builder.rs index d2aaff448bc..163703fbec6 100644 --- a/substrate/utils/wasm-builder/src/builder.rs +++ b/substrate/utils/wasm-builder/src/builder.rs @@ -116,6 +116,39 @@ impl WasmBuilder { WasmBuilderSelectProject { _ignore: () } } + /// Build the WASM binary using the recommended default values. + /// + /// This is the same as calling: + /// ```no_run + /// substrate_wasm_builder::WasmBuilder::new() + /// .with_current_project() + /// .import_memory() + /// .export_heap_base() + /// .build(); + /// ``` + pub fn build_using_defaults() { + WasmBuilder::new() + .with_current_project() + .import_memory() + .export_heap_base() + .build(); + } + + /// Init the wasm builder with the recommended default values. + /// + /// In contrast to [`Self::build_using_defaults`] it does not build the WASM binary directly. + /// + /// This is the same as calling: + /// ```no_run + /// substrate_wasm_builder::WasmBuilder::new() + /// .with_current_project() + /// .import_memory() + /// .export_heap_base(); + /// ``` + pub fn init_with_defaults() -> Self { + WasmBuilder::new().with_current_project().import_memory().export_heap_base() + } + /// Enable exporting `__heap_base` as global variable in the WASM binary. /// /// This adds `-Clink-arg=--export=__heap_base` to `RUST_FLAGS`. diff --git a/substrate/utils/wasm-builder/src/lib.rs b/substrate/utils/wasm-builder/src/lib.rs index 178e499e8f5..9ebab38b9cb 100644 --- a/substrate/utils/wasm-builder/src/lib.rs +++ b/substrate/utils/wasm-builder/src/lib.rs @@ -33,15 +33,9 @@ //! use substrate_wasm_builder::WasmBuilder; //! //! fn main() { -//! WasmBuilder::new() -//! // Tell the builder to build the project (crate) this `build.rs` is part of. -//! .with_current_project() -//! // Make sure to export the `heap_base` global, this is required by Substrate -//! .export_heap_base() -//! // Build the Wasm file so that it imports the memory (need to be provided by at instantiation) -//! .import_memory() -//! // Build it. -//! .build() +//! // Builds the WASM binary using the recommended defaults. +//! // If you need more control, you can call `new` or `init_with_defaults`. +//! WasmBuilder::build_using_defaults(); //! } //! ``` //! 
diff --git a/templates/minimal/runtime/build.rs b/templates/minimal/runtime/build.rs index b7676a70dfe..e6f92757e22 100644 --- a/templates/minimal/runtime/build.rs +++ b/templates/minimal/runtime/build.rs @@ -18,10 +18,6 @@ fn main() { #[cfg(feature = "std")] { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build(); + substrate_wasm_builder::WasmBuilder::build_using_defaults(); } } diff --git a/templates/parachain/runtime/build.rs b/templates/parachain/runtime/build.rs index 02d6973f29c..bb05afe02b1 100644 --- a/templates/parachain/runtime/build.rs +++ b/templates/parachain/runtime/build.rs @@ -1,10 +1,6 @@ #[cfg(feature = "std")] fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() + substrate_wasm_builder::WasmBuilder::build_using_defaults(); } /// The wasm builder is deactivated when compiling diff --git a/templates/solochain/runtime/build.rs b/templates/solochain/runtime/build.rs index c03d618535b..f262c320393 100644 --- a/templates/solochain/runtime/build.rs +++ b/templates/solochain/runtime/build.rs @@ -1,10 +1,6 @@ fn main() { #[cfg(feature = "std")] { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build(); + substrate_wasm_builder::WasmBuilder::build_using_defaults(); } } -- GitLab From 84c294c3821baf8b81693ce6e5615b9e157b5303 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Tue, 23 Apr 2024 01:10:07 +0300 Subject: [PATCH 051/107] [testnets] remove XCM SafeCallFilter for chains using Weights::v3 (#4199) Weights::v3 also accounts for PoV weight so we no longer need the SafeCallFilter. All calls are allowed as long as they "fit in the block". --- .../assets/asset-hub-rococo/src/xcm_config.rs | 223 +---------------- .../asset-hub-westend/src/xcm_config.rs | 228 +----------------- .../bridge-hub-rococo/src/xcm_config.rs | 114 +-------- .../bridge-hub-westend/src/xcm_config.rs | 74 +----- .../collectives-westend/src/xcm_config.rs | 83 +------ .../coretime-rococo/src/xcm_config.rs | 49 +--- .../coretime-westend/src/xcm_config.rs | 48 +--- .../people/people-rococo/src/xcm_config.rs | 55 +---- .../people/people-westend/src/xcm_config.rs | 55 +---- prdoc/pr_4199.prdoc | 29 +++ 10 files changed, 56 insertions(+), 902 deletions(-) create mode 100644 prdoc/pr_4199.prdoc diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index fceb82b6b06..dbf27fb39ac 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -61,7 +61,7 @@ use xcm_builder::{ WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; -use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; +use xcm_executor::XcmExecutor; parameter_types! { pub const TokenLocation: Location = Location::parent(); @@ -263,223 +263,6 @@ impl Contains<Location> for ParentOrParentsPlurality { } } -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters.
-pub struct SafeCallFilter; -impl Contains<RuntimeCall> for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - // Allow to change dedicated storage items (called by governance-like) - match call { - RuntimeCall::System(frame_system::Call::set_storage { items }) - if items.iter().all(|(k, _)| { - k.eq(&bridging::XcmBridgeHubRouterByteFee::key()) || - k.eq(&bridging::XcmBridgeHubRouterBaseFee::key()) || - k.eq(&bridging::to_ethereum::BridgeHubEthereumBaseFee::key()) - }) => - return true, - _ => (), - }; - - matches!( - call, - RuntimeCall::PolkadotXcm( - pallet_xcm::Call::force_xcm_version { .. } | - pallet_xcm::Call::force_default_xcm_version { .. } - ) | RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::authorize_upgrade { .. } | - frame_system::Call::authorize_upgrade_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection(..) | - RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::MessageQueue(..) | - RuntimeCall::Assets( - pallet_assets::Call::create { .. } | - pallet_assets::Call::force_create { .. } | - pallet_assets::Call::start_destroy { .. } | - pallet_assets::Call::destroy_accounts { .. } | - pallet_assets::Call::destroy_approvals { .. } | - pallet_assets::Call::finish_destroy { .. } | - pallet_assets::Call::block { .. } | - pallet_assets::Call::mint { .. } | - pallet_assets::Call::burn { .. } | - pallet_assets::Call::transfer { .. } | - pallet_assets::Call::transfer_keep_alive { .. } | - pallet_assets::Call::force_transfer { .. } | - pallet_assets::Call::freeze { .. } | - pallet_assets::Call::thaw { .. } | - pallet_assets::Call::freeze_asset { .. } | - pallet_assets::Call::thaw_asset { .. } | - pallet_assets::Call::transfer_ownership { .. } | - pallet_assets::Call::set_team { .. } | - pallet_assets::Call::set_metadata { .. } | - pallet_assets::Call::clear_metadata { .. } | - pallet_assets::Call::force_set_metadata { .. } | - pallet_assets::Call::force_clear_metadata { .. } | - pallet_assets::Call::force_asset_status { .. } | - pallet_assets::Call::approve_transfer { .. } | - pallet_assets::Call::cancel_approval { .. } | - pallet_assets::Call::force_cancel_approval { .. } | - pallet_assets::Call::transfer_approved { .. } | - pallet_assets::Call::touch { .. } | - pallet_assets::Call::touch_other { .. } | - pallet_assets::Call::refund { .. } | - pallet_assets::Call::refund_other { .. }, - ) | RuntimeCall::ForeignAssets( - pallet_assets::Call::create { .. } | - pallet_assets::Call::force_create { .. } | - pallet_assets::Call::start_destroy { .. } | - pallet_assets::Call::destroy_accounts { .. } | - pallet_assets::Call::destroy_approvals { .. } | - pallet_assets::Call::finish_destroy { .. } | - pallet_assets::Call::block { .. } | - pallet_assets::Call::mint { .. } | - pallet_assets::Call::burn { .. } | - pallet_assets::Call::transfer { .. } | - pallet_assets::Call::transfer_keep_alive { .. } | - pallet_assets::Call::force_transfer { .. } | - pallet_assets::Call::freeze { .. } | - pallet_assets::Call::thaw { .. } | - pallet_assets::Call::freeze_asset { .. } | - pallet_assets::Call::thaw_asset { ..
} | - pallet_assets::Call::transfer_ownership { .. } | - pallet_assets::Call::set_team { .. } | - pallet_assets::Call::set_metadata { .. } | - pallet_assets::Call::clear_metadata { .. } | - pallet_assets::Call::force_set_metadata { .. } | - pallet_assets::Call::force_clear_metadata { .. } | - pallet_assets::Call::force_asset_status { .. } | - pallet_assets::Call::approve_transfer { .. } | - pallet_assets::Call::cancel_approval { .. } | - pallet_assets::Call::force_cancel_approval { .. } | - pallet_assets::Call::transfer_approved { .. } | - pallet_assets::Call::touch { .. } | - pallet_assets::Call::touch_other { .. } | - pallet_assets::Call::refund { .. } | - pallet_assets::Call::refund_other { .. }, - ) | RuntimeCall::PoolAssets( - pallet_assets::Call::force_create { .. } | - pallet_assets::Call::block { .. } | - pallet_assets::Call::burn { .. } | - pallet_assets::Call::transfer { .. } | - pallet_assets::Call::transfer_keep_alive { .. } | - pallet_assets::Call::force_transfer { .. } | - pallet_assets::Call::freeze { .. } | - pallet_assets::Call::thaw { .. } | - pallet_assets::Call::freeze_asset { .. } | - pallet_assets::Call::thaw_asset { .. } | - pallet_assets::Call::transfer_ownership { .. } | - pallet_assets::Call::set_team { .. } | - pallet_assets::Call::set_metadata { .. } | - pallet_assets::Call::clear_metadata { .. } | - pallet_assets::Call::force_set_metadata { .. } | - pallet_assets::Call::force_clear_metadata { .. } | - pallet_assets::Call::force_asset_status { .. } | - pallet_assets::Call::approve_transfer { .. } | - pallet_assets::Call::cancel_approval { .. } | - pallet_assets::Call::force_cancel_approval { .. } | - pallet_assets::Call::transfer_approved { .. } | - pallet_assets::Call::touch { .. } | - pallet_assets::Call::touch_other { .. } | - pallet_assets::Call::refund { .. } | - pallet_assets::Call::refund_other { .. }, - ) | RuntimeCall::AssetConversion( - pallet_asset_conversion::Call::create_pool { .. } | - pallet_asset_conversion::Call::add_liquidity { .. } | - pallet_asset_conversion::Call::remove_liquidity { .. } | - pallet_asset_conversion::Call::swap_tokens_for_exact_tokens { .. } | - pallet_asset_conversion::Call::swap_exact_tokens_for_tokens { .. }, - ) | RuntimeCall::NftFractionalization( - pallet_nft_fractionalization::Call::fractionalize { .. } | - pallet_nft_fractionalization::Call::unify { .. }, - ) | RuntimeCall::Nfts( - pallet_nfts::Call::create { .. } | - pallet_nfts::Call::force_create { .. } | - pallet_nfts::Call::destroy { .. } | - pallet_nfts::Call::mint { .. } | - pallet_nfts::Call::force_mint { .. } | - pallet_nfts::Call::burn { .. } | - pallet_nfts::Call::transfer { .. } | - pallet_nfts::Call::lock_item_transfer { .. } | - pallet_nfts::Call::unlock_item_transfer { .. } | - pallet_nfts::Call::lock_collection { .. } | - pallet_nfts::Call::transfer_ownership { .. } | - pallet_nfts::Call::set_team { .. } | - pallet_nfts::Call::force_collection_owner { .. } | - pallet_nfts::Call::force_collection_config { .. } | - pallet_nfts::Call::approve_transfer { .. } | - pallet_nfts::Call::cancel_approval { .. } | - pallet_nfts::Call::clear_all_transfer_approvals { .. } | - pallet_nfts::Call::lock_item_properties { .. } | - pallet_nfts::Call::set_attribute { .. } | - pallet_nfts::Call::force_set_attribute { .. } | - pallet_nfts::Call::clear_attribute { .. } | - pallet_nfts::Call::approve_item_attributes { .. } | - pallet_nfts::Call::cancel_item_attributes_approval { .. } | - pallet_nfts::Call::set_metadata { .. } | - pallet_nfts::Call::clear_metadata { .. 
} | - pallet_nfts::Call::set_collection_metadata { .. } | - pallet_nfts::Call::clear_collection_metadata { .. } | - pallet_nfts::Call::set_accept_ownership { .. } | - pallet_nfts::Call::set_collection_max_supply { .. } | - pallet_nfts::Call::update_mint_settings { .. } | - pallet_nfts::Call::set_price { .. } | - pallet_nfts::Call::buy_item { .. } | - pallet_nfts::Call::pay_tips { .. } | - pallet_nfts::Call::create_swap { .. } | - pallet_nfts::Call::cancel_swap { .. } | - pallet_nfts::Call::claim_swap { .. }, - ) | RuntimeCall::Uniques( - pallet_uniques::Call::create { .. } | - pallet_uniques::Call::force_create { .. } | - pallet_uniques::Call::destroy { .. } | - pallet_uniques::Call::mint { .. } | - pallet_uniques::Call::burn { .. } | - pallet_uniques::Call::transfer { .. } | - pallet_uniques::Call::freeze { .. } | - pallet_uniques::Call::thaw { .. } | - pallet_uniques::Call::freeze_collection { .. } | - pallet_uniques::Call::thaw_collection { .. } | - pallet_uniques::Call::transfer_ownership { .. } | - pallet_uniques::Call::set_team { .. } | - pallet_uniques::Call::approve_transfer { .. } | - pallet_uniques::Call::cancel_approval { .. } | - pallet_uniques::Call::force_item_status { .. } | - pallet_uniques::Call::set_attribute { .. } | - pallet_uniques::Call::clear_attribute { .. } | - pallet_uniques::Call::set_metadata { .. } | - pallet_uniques::Call::clear_metadata { .. } | - pallet_uniques::Call::set_collection_metadata { .. } | - pallet_uniques::Call::clear_collection_metadata { .. } | - pallet_uniques::Call::set_accept_ownership { .. } | - pallet_uniques::Call::set_collection_max_supply { .. } | - pallet_uniques::Call::set_price { .. } | - pallet_uniques::Call::buy_item { .. } - ) | RuntimeCall::ToWestendXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. } - ) - ) - } -} - pub type Barrier = TrailingSetTopicAsId< DenyThenTry< DenyReserveTransferToRelayChain, @@ -632,8 +415,8 @@ impl xcm_executor::Config for XcmConfig { type MessageExporter = (); type UniversalAliases = (bridging::to_westend::UniversalAliases, bridging::to_ethereum::UniversalAliases); - type CallDispatcher = WithOriginFilter<SafeCallFilter>; - type SafeCallFilter = SafeCallFilter; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index 41e941ee9a2..ed8a58af396 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -57,7 +57,7 @@ use xcm_builder::{ WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; -use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; +use xcm_executor::XcmExecutor; parameter_types! { pub const WestendLocation: Location = Location::parent(); @@ -275,228 +275,6 @@ impl Contains<Location> for AmbassadorEntities { } } -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters.
-pub struct SafeCallFilter; -impl Contains<RuntimeCall> for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - // Allow to change dedicated storage items (called by governance-like) - match call { - RuntimeCall::System(frame_system::Call::set_storage { items }) - if items.iter().all(|(k, _)| k.eq(&bridging::XcmBridgeHubRouterByteFee::key())) || - items - .iter() - .all(|(k, _)| k.eq(&bridging::XcmBridgeHubRouterBaseFee::key())) => - return true, - _ => (), - }; - - matches!( - call, - RuntimeCall::PolkadotXcm( - pallet_xcm::Call::force_xcm_version { .. } | - pallet_xcm::Call::force_default_xcm_version { .. } - ) | RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::authorize_upgrade { .. } | - frame_system::Call::authorize_upgrade_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection(..) | - RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::MessageQueue(..) | - RuntimeCall::Assets( - pallet_assets::Call::create { .. } | - pallet_assets::Call::force_create { .. } | - pallet_assets::Call::start_destroy { .. } | - pallet_assets::Call::destroy_accounts { .. } | - pallet_assets::Call::destroy_approvals { .. } | - pallet_assets::Call::finish_destroy { .. } | - pallet_assets::Call::block { .. } | - pallet_assets::Call::mint { .. } | - pallet_assets::Call::burn { .. } | - pallet_assets::Call::transfer { .. } | - pallet_assets::Call::transfer_keep_alive { .. } | - pallet_assets::Call::force_transfer { .. } | - pallet_assets::Call::freeze { .. } | - pallet_assets::Call::thaw { .. } | - pallet_assets::Call::freeze_asset { .. } | - pallet_assets::Call::thaw_asset { .. } | - pallet_assets::Call::transfer_ownership { .. } | - pallet_assets::Call::set_team { .. } | - pallet_assets::Call::set_metadata { .. } | - pallet_assets::Call::clear_metadata { .. } | - pallet_assets::Call::force_set_metadata { .. } | - pallet_assets::Call::force_clear_metadata { .. } | - pallet_assets::Call::force_asset_status { .. } | - pallet_assets::Call::approve_transfer { .. } | - pallet_assets::Call::cancel_approval { .. } | - pallet_assets::Call::force_cancel_approval { .. } | - pallet_assets::Call::transfer_approved { .. } | - pallet_assets::Call::touch { .. } | - pallet_assets::Call::touch_other { .. } | - pallet_assets::Call::refund { .. } | - pallet_assets::Call::refund_other { .. }, - ) | RuntimeCall::ForeignAssets( - pallet_assets::Call::create { .. } | - pallet_assets::Call::force_create { .. } | - pallet_assets::Call::start_destroy { .. } | - pallet_assets::Call::destroy_accounts { .. } | - pallet_assets::Call::destroy_approvals { .. } | - pallet_assets::Call::finish_destroy { .. } | - pallet_assets::Call::block { .. } | - pallet_assets::Call::mint { .. } | - pallet_assets::Call::burn { .. } | - pallet_assets::Call::transfer { .. } | - pallet_assets::Call::transfer_keep_alive { .. } | - pallet_assets::Call::force_transfer { .. } | - pallet_assets::Call::freeze { .. } | - pallet_assets::Call::thaw { .. } | - pallet_assets::Call::freeze_asset { .. } | - pallet_assets::Call::thaw_asset { ..
} | - pallet_assets::Call::transfer_ownership { .. } | - pallet_assets::Call::set_team { .. } | - pallet_assets::Call::set_metadata { .. } | - pallet_assets::Call::clear_metadata { .. } | - pallet_assets::Call::force_set_metadata { .. } | - pallet_assets::Call::force_clear_metadata { .. } | - pallet_assets::Call::force_asset_status { .. } | - pallet_assets::Call::approve_transfer { .. } | - pallet_assets::Call::cancel_approval { .. } | - pallet_assets::Call::force_cancel_approval { .. } | - pallet_assets::Call::transfer_approved { .. } | - pallet_assets::Call::touch { .. } | - pallet_assets::Call::touch_other { .. } | - pallet_assets::Call::refund { .. } | - pallet_assets::Call::refund_other { .. }, - ) | RuntimeCall::PoolAssets( - pallet_assets::Call::create { .. } | - pallet_assets::Call::force_create { .. } | - pallet_assets::Call::start_destroy { .. } | - pallet_assets::Call::destroy_accounts { .. } | - pallet_assets::Call::destroy_approvals { .. } | - pallet_assets::Call::finish_destroy { .. } | - pallet_assets::Call::block { .. } | - pallet_assets::Call::mint { .. } | - pallet_assets::Call::burn { .. } | - pallet_assets::Call::transfer { .. } | - pallet_assets::Call::transfer_keep_alive { .. } | - pallet_assets::Call::force_transfer { .. } | - pallet_assets::Call::freeze { .. } | - pallet_assets::Call::thaw { .. } | - pallet_assets::Call::freeze_asset { .. } | - pallet_assets::Call::thaw_asset { .. } | - pallet_assets::Call::transfer_ownership { .. } | - pallet_assets::Call::set_team { .. } | - pallet_assets::Call::set_metadata { .. } | - pallet_assets::Call::clear_metadata { .. } | - pallet_assets::Call::force_set_metadata { .. } | - pallet_assets::Call::force_clear_metadata { .. } | - pallet_assets::Call::force_asset_status { .. } | - pallet_assets::Call::approve_transfer { .. } | - pallet_assets::Call::cancel_approval { .. } | - pallet_assets::Call::force_cancel_approval { .. } | - pallet_assets::Call::transfer_approved { .. } | - pallet_assets::Call::touch { .. } | - pallet_assets::Call::touch_other { .. } | - pallet_assets::Call::refund { .. } | - pallet_assets::Call::refund_other { .. }, - ) | RuntimeCall::AssetConversion( - pallet_asset_conversion::Call::create_pool { .. } | - pallet_asset_conversion::Call::add_liquidity { .. } | - pallet_asset_conversion::Call::remove_liquidity { .. } | - pallet_asset_conversion::Call::swap_tokens_for_exact_tokens { .. } | - pallet_asset_conversion::Call::swap_exact_tokens_for_tokens { .. }, - ) | RuntimeCall::NftFractionalization( - pallet_nft_fractionalization::Call::fractionalize { .. } | - pallet_nft_fractionalization::Call::unify { .. }, - ) | RuntimeCall::Nfts( - pallet_nfts::Call::create { .. } | - pallet_nfts::Call::force_create { .. } | - pallet_nfts::Call::destroy { .. } | - pallet_nfts::Call::mint { .. } | - pallet_nfts::Call::force_mint { .. } | - pallet_nfts::Call::burn { .. } | - pallet_nfts::Call::transfer { .. } | - pallet_nfts::Call::lock_item_transfer { .. } | - pallet_nfts::Call::unlock_item_transfer { .. } | - pallet_nfts::Call::lock_collection { .. } | - pallet_nfts::Call::transfer_ownership { .. } | - pallet_nfts::Call::set_team { .. } | - pallet_nfts::Call::force_collection_owner { .. } | - pallet_nfts::Call::force_collection_config { .. } | - pallet_nfts::Call::approve_transfer { .. } | - pallet_nfts::Call::cancel_approval { .. } | - pallet_nfts::Call::clear_all_transfer_approvals { .. } | - pallet_nfts::Call::lock_item_properties { .. } | - pallet_nfts::Call::set_attribute { .. 
} | - pallet_nfts::Call::force_set_attribute { .. } | - pallet_nfts::Call::clear_attribute { .. } | - pallet_nfts::Call::approve_item_attributes { .. } | - pallet_nfts::Call::cancel_item_attributes_approval { .. } | - pallet_nfts::Call::set_metadata { .. } | - pallet_nfts::Call::clear_metadata { .. } | - pallet_nfts::Call::set_collection_metadata { .. } | - pallet_nfts::Call::clear_collection_metadata { .. } | - pallet_nfts::Call::set_accept_ownership { .. } | - pallet_nfts::Call::set_collection_max_supply { .. } | - pallet_nfts::Call::update_mint_settings { .. } | - pallet_nfts::Call::set_price { .. } | - pallet_nfts::Call::buy_item { .. } | - pallet_nfts::Call::pay_tips { .. } | - pallet_nfts::Call::create_swap { .. } | - pallet_nfts::Call::cancel_swap { .. } | - pallet_nfts::Call::claim_swap { .. }, - ) | RuntimeCall::Uniques( - pallet_uniques::Call::create { .. } | - pallet_uniques::Call::force_create { .. } | - pallet_uniques::Call::destroy { .. } | - pallet_uniques::Call::mint { .. } | - pallet_uniques::Call::burn { .. } | - pallet_uniques::Call::transfer { .. } | - pallet_uniques::Call::freeze { .. } | - pallet_uniques::Call::thaw { .. } | - pallet_uniques::Call::freeze_collection { .. } | - pallet_uniques::Call::thaw_collection { .. } | - pallet_uniques::Call::transfer_ownership { .. } | - pallet_uniques::Call::set_team { .. } | - pallet_uniques::Call::approve_transfer { .. } | - pallet_uniques::Call::cancel_approval { .. } | - pallet_uniques::Call::force_item_status { .. } | - pallet_uniques::Call::set_attribute { .. } | - pallet_uniques::Call::clear_attribute { .. } | - pallet_uniques::Call::set_metadata { .. } | - pallet_uniques::Call::clear_metadata { .. } | - pallet_uniques::Call::set_collection_metadata { .. } | - pallet_uniques::Call::clear_collection_metadata { .. } | - pallet_uniques::Call::set_accept_ownership { .. } | - pallet_uniques::Call::set_collection_max_supply { .. } | - pallet_uniques::Call::set_price { .. } | - pallet_uniques::Call::buy_item { .. } - ) | RuntimeCall::ToRococoXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. 
} - ) - ) - } -} - pub type Barrier = TrailingSetTopicAsId< DenyThenTry< DenyReserveTransferToRelayChain, @@ -653,8 +653,8 @@ impl xcm_executor::Config for XcmConfig { >; type MessageExporter = (); type UniversalAliases = (bridging::to_rococo::UniversalAliases,); - type CallDispatcher = WithOriginFilter<SafeCallFilter>; - type SafeCallFilter = SafeCallFilter; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index 063c999aa7a..f354ccce21f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -19,22 +19,12 @@ use super::{ ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, WeightToFee, XcmpQueue, }; -use crate::{ - bridge_common_config::{ - BridgeGrandpaRococoBulletinInstance, BridgeGrandpaWestendInstance, - BridgeParachainWestendInstance, DeliveryRewardInBalance, RequiredStakeForStakeAndSlash, - }, - bridge_to_bulletin_config::WithRococoBulletinMessagesInstance, - bridge_to_westend_config::WithBridgeHubWestendMessagesInstance, - EthereumGatewayAddress, -}; use bp_messages::LaneId; use bp_relayers::{PayRewardFromAccount, RewardsAccountOwner, RewardsAccountParams}; use bp_runtime::ChainId; use frame_support::{ parameter_types, traits::{tokens::imbalance::ResolveTo, ConstU32, Contains, Equals, Everything, Nothing}, - StoragePrefixedMap, }; use frame_system::EnsureRoot; use pallet_collator_selection::StakingPotAccountId; @@ -64,7 +54,7 @@ use xcm_builder::{ UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeToAccount, }; use xcm_executor::{ - traits::{FeeManager, FeeReason, FeeReason::Export, TransactAsset, WithOriginFilter}, + traits::{FeeManager, FeeReason, FeeReason::Export, TransactAsset}, XcmExecutor, }; @@ -138,104 +128,6 @@ impl Contains<Location> for ParentOrParentsPlurality { } } -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. -pub struct SafeCallFilter; -impl Contains<RuntimeCall> for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - // Allow to change dedicated storage items (called by governance-like) - match call { - RuntimeCall::System(frame_system::Call::set_storage { items }) - if items.iter().all(|(k, _)| { - k.eq(&DeliveryRewardInBalance::key()) || - k.eq(&RequiredStakeForStakeAndSlash::key()) || - k.eq(&EthereumGatewayAddress::key()) || - // Allow resetting of Ethereum nonces in Rococo only. - k.starts_with(&snowbridge_pallet_inbound_queue::Nonce::<Runtime>::final_prefix()) || - k.starts_with(&snowbridge_pallet_outbound_queue::Nonce::<Runtime>::final_prefix()) - }) => - return true, - _ => (), - }; - - matches!( - call, - RuntimeCall::PolkadotXcm( - pallet_xcm::Call::force_xcm_version { .. } | - pallet_xcm::Call::force_default_xcm_version { ..
} - ) | RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::authorize_upgrade { .. } | - frame_system::Call::authorize_upgrade_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection(..) | - RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::MessageQueue(..) | - RuntimeCall::BridgeWestendGrandpa(pallet_bridge_grandpa::Call::< - Runtime, - BridgeGrandpaWestendInstance, - >::initialize { .. }) | - RuntimeCall::BridgeWestendGrandpa(pallet_bridge_grandpa::Call::< - Runtime, - BridgeGrandpaWestendInstance, - >::set_operating_mode { .. }) | - RuntimeCall::BridgeWestendParachains(pallet_bridge_parachains::Call::< - Runtime, - BridgeParachainWestendInstance, - >::set_operating_mode { .. }) | - RuntimeCall::BridgeWestendMessages(pallet_bridge_messages::Call::< - Runtime, - WithBridgeHubWestendMessagesInstance, - >::set_operating_mode { .. }) | - RuntimeCall::BridgePolkadotBulletinGrandpa(pallet_bridge_grandpa::Call::< - Runtime, - BridgeGrandpaRococoBulletinInstance, - >::initialize { .. }) | - RuntimeCall::BridgePolkadotBulletinGrandpa(pallet_bridge_grandpa::Call::< - Runtime, - BridgeGrandpaRococoBulletinInstance, - >::set_operating_mode { .. }) | - RuntimeCall::BridgePolkadotBulletinMessages(pallet_bridge_messages::Call::< - Runtime, - WithRococoBulletinMessagesInstance, - >::set_operating_mode { .. }) | - RuntimeCall::EthereumBeaconClient( - snowbridge_pallet_ethereum_client::Call::force_checkpoint { .. } | - snowbridge_pallet_ethereum_client::Call::set_operating_mode { .. }, - ) | RuntimeCall::EthereumInboundQueue( - snowbridge_pallet_inbound_queue::Call::set_operating_mode { .. }, - ) | RuntimeCall::EthereumOutboundQueue( - snowbridge_pallet_outbound_queue::Call::set_operating_mode { .. }, - ) | RuntimeCall::EthereumSystem( - snowbridge_pallet_system::Call::upgrade { .. } | - snowbridge_pallet_system::Call::set_operating_mode { .. } | - snowbridge_pallet_system::Call::set_pricing_parameters { .. } | - snowbridge_pallet_system::Call::force_update_channel { .. } | - snowbridge_pallet_system::Call::force_transfer_native_from_agent { .. } | - snowbridge_pallet_system::Call::set_token_transfer_fees { .. 
}, - ) - ) - } -} - pub type Barrier = TrailingSetTopicAsId< DenyThenTry< DenyReserveTransferToRelayChain, @@ -337,8 +229,8 @@ impl xcm_executor::Config for XcmConfig { crate::bridge_to_ethereum_config::SnowbridgeExporter, ); type UniversalAliases = Nothing; - type CallDispatcher = WithOriginFilter; - type SafeCallFilter = SafeCallFilter; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs index 4870b4a52d7..31c37c8ffab 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs @@ -19,7 +19,6 @@ use super::{ ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, WeightToFee, XcmpQueue, }; -use crate::bridge_common_config::{DeliveryRewardInBalance, RequiredStakeForStakeAndSlash}; use frame_support::{ parameter_types, traits::{tokens::imbalance::ResolveTo, ConstU32, Contains, Equals, Everything, Nothing}, @@ -48,7 +47,7 @@ use xcm_builder::{ WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; -use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; +use xcm_executor::XcmExecutor; parameter_types! { pub const WestendLocation: Location = Location::parent(); @@ -119,73 +118,6 @@ impl Contains for ParentOrParentsPlurality { } } -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. -pub struct SafeCallFilter; -impl Contains for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - // Allow to change dedicated storage items (called by governance-like) - match call { - RuntimeCall::System(frame_system::Call::set_storage { items }) - if items.iter().all(|(k, _)| { - k.eq(&DeliveryRewardInBalance::key()) | - k.eq(&RequiredStakeForStakeAndSlash::key()) - }) => - return true, - _ => (), - }; - - matches!( - call, - RuntimeCall::PolkadotXcm( - pallet_xcm::Call::force_xcm_version { .. } | - pallet_xcm::Call::force_default_xcm_version { .. } - ) | RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::authorize_upgrade { .. } | - frame_system::Call::authorize_upgrade_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection(..) | - RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::MessageQueue(..) | - RuntimeCall::BridgeRococoGrandpa(pallet_bridge_grandpa::Call::< - Runtime, - crate::bridge_to_rococo_config::BridgeGrandpaRococoInstance, - >::initialize { .. 
}) | - RuntimeCall::BridgeRococoGrandpa(pallet_bridge_grandpa::Call::< - Runtime, - crate::bridge_to_rococo_config::BridgeGrandpaRococoInstance, - >::set_operating_mode { .. }) | - RuntimeCall::BridgeRococoParachains(pallet_bridge_parachains::Call::< - Runtime, - crate::bridge_to_rococo_config::BridgeParachainRococoInstance, - >::set_operating_mode { .. }) | - RuntimeCall::BridgeRococoMessages(pallet_bridge_messages::Call::< - Runtime, - crate::bridge_to_rococo_config::WithBridgeHubRococoMessagesInstance, - >::set_operating_mode { .. }) - ) - } -} - pub type Barrier = TrailingSetTopicAsId< DenyThenTry< DenyReserveTransferToRelayChain, @@ -265,8 +197,8 @@ impl xcm_executor::Config for XcmConfig { >; type MessageExporter = (crate::bridge_to_rococo_config::ToBridgeHubRococoHaulBlobExporter,); type UniversalAliases = Nothing; - type CallDispatcher = WithOriginFilter; - type SafeCallFilter = SafeCallFilter; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs index 21ccd3b9cdb..4449284b8aa 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs @@ -44,7 +44,7 @@ use xcm_builder::{ TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; -use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; +use xcm_executor::XcmExecutor; parameter_types! { pub const WndLocation: Location = Location::parent(); @@ -138,83 +138,6 @@ impl Contains for ParentOrParentsPlurality { } } -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. -pub struct SafeCallFilter; -impl Contains for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - matches!( - call, - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::authorize_upgrade { .. } | - frame_system::Call::authorize_upgrade_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection(..) | - RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::PolkadotXcm( - pallet_xcm::Call::force_xcm_version { .. } | - pallet_xcm::Call::force_default_xcm_version { .. } - ) | RuntimeCall::XcmpQueue(..) | - RuntimeCall::MessageQueue(..) | - RuntimeCall::Alliance( - // `init_members` accepts unbounded vecs as arguments, - // but the call can be initiated only by root origin. - pallet_alliance::Call::init_members { .. } | - pallet_alliance::Call::vote { .. } | - pallet_alliance::Call::disband { .. 
} | - pallet_alliance::Call::set_rule { .. } | - pallet_alliance::Call::announce { .. } | - pallet_alliance::Call::remove_announcement { .. } | - pallet_alliance::Call::join_alliance { .. } | - pallet_alliance::Call::nominate_ally { .. } | - pallet_alliance::Call::elevate_ally { .. } | - pallet_alliance::Call::give_retirement_notice { .. } | - pallet_alliance::Call::retire { .. } | - pallet_alliance::Call::kick_member { .. } | - pallet_alliance::Call::close { .. } | - pallet_alliance::Call::abdicate_fellow_status { .. }, - ) | RuntimeCall::AllianceMotion( - pallet_collective::Call::vote { .. } | - pallet_collective::Call::disapprove_proposal { .. } | - pallet_collective::Call::close { .. }, - ) | RuntimeCall::FellowshipCollective( - pallet_ranked_collective::Call::add_member { .. } | - pallet_ranked_collective::Call::promote_member { .. } | - pallet_ranked_collective::Call::demote_member { .. } | - pallet_ranked_collective::Call::remove_member { .. }, - ) | RuntimeCall::FellowshipCore( - pallet_core_fellowship::Call::bump { .. } | - pallet_core_fellowship::Call::set_params { .. } | - pallet_core_fellowship::Call::set_active { .. } | - pallet_core_fellowship::Call::approve { .. } | - pallet_core_fellowship::Call::induct { .. } | - pallet_core_fellowship::Call::promote { .. } | - pallet_core_fellowship::Call::offboard { .. } | - pallet_core_fellowship::Call::submit_evidence { .. } | - pallet_core_fellowship::Call::import { .. }, - ) - ) - } -} - pub type Barrier = TrailingSetTopicAsId< DenyThenTry< DenyReserveTransferToRelayChain, @@ -287,8 +210,8 @@ impl xcm_executor::Config for XcmConfig { >; type MessageExporter = (); type UniversalAliases = Nothing; - type CallDispatcher = WithOriginFilter; - type SafeCallFilter = SafeCallFilter; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs index 7eab53b8a7c..3e71730e015 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs @@ -48,7 +48,7 @@ use xcm_builder::{ UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; -use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; +use xcm_executor::XcmExecutor; parameter_types! { pub const RocRelayLocation: Location = Location::parent(); @@ -139,49 +139,6 @@ impl Contains for ParentOrParentsPlurality { } } -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. -pub struct SafeCallFilter; -impl Contains for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - matches!( - call, - RuntimeCall::PolkadotXcm( - pallet_xcm::Call::force_xcm_version { .. } | - pallet_xcm::Call::force_default_xcm_version { .. } - ) | RuntimeCall::System( - frame_system::Call::set_heap_pages { .. 
} | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::authorize_upgrade { .. } | - frame_system::Call::authorize_upgrade_without_checks { .. } | - frame_system::Call::kill_prefix { .. } | - // Should not be in Polkadot/Kusama. Here in order to speed up testing. - frame_system::Call::set_storage { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::Sudo(..) | - RuntimeCall::CollatorSelection(..) | - RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::Broker(..) - ) - } -} - pub type Barrier = TrailingSetTopicAsId< DenyThenTry< DenyReserveTransferToRelayChain, @@ -258,8 +215,8 @@ impl xcm_executor::Config for XcmConfig { >; type MessageExporter = (); type UniversalAliases = Nothing; - type CallDispatcher = WithOriginFilter; - type SafeCallFilter = SafeCallFilter; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs index e1452ec63f2..fc7ecf1e61c 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs @@ -48,7 +48,7 @@ use xcm_builder::{ UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; -use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; +use xcm_executor::XcmExecutor; parameter_types! { pub const TokenRelayLocation: Location = Location::parent(); @@ -146,48 +146,6 @@ impl Contains for FellowsPlurality { } } -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. -pub struct SafeCallFilter; -impl Contains for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - matches!( - call, - RuntimeCall::PolkadotXcm( - pallet_xcm::Call::force_xcm_version { .. } | - pallet_xcm::Call::force_default_xcm_version { .. } - ) | RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::authorize_upgrade { .. } | - frame_system::Call::authorize_upgrade_without_checks { .. } | - frame_system::Call::kill_prefix { .. } | - // Should not be in Polkadot/Kusama. Here in order to speed up testing. - frame_system::Call::set_storage { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection(..) | - RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::Broker(..) 
- ) - } -} - pub type Barrier = TrailingSetTopicAsId< DenyThenTry< DenyReserveTransferToRelayChain, @@ -265,8 +223,8 @@ impl xcm_executor::Config for XcmConfig { >; type MessageExporter = (); type UniversalAliases = Nothing; - type CallDispatcher = WithOriginFilter; - type SafeCallFilter = SafeCallFilter; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs index 1a42adeafd1..e4e4fa1b2c4 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs @@ -45,7 +45,7 @@ use xcm_builder::{ UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; -use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; +use xcm_executor::XcmExecutor; parameter_types! { pub const RootLocation: Location = Location::here(); @@ -148,55 +148,6 @@ impl Contains for ParentOrParentsPlurality { } } -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. -pub struct SafeCallFilter; -impl Contains for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - matches!( - call, - RuntimeCall::PolkadotXcm( - pallet_xcm::Call::force_xcm_version { .. } | - pallet_xcm::Call::force_default_xcm_version { .. } - ) | RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::authorize_upgrade { .. } | - frame_system::Call::authorize_upgrade_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection( - pallet_collator_selection::Call::set_desired_candidates { .. } | - pallet_collator_selection::Call::set_candidacy_bond { .. } | - pallet_collator_selection::Call::register_as_candidate { .. } | - pallet_collator_selection::Call::leave_intent { .. } | - pallet_collator_selection::Call::set_invulnerables { .. } | - pallet_collator_selection::Call::add_invulnerable { .. } | - pallet_collator_selection::Call::remove_invulnerable { .. }, - ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::MessageQueue(..) | - RuntimeCall::Identity(..) | - RuntimeCall::IdentityMigrator(..) 
- ) - } -} - pub type Barrier = TrailingSetTopicAsId< DenyThenTry< DenyReserveTransferToRelayChain, @@ -270,8 +221,8 @@ impl xcm_executor::Config for XcmConfig { >; type MessageExporter = (); type UniversalAliases = Nothing; - type CallDispatcher = WithOriginFilter; - type SafeCallFilter = SafeCallFilter; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); diff --git a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs index 11492327064..590f23f6853 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs @@ -45,7 +45,7 @@ use xcm_builder::{ UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; -use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; +use xcm_executor::XcmExecutor; parameter_types! { pub const RootLocation: Location = Location::here(); @@ -155,55 +155,6 @@ impl Contains for FellowsPlurality { } } -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. -pub struct SafeCallFilter; -impl Contains for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - matches!( - call, - RuntimeCall::PolkadotXcm( - pallet_xcm::Call::force_xcm_version { .. } | - pallet_xcm::Call::force_default_xcm_version { .. } - ) | RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::authorize_upgrade { .. } | - frame_system::Call::authorize_upgrade_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection( - pallet_collator_selection::Call::set_desired_candidates { .. } | - pallet_collator_selection::Call::set_candidacy_bond { .. } | - pallet_collator_selection::Call::register_as_candidate { .. } | - pallet_collator_selection::Call::leave_intent { .. } | - pallet_collator_selection::Call::set_invulnerables { .. } | - pallet_collator_selection::Call::add_invulnerable { .. } | - pallet_collator_selection::Call::remove_invulnerable { .. }, - ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::MessageQueue(..) | - RuntimeCall::Identity(..) | - RuntimeCall::IdentityMigrator(..) 
- ) - } -} - pub type Barrier = TrailingSetTopicAsId< DenyThenTry< DenyReserveTransferToRelayChain, @@ -278,8 +229,8 @@ impl xcm_executor::Config for XcmConfig { >; type MessageExporter = (); type UniversalAliases = Nothing; - type CallDispatcher = WithOriginFilter; - type SafeCallFilter = SafeCallFilter; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); diff --git a/prdoc/pr_4199.prdoc b/prdoc/pr_4199.prdoc new file mode 100644 index 00000000000..39f08a0532b --- /dev/null +++ b/prdoc/pr_4199.prdoc @@ -0,0 +1,29 @@ +title: "Remove XCM SafeCallFilter for chains using Weights::v3" + +doc: + - audience: Runtime User + description: | + `SafeCallFilter` was removed from Rococo and Westend relay and system chains as they + all now use Weights::v3 which already accounts for call PoV size. + This effectively removes artificial limitations on what users can `XCM::Transact` on + these chains (blockspace limitations are still upheld). + +crates: + - name: asset-hub-rococo-runtime + bump: minor + - name: asset-hub-westend-runtime + bump: minor + - name: bridge-hub-rococo-runtime + bump: minor + - name: bridge-hub-westend-runtime + bump: minor + - name: collectives-westend-runtime + bump: minor + - name: coretime-rococo-runtime + bump: minor + - name: coretime-westend-runtime + bump: minor + - name: people-rococo-runtime + bump: minor + - name: people-westend-runtime + bump: minor -- GitLab From 157294b0d39f1b3dd7307a70c77de5267134ede2 Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Tue, 23 Apr 2024 11:17:06 +0300 Subject: [PATCH 052/107] Add metric for time spent waiting in the execution queue (#4250) Add a metric to measure how long jobs wait in the execution queue for an available worker. https://github.com/paritytech/polkadot-sdk/issues/4126 Signed-off-by: Alexandru Gheorghe --- polkadot/node/core/pvf/src/execute/queue.rs | 3 ++ polkadot/node/core/pvf/src/metrics.rs | 32 +++++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/polkadot/node/core/pvf/src/execute/queue.rs b/polkadot/node/core/pvf/src/execute/queue.rs index af147a2ba22..bb00a5a652d 100644 --- a/polkadot/node/core/pvf/src/execute/queue.rs +++ b/polkadot/node/core/pvf/src/execute/queue.rs @@ -562,6 +562,9 @@ fn assign(queue: &mut Queue, worker: Worker, job: ExecuteJob) { thus claim_idle cannot return None; qed.", ); + queue + .metrics + .observe_execution_queued_time(job.waiting_since.elapsed().as_millis() as u32); let execution_timer = queue.metrics.time_execution(); queue.mux.push( async move { diff --git a/polkadot/node/core/pvf/src/metrics.rs b/polkadot/node/core/pvf/src/metrics.rs index 7fd876cf174..bc8d300037f 100644 --- a/polkadot/node/core/pvf/src/metrics.rs +++ b/polkadot/node/core/pvf/src/metrics.rs @@ -74,6 +74,12 @@ impl Metrics { self.0.as_ref().map(|metrics| metrics.execution_time.start_timer()) } + pub(crate) fn observe_execution_queued_time(&self, queued_for_millis: u32) { + self.0.as_ref().map(|metrics| { + metrics.execution_queued_time.observe(queued_for_millis as f64 / 1000 as f64) + }); + } + /// Observe memory stats for preparation.
#[allow(unused_variables)] pub(crate) fn observe_preparation_memory_metrics(&self, memory_stats: MemoryStats) { @@ -112,6 +118,7 @@ struct MetricsInner { execute_finished: prometheus::Counter, preparation_time: prometheus::Histogram, execution_time: prometheus::Histogram, + execution_queued_time: prometheus::Histogram, #[cfg(target_os = "linux")] preparation_max_rss: prometheus::Histogram, // Max. allocated memory, tracked by Jemallocator, polling-based @@ -240,6 +247,31 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + execution_queued_time: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_pvf_execution_queued_time", + "Time spent in the queue waiting for a PVF execution job to be assigned", + ).buckets(vec![ + 0.01, + 0.025, + 0.05, + 0.1, + 0.25, + 0.5, + 1.0, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0, + 12.0, + 24.0, + 48.0, + ]), + )?, + registry, + )?, #[cfg(target_os = "linux")] preparation_max_rss: prometheus::register( prometheus::Histogram::with_opts( -- GitLab From 7f1646eb3837bfa53fb1cb8eabd7a0e1026469b8 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Tue, 23 Apr 2024 10:38:20 +0200 Subject: [PATCH 053/107] Add `validate_xcm_nesting` to the `ParentAsUmp` and `ChildParachainRouter` (#4236) This PR: - moves `validate_xcm_nesting` from `XcmpQueue` into the `VersionedXcm` - adds `validate_xcm_nesting` to the `ParentAsUmp` - adds `validate_xcm_nesting` to the `ChildParachainRouter` Based on discussion [here](https://github.com/paritytech/polkadot-sdk/pull/4186#discussion_r1571344270) and/or [here](https://github.com/paritytech/polkadot-sdk/pull/4186#discussion_r1572076666) and/or [here]() ## Question/TODO - [x] To the [comment](https://github.com/paritytech/polkadot-sdk/pull/4186#discussion_r1572072295) - Why was `validate_xcm_nesting` added just to the `XcmpQueue` router and nowhere else? What kind of problem does `MAX_XCM_DECODE_DEPTH` solve?
(see [comment](https://github.com/paritytech/polkadot-sdk/pull/4236#discussion_r1574605191)) --- cumulus/pallets/xcmp-queue/src/lib.rs | 17 +--- .../assets/asset-hub-rococo/src/lib.rs | 16 ++-- .../assets/asset-hub-westend/src/lib.rs | 16 ++-- cumulus/primitives/utility/src/lib.rs | 28 ++++++ .../runtime/common/src/integration_tests.rs | 5 +- polkadot/runtime/common/src/xcm_sender.rs | 44 ++++++++- .../src/fungible/benchmarking.rs | 26 ++++-- .../src/fungible/mock.rs | 7 +- .../src/generic/benchmarking.rs | 36 ++++--- .../pallet-xcm-benchmarks/src/generic/mock.rs | 5 +- polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs | 13 ++- polkadot/xcm/src/lib.rs | 93 ++++++++++++++++++- 12 files changed, 237 insertions(+), 69 deletions(-) diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index deced13a9e8..7de2fd80942 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -916,7 +916,8 @@ impl SendXcm for Pallet { let price = T::PriceForSiblingDelivery::price_for_delivery(id, &xcm); let versioned_xcm = T::VersionWrapper::wrap_version(&d, xcm) .map_err(|()| SendError::DestinationUnsupported)?; - validate_xcm_nesting(&versioned_xcm) + versioned_xcm + .validate_xcm_nesting() .map_err(|()| SendError::ExceedsMaxMessageSize)?; Ok(((id, versioned_xcm), price)) @@ -932,10 +933,6 @@ impl SendXcm for Pallet { fn deliver((id, xcm): (ParaId, VersionedXcm<()>)) -> Result { let hash = xcm.using_encoded(sp_io::hashing::blake2_256); - defensive_assert!( - validate_xcm_nesting(&xcm).is_ok(), - "Tickets are valid prior to delivery by trait XCM; qed" - ); match Self::send_fragment(id, XcmpMessageFormat::ConcatenatedVersionedXcm, xcm) { Ok(_) => { @@ -950,16 +947,6 @@ impl SendXcm for Pallet { } } -/// Checks that the XCM is decodable with `MAX_XCM_DECODE_DEPTH`. -/// -/// Note that this uses the limit of the sender - not the receiver. It it best effort. -pub(crate) fn validate_xcm_nesting(xcm: &VersionedXcm<()>) -> Result<(), ()> { - xcm.using_encoded(|mut enc| { - VersionedXcm::<()>::decode_all_with_depth_limit(MAX_XCM_DECODE_DEPTH, &mut enc).map(|_| ()) - }) - .map_err(|_| ()) -} - impl FeeTracker for Pallet { type Id = ParaId; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 5cb29343a1c..201647ac2eb 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -1519,27 +1519,23 @@ impl_runtime_apis! { fn worst_case_holding(depositable_count: u32) -> xcm::v4::Assets { // A mix of fungible, non-fungible, and concrete assets. 
let holding_non_fungibles = MaxAssetsIntoHolding::get() / 2 - depositable_count; - let holding_fungibles = holding_non_fungibles.saturating_sub(1); + let holding_fungibles = holding_non_fungibles.saturating_sub(2); // -2 for two `iter::once` below let fungibles_amount: u128 = 100; - let mut assets = (0..holding_fungibles) + (0..holding_fungibles) .map(|i| { Asset { id: GeneralIndex(i as u128).into(), - fun: Fungible(fungibles_amount * i as u128), + fun: Fungible(fungibles_amount * (i + 1) as u128), // non-zero amount } }) .chain(core::iter::once(Asset { id: Here.into(), fun: Fungible(u128::MAX) })) + .chain(core::iter::once(Asset { id: AssetId(TokenLocation::get()), fun: Fungible(1_000_000 * UNITS) })) .chain((0..holding_non_fungibles).map(|i| Asset { id: GeneralIndex(i as u128).into(), fun: NonFungible(asset_instance_from(i)), })) - .collect::>(); - - assets.push(Asset { - id: AssetId(TokenLocation::get()), - fun: Fungible(1_000_000 * UNITS), - }); - assets.into() + .collect::>() + .into() } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 366fb91723a..78c83cf6922 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1610,27 +1610,23 @@ impl_runtime_apis! { fn worst_case_holding(depositable_count: u32) -> xcm::v4::Assets { // A mix of fungible, non-fungible, and concrete assets. let holding_non_fungibles = MaxAssetsIntoHolding::get() / 2 - depositable_count; - let holding_fungibles = holding_non_fungibles - 1; + let holding_fungibles = holding_non_fungibles - 2; // -2 for two `iter::once` below let fungibles_amount: u128 = 100; - let mut assets = (0..holding_fungibles) + (0..holding_fungibles) .map(|i| { Asset { id: AssetId(GeneralIndex(i as u128).into()), - fun: Fungible(fungibles_amount * i as u128), + fun: Fungible(fungibles_amount * (i + 1) as u128), // non-zero amount } }) .chain(core::iter::once(Asset { id: AssetId(Here.into()), fun: Fungible(u128::MAX) })) + .chain(core::iter::once(Asset { id: AssetId(WestendLocation::get()), fun: Fungible(1_000_000 * UNITS) })) .chain((0..holding_non_fungibles).map(|i| Asset { id: AssetId(GeneralIndex(i as u128).into()), fun: NonFungible(asset_instance_from(i)), })) - .collect::>(); - - assets.push(Asset { - id: AssetId(WestendLocation::get()), - fun: Fungible(1_000_000 * UNITS), - }); - assets.into() + .collect::>() + .into() } } diff --git a/cumulus/primitives/utility/src/lib.rs b/cumulus/primitives/utility/src/lib.rs index d5d411356dc..54f40bd0109 100644 --- a/cumulus/primitives/utility/src/lib.rs +++ b/cumulus/primitives/utility/src/lib.rs @@ -69,6 +69,9 @@ where let price = P::price_for_delivery((), &xcm); let versioned_xcm = W::wrap_version(&d, xcm).map_err(|()| SendError::DestinationUnsupported)?; + versioned_xcm + .validate_xcm_nesting() + .map_err(|()| SendError::ExceedsMaxMessageSize)?; let data = versioned_xcm.encode(); Ok((data, price)) @@ -526,6 +529,8 @@ impl< mod test_xcm_router { use super::*; use cumulus_primitives_core::UpwardMessage; + use frame_support::assert_ok; + use xcm::MAX_XCM_DECODE_DEPTH; /// Validates [`validate`] for required Some(destination) and Some(message) struct OkFixedXcmHashWithAssertingRequiredInputsSender; @@ -621,6 +626,29 @@ mod test_xcm_router { )>(dest.into(), message) ); } + + #[test] + fn parent_as_ump_validate_nested_xcm_works() { + let dest = Parent; + + type Router = ParentAsUmp<(), (), ()>; + + //
Message that is not too deeply nested: + let mut good = Xcm(vec![ClearOrigin]); + for _ in 0..MAX_XCM_DECODE_DEPTH - 1 { + good = Xcm(vec![SetAppendix(good)]); + } + + // Check that the good message is validated: + assert_ok!(::validate(&mut Some(dest.into()), &mut Some(good.clone()))); + + // Nesting the message one more time should reject it: + let bad = Xcm(vec![SetAppendix(good)]); + assert_eq!( + Err(SendError::ExceedsMaxMessageSize), + ::validate(&mut Some(dest.into()), &mut Some(bad)) + ); + } } #[cfg(test)] mod test_trader { diff --git a/polkadot/runtime/common/src/integration_tests.rs b/polkadot/runtime/common/src/integration_tests.rs index 91b64ef7259..3e9ac1fc1b1 100644 --- a/polkadot/runtime/common/src/integration_tests.rs +++ b/polkadot/runtime/common/src/integration_tests.rs @@ -39,7 +39,7 @@ use primitives::{ MAX_CODE_SIZE, }; use runtime_parachains::{ - configuration, origin, paras, shared, Origin as ParaOrigin, ParaLifecycle, + configuration, dmp, origin, paras, shared, Origin as ParaOrigin, ParaLifecycle, }; use sp_core::H256; use sp_io::TestExternalities; @@ -84,6 +84,7 @@ frame_support::construct_runtime!( Paras: paras, ParasShared: shared, ParachainsOrigin: origin, + Dmp: dmp, // Para Onboarding Pallets Registrar: paras_registrar, @@ -201,6 +202,8 @@ impl shared::Config for Test { type DisabledValidators = (); } +impl dmp::Config for Test {} + impl origin::Config for Test {} parameter_types! { diff --git a/polkadot/runtime/common/src/xcm_sender.rs b/polkadot/runtime/common/src/xcm_sender.rs index 0cbc2e603c8..a712d4381f7 100644 --- a/polkadot/runtime/common/src/xcm_sender.rs +++ b/polkadot/runtime/common/src/xcm_sender.rs @@ -119,7 +119,9 @@ where let config = configuration::ActiveConfig::::get(); let para = id.into(); let price = P::price_for_delivery(para, &xcm); - let blob = W::wrap_version(&d, xcm).map_err(|()| DestinationUnsupported)?.encode(); + let versioned_xcm = W::wrap_version(&d, xcm).map_err(|()| DestinationUnsupported)?; + versioned_xcm.validate_xcm_nesting().map_err(|()| ExceedsMaxMessageSize)?; + let blob = versioned_xcm.encode(); dmp::Pallet::::can_queue_downward_message(&config, ¶, &blob) .map_err(Into::::into)?; @@ -236,9 +238,11 @@ impl EnsureForParachain for () { #[cfg(test)] mod tests { use super::*; - use frame_support::parameter_types; + use crate::integration_tests::new_test_ext; + use frame_support::{assert_ok, parameter_types}; use runtime_parachains::FeeTracker; use sp_runtime::FixedU128; + use xcm::MAX_XCM_DECODE_DEPTH; parameter_types! 
{ pub const BaseDeliveryFee: u128 = 300_000_000; @@ -297,4 +301,40 @@ mod tests { (FeeAssetId::get(), result).into() ); } + + #[test] + fn child_parachain_router_validate_nested_xcm_works() { + let dest = Parachain(5555); + + type Router = ChildParachainRouter< + crate::integration_tests::Test, + (), + NoPriceForMessageDelivery, + >; + + // Message that is not too deeply nested: + let mut good = Xcm(vec![ClearOrigin]); + for _ in 0..MAX_XCM_DECODE_DEPTH - 1 { + good = Xcm(vec![SetAppendix(good)]); + } + + new_test_ext().execute_with(|| { + configuration::ActiveConfig::::mutate(|c| { + c.max_downward_message_size = u32::MAX; + }); + + // Check that the good message is validated: + assert_ok!(::validate( + &mut Some(dest.into()), + &mut Some(good.clone()) + )); + + // Nesting the message one more time should reject it: + let bad = Xcm(vec![SetAppendix(good)]); + assert_eq!( + Err(ExceedsMaxMessageSize), + ::validate(&mut Some(dest.into()), &mut Some(bad)) + ); + }); + } } diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs index 4b77199069d..d99da9184b5 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs @@ -146,8 +146,6 @@ benchmarks_instance_pallet! { initiate_reserve_withdraw { let (sender_account, sender_location) = account_and_location::(1); - let holding = T::worst_case_holding(1); - let assets_filter = AssetFilter::Definite(holding.clone().into_inner().into_iter().take(MAX_ITEMS_IN_ASSETS).collect::>().into()); let reserve = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( @@ -157,15 +155,29 @@ benchmarks_instance_pallet! { ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); + // generate holding and add possible required fees + let holding = if let Some(expected_assets_in_holding) = expected_assets_in_holding { + let mut holding = T::worst_case_holding(1 + expected_assets_in_holding.len() as u32); + for a in expected_assets_in_holding.into_inner() { + holding.push(a); + } + holding + } else { + T::worst_case_holding(1) + }; + let mut executor = new_executor::(sender_location); - executor.set_holding(holding.into()); + executor.set_holding(holding.clone().into()); if let Some(expected_fees_mode) = expected_fees_mode { executor.set_fees_mode(expected_fees_mode); } - if let Some(expected_assets_in_holding) = expected_assets_in_holding { - executor.set_holding(expected_assets_in_holding.into()); - } - let instruction = Instruction::InitiateReserveWithdraw { assets: assets_filter, reserve, xcm: Xcm(vec![]) }; + + let instruction = Instruction::InitiateReserveWithdraw { + // Worst case is looking through all holdings for every asset explicitly - respecting the limit `MAX_ITEMS_IN_ASSETS`. + assets: Definite(holding.into_inner().into_iter().take(MAX_ITEMS_IN_ASSETS).collect::>().into()), + reserve, + xcm: Xcm(vec![]) + }; let xcm = Xcm(vec![instruction]); }: { executor.bench_process(xcm)?; diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs index d11f64e7494..bf7d4e589de 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs @@ -16,7 +16,7 @@ //! A mock runtime for XCM benchmarking. 
-use crate::{fungible as xcm_balances_benchmark, mock::*}; +use crate::{fungible as xcm_balances_benchmark, generate_holding_assets, mock::*}; use frame_benchmarking::BenchmarkError; use frame_support::{ derive_impl, parameter_types, @@ -130,9 +130,8 @@ impl crate::Config for Test { Ok(valid_destination) } fn worst_case_holding(depositable_count: u32) -> Assets { - crate::mock_worst_case_holding( - depositable_count, - ::MaxAssetsIntoHolding::get(), + generate_holding_assets( + ::MaxAssetsIntoHolding::get() - depositable_count, ) } } diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs index 8c6ed4b5d0e..760b21f9356 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs @@ -19,9 +19,9 @@ use crate::{account_and_location, new_executor, EnsureDelivery, XcmCallOf}; use codec::Encode; use frame_benchmarking::{benchmarks, BenchmarkError}; use frame_support::{dispatch::GetDispatchInfo, traits::fungible::Inspect}; -use sp_std::vec; +use sp_std::{prelude::*, vec}; use xcm::{ - latest::{prelude::*, MaxDispatchErrorLen, MaybeErrorCode, Weight}, + latest::{prelude::*, MaxDispatchErrorLen, MaybeErrorCode, Weight, MAX_ITEMS_IN_ASSETS}, DoubleEncoded, }; use xcm_executor::{ @@ -32,7 +32,6 @@ use xcm_executor::{ benchmarks! { report_holding { let (sender_account, sender_location) = account_and_location::(1); - let holding = T::worst_case_holding(0); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( @@ -42,14 +41,22 @@ benchmarks! { ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); + // generate holding and add possible required fees + let holding = if let Some(expected_assets_in_holding) = expected_assets_in_holding { + let mut holding = T::worst_case_holding(expected_assets_in_holding.len() as u32); + for a in expected_assets_in_holding.into_inner() { + holding.push(a); + } + holding + } else { + T::worst_case_holding(0) + }; + let mut executor = new_executor::(sender_location); executor.set_holding(holding.clone().into()); if let Some(expected_fees_mode) = expected_fees_mode { executor.set_fees_mode(expected_fees_mode); } - if let Some(expected_assets_in_holding) = expected_assets_in_holding { - executor.set_holding(expected_assets_in_holding.into()); - } let instruction = Instruction::>::ReportHolding { response_info: QueryResponseInfo { @@ -57,8 +64,8 @@ benchmarks! { query_id: Default::default(), max_weight: Weight::MAX, }, - // Worst case is looking through all holdings for every asset explicitly. - assets: Definite(holding), + // Worst case is looking through all holdings for every asset explicitly - respecting the limit `MAX_ITEMS_IN_ASSETS`. + assets: Definite(holding.into_inner().into_iter().take(MAX_ITEMS_IN_ASSETS).collect::>().into()), }; let xcm = Xcm(vec![instruction]); @@ -612,14 +619,19 @@ benchmarks! 
{ let sender_account = T::AccountIdConverter::convert_location(&owner).unwrap(); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); + // generate holding and add possible required fees + let mut holding: Assets = asset.clone().into(); + if let Some(expected_assets_in_holding) = expected_assets_in_holding { + for a in expected_assets_in_holding.into_inner() { + holding.push(a); + } + }; + let mut executor = new_executor::(owner); - executor.set_holding(asset.clone().into()); + executor.set_holding(holding.into()); if let Some(expected_fees_mode) = expected_fees_mode { executor.set_fees_mode(expected_fees_mode); } - if let Some(expected_assets_in_holding) = expected_assets_in_holding { - executor.set_holding(expected_assets_in_holding.into()); - } let instruction = Instruction::LockAsset { asset, unlocker }; let xcm = Xcm(vec![instruction]); diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs index f41df017b9d..da0f28ccf28 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs @@ -132,9 +132,8 @@ impl crate::Config for Test { Ok(valid_destination) } fn worst_case_holding(depositable_count: u32) -> Assets { - crate::mock_worst_case_holding( - depositable_count, - ::MaxAssetsIntoHolding::get(), + generate_holding_assets( + ::MaxAssetsIntoHolding::get() - depositable_count, ) } } diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs index 63ed0ac0ca7..a43f27bf47e 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs @@ -50,6 +50,8 @@ pub trait Config: frame_system::Config { fn valid_destination() -> Result; /// Worst case scenario for a holding account in this runtime. + /// - `depositable_count` specifies the count of assets we plan to add to the holding on top of + /// those generated by the `worst_case_holding` implementation. fn worst_case_holding(depositable_count: u32) -> Assets; } @@ -64,19 +66,22 @@ pub type AssetTransactorOf = <::XcmConfig as XcmConfig>::AssetTr /// The call type of executor's config. Should eventually resolve to the same overarching call type. 
pub type XcmCallOf = <::XcmConfig as XcmConfig>::RuntimeCall; -pub fn mock_worst_case_holding(depositable_count: u32, max_assets: u32) -> Assets { +pub fn generate_holding_assets(max_assets: u32) -> Assets { let fungibles_amount: u128 = 100; - let holding_fungibles = max_assets / 2 - depositable_count; - let holding_non_fungibles = holding_fungibles; + let holding_fungibles = max_assets / 2; + let holding_non_fungibles = max_assets - holding_fungibles - 1; // -1 because of adding `Here` asset + // add count of `holding_fungibles` (0..holding_fungibles) .map(|i| { Asset { id: AssetId(GeneralIndex(i as u128).into()), - fun: Fungible(fungibles_amount * i as u128), + fun: Fungible(fungibles_amount * (i + 1) as u128), // non-zero amount } .into() }) + // add one more `Here` asset .chain(core::iter::once(Asset { id: AssetId(Here.into()), fun: Fungible(u128::MAX) })) + // add count of `holding_non_fungibles` .chain((0..holding_non_fungibles).map(|i| Asset { id: AssetId(GeneralIndex(i as u128).into()), fun: NonFungible(asset_instance_from(i)), diff --git a/polkadot/xcm/src/lib.rs b/polkadot/xcm/src/lib.rs index e836486e86e..198020ea126 100644 --- a/polkadot/xcm/src/lib.rs +++ b/polkadot/xcm/src/lib.rs @@ -25,7 +25,7 @@ extern crate alloc; use derivative::Derivative; -use parity_scale_codec::{Decode, Encode, Error as CodecError, Input, MaxEncodedLen}; +use parity_scale_codec::{Decode, DecodeLimit, Encode, Error as CodecError, Input, MaxEncodedLen}; use scale_info::TypeInfo; pub mod v2; @@ -456,6 +456,23 @@ impl IdentifyVersion for VersionedXcm { } } +impl VersionedXcm { + /// Checks that the XCM is decodable with `MAX_XCM_DECODE_DEPTH`. Consequently, it also checks + /// all decode implementations and limits, such as MAX_ITEMS_IN_ASSETS or + /// MAX_INSTRUCTIONS_TO_DECODE. + /// + /// Note that this uses the limit of the sender - not the receiver. It is a best effort. 
+ pub fn validate_xcm_nesting(&self) -> Result<(), ()> { + self.using_encoded(|mut enc| { + Self::decode_all_with_depth_limit(MAX_XCM_DECODE_DEPTH, &mut enc).map(|_| ()) + }) + .map_err(|e| { + log::error!(target: "xcm::validate_xcm_nesting", "Decode error: {e:?} for xcm: {self:?}!"); + () + }) + } +} + impl From> for VersionedXcm { fn from(x: v2::Xcm) -> Self { VersionedXcm::V2(x) @@ -704,3 +721,77 @@ fn size_limits() { } assert!(!test_failed); } + +#[test] +fn validate_xcm_nesting_works() { + use crate::latest::{ + prelude::{GeneralIndex, ReserveAssetDeposited, SetAppendix}, + Assets, Xcm, MAX_INSTRUCTIONS_TO_DECODE, MAX_ITEMS_IN_ASSETS, + }; + + // closure generates assets of `count` + let assets = |count| { + let mut assets = Assets::new(); + for i in 0..count { + assets.push((GeneralIndex(i as u128), 100).into()); + } + assets + }; + + // closure generates `Xcm` with nested instructions of `depth` + let with_instr = |depth| { + let mut xcm = Xcm::<()>(vec![]); + for _ in 0..depth - 1 { + xcm = Xcm::<()>(vec![SetAppendix(xcm)]); + } + xcm + }; + + // `MAX_INSTRUCTIONS_TO_DECODE` check + assert!(VersionedXcm::<()>::from(Xcm(vec![ + ReserveAssetDeposited(assets(1)); + (MAX_INSTRUCTIONS_TO_DECODE - 1) as usize + ])) + .validate_xcm_nesting() + .is_ok()); + assert!(VersionedXcm::<()>::from(Xcm(vec![ + ReserveAssetDeposited(assets(1)); + MAX_INSTRUCTIONS_TO_DECODE as usize + ])) + .validate_xcm_nesting() + .is_ok()); + assert!(VersionedXcm::<()>::from(Xcm(vec![ + ReserveAssetDeposited(assets(1)); + (MAX_INSTRUCTIONS_TO_DECODE + 1) as usize + ])) + .validate_xcm_nesting() + .is_err()); + + // `MAX_XCM_DECODE_DEPTH` check + assert!(VersionedXcm::<()>::from(with_instr(MAX_XCM_DECODE_DEPTH - 1)) + .validate_xcm_nesting() + .is_ok()); + assert!(VersionedXcm::<()>::from(with_instr(MAX_XCM_DECODE_DEPTH)) + .validate_xcm_nesting() + .is_ok()); + assert!(VersionedXcm::<()>::from(with_instr(MAX_XCM_DECODE_DEPTH + 1)) + .validate_xcm_nesting() + .is_err()); + + // `MAX_ITEMS_IN_ASSETS` check + assert!(VersionedXcm::<()>::from(Xcm(vec![ReserveAssetDeposited(assets( + MAX_ITEMS_IN_ASSETS + ))])) + .validate_xcm_nesting() + .is_ok()); + assert!(VersionedXcm::<()>::from(Xcm(vec![ReserveAssetDeposited(assets( + MAX_ITEMS_IN_ASSETS - 1 + ))])) + .validate_xcm_nesting() + .is_ok()); + assert!(VersionedXcm::<()>::from(Xcm(vec![ReserveAssetDeposited(assets( + MAX_ITEMS_IN_ASSETS + 1 + ))])) + .validate_xcm_nesting() + .is_err()); +} -- GitLab From ac4f421f0b99b73bbf80710206e9ac1463e8cb0b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 23 Apr 2024 11:51:11 +0200 Subject: [PATCH 054/107] parachains_coretime: Expose `MaxXCMTransactWeight` (#4189) This should be configured on the runtime level and not somewhere inside the pallet.
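For context, the runtime-level wiring this introduces looks roughly like the sketch below. It mirrors the values the diff sets for the Rococo and Westend runtimes; in any other runtime the concrete weight is a placeholder and must be sized to cover the heaviest of the `set_leases`, `reserve` and `notify_core_count` calls on the target coretime chain:

```rust
// Minimal sketch of the runtime-side configuration; the weight value is the
// one this patch uses for Rococo/Westend and is not universal.
parameter_types! {
    // Must cover the heaviest XCM `Transact` call sent to the coretime chain.
    pub MaxXcmTransactWeight: Weight = Weight::from_parts(200_000_000, 20_000);
}

impl coretime::Config for Runtime {
    // ... other associated types unchanged ...
    type MaxXcmTransactWeight = MaxXcmTransactWeight;
}
```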
--------- Co-authored-by: Adrian Catangiu Co-authored-by: Branislav Kontur --- .../runtime/parachains/src/coretime/migration.rs | 8 ++++---- polkadot/runtime/parachains/src/coretime/mod.rs | 15 +++++++++------ polkadot/runtime/parachains/src/mock.rs | 2 ++ polkadot/runtime/rococo/src/lib.rs | 2 ++ polkadot/runtime/westend/src/lib.rs | 2 ++ prdoc/pr_4189.prdoc | 16 ++++++++++++++++ 6 files changed, 35 insertions(+), 10 deletions(-) create mode 100644 prdoc/pr_4189.prdoc diff --git a/polkadot/runtime/parachains/src/coretime/migration.rs b/polkadot/runtime/parachains/src/coretime/migration.rs index 72eda1ea3f3..4f52fc99ec3 100644 --- a/polkadot/runtime/parachains/src/coretime/migration.rs +++ b/polkadot/runtime/parachains/src/coretime/migration.rs @@ -222,7 +222,7 @@ mod v_coretime { mask: CoreMask::complete(), assignment: CoreAssignment::Task(p.into()), }]); - mk_coretime_call(crate::coretime::CoretimeCalls::Reserve(schedule)) + mk_coretime_call::(crate::coretime::CoretimeCalls::Reserve(schedule)) }); let leases = lease_holding.into_iter().filter_map(|p| { @@ -243,14 +243,14 @@ mod v_coretime { let round_up = if valid_until % TIME_SLICE_PERIOD > 0 { 1 } else { 0 }; let time_slice = valid_until / TIME_SLICE_PERIOD + TIME_SLICE_PERIOD * round_up; log::trace!(target: "coretime-migration", "Sending of lease holding para {:?}, valid_until: {:?}, time_slice: {:?}", p, valid_until, time_slice); - Some(mk_coretime_call(crate::coretime::CoretimeCalls::SetLease(p.into(), time_slice))) + Some(mk_coretime_call::(crate::coretime::CoretimeCalls::SetLease(p.into(), time_slice))) }); let core_count: u16 = configuration::ActiveConfig::::get() .scheduler_params .num_cores .saturated_into(); - let set_core_count = iter::once(mk_coretime_call( + let set_core_count = iter::once(mk_coretime_call::( crate::coretime::CoretimeCalls::NotifyCoreCount(core_count), )); @@ -261,7 +261,7 @@ mod v_coretime { }]); // Reserved cores will come before lease cores, so cores will change their assignments // when coretime chain sends us their assign_core calls -> Good test. - mk_coretime_call(crate::coretime::CoretimeCalls::Reserve(schedule)) + mk_coretime_call::(crate::coretime::CoretimeCalls::Reserve(schedule)) }); let message_content = iter::once(Instruction::UnpaidExecution { diff --git a/polkadot/runtime/parachains/src/coretime/mod.rs b/polkadot/runtime/parachains/src/coretime/mod.rs index 9095cd90ae0..a30f7336f69 100644 --- a/polkadot/runtime/parachains/src/coretime/mod.rs +++ b/polkadot/runtime/parachains/src/coretime/mod.rs @@ -110,6 +110,11 @@ pub mod pallet { /// Something that provides the weight of this pallet. type WeightInfo: WeightInfo; type SendXcm: SendXcm; + + /// Maximum weight for any XCM transact call that should be executed on the coretime chain. + /// + /// Basically should be `max_weight(set_leases, reserve, notify_core_count)`. 
+ type MaxXcmTransactWeight: Get; } #[pallet::event] @@ -225,7 +230,7 @@ impl Pallet { weight_limit: WeightLimit::Unlimited, check_origin: None, }, - mk_coretime_call(crate::coretime::CoretimeCalls::NotifyCoreCount(core_count)), + mk_coretime_call::(crate::coretime::CoretimeCalls::NotifyCoreCount(core_count)), ]); if let Err(err) = send_xcm::( Location::new(0, [Junction::Parachain(T::BrokerId::get())]), @@ -244,7 +249,7 @@ impl Pallet { weight_limit: WeightLimit::Unlimited, check_origin: None, }, - mk_coretime_call(crate::coretime::CoretimeCalls::SwapLeases(one, other)), + mk_coretime_call::(crate::coretime::CoretimeCalls::SwapLeases(one, other)), ]); if let Err(err) = send_xcm::( Location::new(0, [Junction::Parachain(T::BrokerId::get())]), @@ -261,12 +266,10 @@ impl OnNewSession> for Pallet { } } -fn mk_coretime_call(call: crate::coretime::CoretimeCalls) -> Instruction<()> { +fn mk_coretime_call(call: crate::coretime::CoretimeCalls) -> Instruction<()> { Instruction::Transact { origin_kind: OriginKind::Superuser, - // Largest call is set_lease with 1526 byte: - // Longest call is reserve() with 31_000_000 - require_weight_at_most: Weight::from_parts(170_000_000, 20_000), + require_weight_at_most: T::MaxXcmTransactWeight::get(), call: BrokerRuntimePallets::Broker(call).encode().into(), } } diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index c91f5be127c..97a75d47ff7 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -387,6 +387,7 @@ impl assigner_coretime::Config for Test {} parameter_types! { pub const BrokerId: u32 = 10u32; + pub MaxXcmTransactWeight: Weight = Weight::from_parts(10_000_000, 10_000); } impl coretime::Config for Test { @@ -396,6 +397,7 @@ impl coretime::Config for Test { type BrokerId = BrokerId; type WeightInfo = crate::coretime::TestWeightInfo; type SendXcm = DummyXcmSender; + type MaxXcmTransactWeight = MaxXcmTransactWeight; } pub struct DummyXcmSender; diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index ba80fa6942c..1cfe9adfe13 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1057,6 +1057,7 @@ impl parachains_scheduler::Config for Runtime { parameter_types! { pub const BrokerId: u32 = BROKER_ID; + pub MaxXcmTransactWeight: Weight = Weight::from_parts(200_000_000, 20_000); } impl coretime::Config for Runtime { @@ -1066,6 +1067,7 @@ impl coretime::Config for Runtime { type BrokerId = BrokerId; type WeightInfo = weights::runtime_parachains_coretime::WeightInfo; type SendXcm = crate::xcm_config::XcmRouter; + type MaxXcmTransactWeight = MaxXcmTransactWeight; } parameter_types! { diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 02933efff94..7924939c79b 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1188,6 +1188,7 @@ impl parachains_scheduler::Config for Runtime { parameter_types! { pub const BrokerId: u32 = BROKER_ID; + pub MaxXcmTransactWeight: Weight = Weight::from_parts(200_000_000, 20_000); } impl coretime::Config for Runtime { @@ -1197,6 +1198,7 @@ impl coretime::Config for Runtime { type BrokerId = BrokerId; type WeightInfo = weights::runtime_parachains_coretime::WeightInfo; type SendXcm = crate::xcm_config::XcmRouter; + type MaxXcmTransactWeight = MaxXcmTransactWeight; } parameter_types! 
{ diff --git a/prdoc/pr_4189.prdoc b/prdoc/pr_4189.prdoc new file mode 100644 index 00000000000..74ed7c67cbd --- /dev/null +++ b/prdoc/pr_4189.prdoc @@ -0,0 +1,16 @@ +title: "polkadot_runtime_parachains::coretime: Expose `MaxXcmTransactWeight`" + +doc: + - audience: Runtime Dev + description: | + Expose `MaxXcmTransactWeight` via the `Config` trait. This exposes the + possibility for runtime implementors to set the maximum weight required + for the calls on the coretime chain. Basically it needs to be set to + `max_weight(set_leases, reserve, notify_core_count)` where `set_leases` + etc. are the calls on the coretime chain. This ensures that these XCM + transact calls sent by the relay chain coretime pallet to the coretime + chain can be dispatched. + +crates: + - name: polkadot-runtime-parachains + bump: major -- GitLab From f7c1e0cf10f8c5c179697e7dfff1191c3095e47a Mon Sep 17 00:00:00 2001 From: AlexWang Date: Tue, 23 Apr 2024 22:49:39 +1200 Subject: [PATCH 055/107] Add OnFinality polkadot bootnode (#4247) This adds the OnFinality Polkadot bootnode. Please correct me if this is not the right place for adding a new bootnode. --- polkadot/node/service/chain-specs/polkadot.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/polkadot/node/service/chain-specs/polkadot.json b/polkadot/node/service/chain-specs/polkadot.json index 5f8d88102d7..03570543707 100644 --- a/polkadot/node/service/chain-specs/polkadot.json +++ b/polkadot/node/service/chain-specs/polkadot.json @@ -37,7 +37,8 @@ "/dns/dot14.rotko.net/tcp/33214/p2p/12D3KooWPyEvPEXghnMC67Gff6PuZiSvfx3fmziKiPZcGStZ5xff", "/dns/ibp-boot-polkadot.luckyfriday.io/tcp/30333/p2p/12D3KooWEjk6QXrZJ26fLpaajisJGHiz6WiQsR8k7mkM9GmWKnRZ", "/dns/ibp-boot-polkadot.luckyfriday.io/tcp/30334/wss/p2p/12D3KooWEjk6QXrZJ26fLpaajisJGHiz6WiQsR8k7mkM9GmWKnRZ", - "/dns/boot-polkadot.luckyfriday.io/tcp/443/wss/p2p/12D3KooWAdyiVAaeGdtBt6vn5zVetwA4z4qfm9Fi2QCSykN1wTBJ" + "/dns/boot-polkadot.luckyfriday.io/tcp/443/wss/p2p/12D3KooWAdyiVAaeGdtBt6vn5zVetwA4z4qfm9Fi2QCSykN1wTBJ", + "/dns4/polkadot-0.boot.onfinality.io/tcp/24446/ws/p2p/12D3KooWT1PWaNdAwYrSr89dvStnoGdH3t4LNRbcVNN4JCtsotkR" ], "telemetryEndpoints": [ [ -- GitLab From 5f2e66f5d0151e1ca9be1e0343147f24a735c476 Mon Sep 17 00:00:00 2001 From: sfuhfds Date: Tue, 23 Apr 2024 18:53:50 +0800 Subject: [PATCH 056/107] chore: fix some typos (#4253) --- polkadot/runtime/common/src/crowdloan/mod.rs | 2 +- polkadot/runtime/common/src/paras_registrar/mod.rs | 2 +- polkadot/runtime/parachains/src/inclusion/mod.rs | 2 +- polkadot/runtime/parachains/src/paras/mod.rs | 2 +- polkadot/runtime/parachains/src/paras_inherent/mod.rs | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/polkadot/runtime/common/src/crowdloan/mod.rs b/polkadot/runtime/common/src/crowdloan/mod.rs index 12078871a19..477530467fa 100644 --- a/polkadot/runtime/common/src/crowdloan/mod.rs +++ b/polkadot/runtime/common/src/crowdloan/mod.rs @@ -866,7 +866,7 @@ mod tests { use sp_core::H256; use std::{cell::RefCell, collections::BTreeMap, sync::Arc}; // The testing primitives are very useful for avoiding having to work with signatures - // or public keys. `u64` is used as the `AccountId` and no `Signature`s are requried. + // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required.
use crate::{ crowdloan, mock::TestRegistrar, diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs index cc949c9d3f6..a49ebab3e26 100644 --- a/polkadot/runtime/common/src/paras_registrar/mod.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -412,7 +412,7 @@ pub mod pallet { /// validators have reported on the validity of the code, the code will either be enacted /// or the upgrade will be rejected. If the code will be enacted, the current code of the /// parachain will be overwritten directly. This means that any PoV will be checked by this - /// new code. The parachain itself will not be informed explictely that the validation code + /// new code. The parachain itself will not be informed explicitly that the validation code /// has changed. /// /// Can be called by Root, the parachain, or the parachain manager if the parachain is diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 76caf740ebc..31befefa322 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -245,7 +245,7 @@ pub enum AggregateMessageOrigin { /// Identifies a UMP queue inside the `MessageQueue` pallet. /// /// It is written in verbose form since future variants like `Here` and `Bridged` are already -/// forseeable. +/// foreseeable. #[derive(Encode, Decode, Clone, MaxEncodedLen, Eq, PartialEq, RuntimeDebug, TypeInfo)] pub enum UmpQueueId { /// The message originated from this parachain. diff --git a/polkadot/runtime/parachains/src/paras/mod.rs b/polkadot/runtime/parachains/src/paras/mod.rs index 6f67c4b8c03..36a693bcc8e 100644 --- a/polkadot/runtime/parachains/src/paras/mod.rs +++ b/polkadot/runtime/parachains/src/paras/mod.rs @@ -641,7 +641,7 @@ pub mod pallet { /// /// This is only used at genesis or by root. /// - /// TODO: Remove once coretime is the standard accross all chains. + /// TODO: Remove once coretime is the standard across all chains. type AssignCoretime: AssignCoretime; } diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 2c6c48acc6d..ac4cf5dc8d4 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -1099,7 +1099,7 @@ fn limit_and_sanitize_disputes< } // Helper function for filtering candidates which don't pass the given predicate. When/if the first -// candidate which failes the predicate is found, all the other candidates that follow are dropped. +// candidate which failed the predicate is found, all the other candidates that follow are dropped. fn retain_candidates< T: inclusion::Config + paras::Config + inclusion::Config, F: FnMut(ParaId, &mut C) -> bool, -- GitLab From 118cd6f922acc9c4b3938645cd34098275d41c93 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Tue, 23 Apr 2024 13:40:05 +0200 Subject: [PATCH 057/107] Ensure outbound XCMs are decodable with limits + add `EnsureDecodableXcm` router (for testing purposes) (#4186) This PR: - adds `EnsureDecodableXcm` (testing) router that attempts to *encode* and *decode* passed XCM `message` to ensure that the receiving side will be able to decode, at least with the same XCM version. 
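  As an editorial illustration of how the new router is meant to be used, this same patch's test mocks wrap an existing mock sender with it (a sketch; `TestSendXcm` is the pre-existing mock from `xcm-builder/tests/mock/mod.rs`):

```rust
// Any XCM handed to the inner sender must now survive an encode/decode
// round trip; otherwise `validate` fails with `SendError::Transport`.
pub type TestXcmRouter = EnsureDecodableXcm<TestSendXcm>;
```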
- fixes `pallet_xcm` / `pallet_xcm_benchmarks` assets data generation Relates to investigation of https://substrate.stackexchange.com/questions/11288 and missing fix https://github.com/paritytech/polkadot-sdk/pull/2129 which did not get into the fellows 1.1.X release. ## Questions/TODOs - [x] fix XCM benchmarks, which produced undecodable data - the new router caught at least two cases - `BoundedVec exceeds its limit` - `Fungible asset of zero amount is not allowed` - [x] do we need to add `sort` to the `prepend_with` as we did for reanchor [here](https://github.com/paritytech/polkadot-sdk/pull/2129)? @serban300 (**created separate/follow-up PR**: https://github.com/paritytech/polkadot-sdk/pull/4235) - [x] We added a decoding check to `XcmpQueue` -> `validate_xcm_nesting`; why was it not added to the `ParentAsUmp` or `ChildParachainRouter`? @franciscoaguirre (**created separate/follow-up PR**: https://github.com/paritytech/polkadot-sdk/pull/4236) - [ ] `SendController::send_blob`: replace `VersionedXcm::<()>::decode(` with `VersionedXcm::<()>::decode_with_depth_limit(MAX_XCM_DECODE_DEPTH, data)`? --------- Co-authored-by: Adrian Catangiu --- Cargo.lock | 8 ----- polkadot/runtime/test-runtime/Cargo.toml | 10 ------ .../runtime/test-runtime/constants/Cargo.toml | 6 ---- .../src/fungible/mock.rs | 6 ++-- .../pallet-xcm-benchmarks/src/generic/mock.rs | 5 +-- polkadot/xcm/pallet-xcm/src/mock.rs | 12 ++++--- polkadot/xcm/xcm-builder/src/lib.rs | 2 +- polkadot/xcm/xcm-builder/src/routing.rs | 34 +++++++++++++++++++ polkadot/xcm/xcm-builder/src/tests/mock.rs | 7 ++-- polkadot/xcm/xcm-builder/tests/mock/mod.rs | 12 ++++--- .../xcm-simulator/example/src/parachain.rs | 10 +++--- .../xcm-simulator/example/src/relay_chain.rs | 8 ++--- 12 files changed, 70 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fa5c42c1fa3..62479cce2a0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14348,7 +14348,6 @@ dependencies = [ name = "polkadot-test-runtime" version = "1.0.0" dependencies = [ - "bitvec", "frame-election-provider-support", "frame-executive", "frame-support", "frame-system", @@ -14373,16 +14372,12 @@ dependencies = [ "pallet-vesting", "pallet-xcm", "parity-scale-codec", - "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-parachains", - "rustc-hex", "scale-info", "serde", - "serde_derive", "serde_json", - "smallvec", "sp-api", "sp-authority-discovery", "sp-block-builder", @@ -21273,11 +21268,8 @@ version = "1.0.0" dependencies = [ "frame-support", "polkadot-primitives", - "polkadot-runtime-common", "smallvec", - "sp-core", "sp-runtime", - "sp-weights", ] [[package]] diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index 35fb684597e..6552ed4ef8a 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -11,14 +11,10 @@ license.workspace = true workspace = true [dependencies] -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -rustc-hex = { version = "2.1.0", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { workspace = true } -serde_derive = { optional = true, workspace = true } -smallvec = "1.8.0" authority-discovery-primitives = { package = "sp-authority-discovery", path = "../../../substrate/primitives/authority-discovery", default-features = false
} babe-primitives = { package = "sp-consensus-babe", path = "../../../substrate/primitives/consensus/babe", default-features = false } @@ -63,7 +59,6 @@ pallet-vesting = { path = "../../../substrate/frame/vesting", default-features = runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } pallet-xcm = { path = "../../xcm/pallet-xcm", default-features = false } -polkadot-parachain-primitives = { path = "../../parachain", default-features = false } polkadot-runtime-parachains = { path = "../parachains", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", default-features = false } @@ -92,7 +87,6 @@ std = [ "authority-discovery-primitives/std", "babe-primitives/std", "beefy-primitives/std", - "bitvec/std", "block-builder-api/std", "frame-election-provider-support/std", "frame-executive/std", @@ -118,14 +112,11 @@ std = [ "pallet-vesting/std", "pallet-xcm/std", "parity-scale-codec/std", - "polkadot-parachain-primitives/std", "polkadot-runtime-parachains/std", "primitives/std", "runtime-common/std", - "rustc-hex/std", "scale-info/std", "serde/std", - "serde_derive", "sp-api/std", "sp-core/std", "sp-genesis-builder/std", @@ -157,7 +148,6 @@ runtime-benchmarks = [ "pallet-timestamp/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", - "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", "primitives/runtime-benchmarks", "runtime-common/runtime-benchmarks", diff --git a/polkadot/runtime/test-runtime/constants/Cargo.toml b/polkadot/runtime/test-runtime/constants/Cargo.toml index 2b387bbd307..5b8a4d7a051 100644 --- a/polkadot/runtime/test-runtime/constants/Cargo.toml +++ b/polkadot/runtime/test-runtime/constants/Cargo.toml @@ -14,18 +14,12 @@ smallvec = "1.8.0" frame-support = { path = "../../../../substrate/frame/support", default-features = false } primitives = { package = "polkadot-primitives", path = "../../../primitives", default-features = false } -runtime-common = { package = "polkadot-runtime-common", path = "../../common", default-features = false } sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-weights = { path = "../../../../substrate/primitives/weights", default-features = false } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } [features] default = ["std"] std = [ "frame-support/std", "primitives/std", - "runtime-common/std", - "sp-core/std", "sp-runtime/std", - "sp-weights/std", ] diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs index bf7d4e589de..7233b46d0cd 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs @@ -23,7 +23,9 @@ use frame_support::{ traits::{Everything, Nothing}, }; use xcm::latest::prelude::*; -use xcm_builder::{AllowUnpaidExecutionFrom, FrameTransactionalProcessor, MintLocation}; +use xcm_builder::{ + AllowUnpaidExecutionFrom, EnsureDecodableXcm, FrameTransactionalProcessor, MintLocation, +}; type Block = frame_system::mocking::MockBlock; @@ -91,7 +93,7 @@ parameter_types! 
{ pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; - type XcmSender = DevNull; + type XcmSender = EnsureDecodableXcm; type AssetTransactor = AssetTransactor; type OriginConverter = (); type IsReserve = TrustedReserves; diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs index da0f28ccf28..a9f4d37d7a5 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs @@ -28,7 +28,8 @@ use xcm_builder::{ AssetsInHolding, TestAssetExchanger, TestAssetLocker, TestAssetTrap, TestSubscriptionService, TestUniversalAliases, }, - AliasForeignAccountId32, AllowUnpaidExecutionFrom, FrameTransactionalProcessor, + AliasForeignAccountId32, AllowUnpaidExecutionFrom, EnsureDecodableXcm, + FrameTransactionalProcessor, }; use xcm_executor::traits::ConvertOrigin; @@ -81,7 +82,7 @@ type Aliasers = AliasForeignAccountId32; pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; - type XcmSender = DevNull; + type XcmSender = EnsureDecodableXcm; type AssetTransactor = NoAssetTransactor; type OriginConverter = AlwaysSignedByDefault; type IsReserve = AllAssetLocationsPass; diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index e3680c530e2..8e94803e843 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -33,10 +33,11 @@ use xcm::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, Case, ChildParachainAsNative, ChildParachainConvertsVia, - ChildSystemParachainAsSuperuser, DescribeAllTerminal, FixedRateOfFungible, FixedWeightBounds, - FrameTransactionalProcessor, FungibleAdapter, FungiblesAdapter, HashedDescription, IsConcrete, - MatchedConvertedConcreteId, NoChecking, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, XcmFeeManagerFromComponents, XcmFeeToAccount, + ChildSystemParachainAsSuperuser, DescribeAllTerminal, EnsureDecodableXcm, FixedRateOfFungible, + FixedWeightBounds, FrameTransactionalProcessor, FungibleAdapter, FungiblesAdapter, + HashedDescription, IsConcrete, MatchedConvertedConcreteId, NoChecking, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::{ traits::{Identity, JustTry}, @@ -488,7 +489,8 @@ pub type Barrier = ( AllowSubscriptionsFrom, ); -pub type XcmRouter = (TestPaidForPara3000SendXcm, TestSendXcmErrX8, TestSendXcm); +pub type XcmRouter = + EnsureDecodableXcm<(TestPaidForPara3000SendXcm, TestSendXcmErrX8, TestSendXcm)>; pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index bd4a4c941c9..cdc663a0cc9 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -119,7 +119,7 @@ mod process_xcm_message; pub use process_xcm_message::ProcessXcmMessage; mod routing; -pub use routing::{EnsureDelivery, WithTopicSource, WithUniqueTopic}; +pub use routing::{EnsureDecodableXcm, EnsureDelivery, WithTopicSource, WithUniqueTopic}; mod transactional; pub use transactional::FrameTransactionalProcessor; diff --git a/polkadot/xcm/xcm-builder/src/routing.rs b/polkadot/xcm/xcm-builder/src/routing.rs index 529ef80c15f..921b9ac5922 100644 --- 
a/polkadot/xcm/xcm-builder/src/routing.rs +++ b/polkadot/xcm/xcm-builder/src/routing.rs @@ -139,3 +139,37 @@ impl EnsureDelivery for Tuple { (None, None) } } + +/// A wrapper router that attempts to *encode* and *decode* passed XCM `message` to ensure that the +/// receiving side will be able to decode, at least with the same XCM version. +/// +/// This is designed to be at the top-level of any routers which do the real delivery. While other +/// routers can manipulate the `message`, we cannot access the final XCM due to the generic +/// `Inner::Ticket`. Therefore, this router aims to validate at least the passed `message`. +/// +/// NOTE: For use in mock runtimes which don't have the DMP/UMP/HRMP XCM validations. +pub struct EnsureDecodableXcm<Inner>(sp_std::marker::PhantomData<Inner>); +impl<Inner: SendXcm> SendXcm for EnsureDecodableXcm<Inner> { + type Ticket = Inner::Ticket; + + fn validate( + destination: &mut Option<Location>, + message: &mut Option<Xcm<()>>, + ) -> SendResult<Self::Ticket> { + if let Some(msg) = message { + let versioned_xcm = VersionedXcm::<()>::from(msg.clone()); + if versioned_xcm.validate_xcm_nesting().is_err() { + log::error!( + target: "xcm::validate_xcm_nesting", + "EnsureDecodableXcm validate_xcm_nesting error for \nversioned_xcm: {versioned_xcm:?}\nbased on xcm: {msg:?}" + ); + return Err(SendError::Transport("EnsureDecodableXcm validate_xcm_nesting error")) + } + } + Inner::validate(destination, message) + } + + fn deliver(ticket: Self::Ticket) -> Result<XcmHash, SendError> { + Inner::deliver(ticket) + } +} diff --git a/polkadot/xcm/xcm-builder/src/tests/mock.rs b/polkadot/xcm/xcm-builder/src/tests/mock.rs index 3d03ab05424..7532b97d97b 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mock.rs @@ -19,6 +19,7 @@ use crate::{ barriers::{AllowSubscriptionsFrom, RespectSuspension, TrailingSetTopicAsId}, test_utils::*, + EnsureDecodableXcm, }; pub use crate::{ AliasForeignAccountId32, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, @@ -165,8 +166,8 @@ pub fn set_exporter_override( pub fn clear_exporter_override() { EXPORTER_OVERRIDE.with(|x| x.replace(None)); } -pub struct TestMessageSender; -impl SendXcm for TestMessageSender { +pub struct TestMessageSenderImpl; +impl SendXcm for TestMessageSenderImpl { type Ticket = (Location, Xcm<()>, XcmHash); fn validate( dest: &mut Option<Location>, @@ -183,6 +184,8 @@ impl SendXcm for TestMessageSender { Ok(hash) } } +pub type TestMessageSender = EnsureDecodableXcm<TestMessageSenderImpl>; + pub struct TestMessageExporter; impl ExportXcm for TestMessageExporter { type Ticket = (NetworkId, u32, InteriorLocation, InteriorLocation, Xcm<()>, XcmHash); diff --git a/polkadot/xcm/xcm-builder/tests/mock/mod.rs b/polkadot/xcm/xcm-builder/tests/mock/mod.rs index da38538b60c..46ec23beebc 100644 --- a/polkadot/xcm/xcm-builder/tests/mock/mod.rs +++ b/polkadot/xcm/xcm-builder/tests/mock/mod.rs @@ -35,9 +35,9 @@ use staging_xcm_builder as xcm_builder; use xcm_builder::{ AccountId32Aliases, AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom, ChildParachainAsNative, ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, - FixedRateOfFungible, FixedWeightBounds, FungibleAdapter, IsChildSystemParachain, IsConcrete, - MintLocation, RespectSuspension, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, + EnsureDecodableXcm, FixedRateOfFungible, FixedWeightBounds, FungibleAdapter, + IsChildSystemParachain, IsConcrete, MintLocation, RespectSuspension, SignedAccountId32AsNative, + SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, }; pub
type AccountId = AccountId32; @@ -68,6 +68,8 @@ impl SendXcm for TestSendXcm { } } +pub type TestXcmRouter = EnsureDecodableXcm; + // copied from kusama constants pub const UNITS: Balance = 1_000_000_000_000; pub const CENTS: Balance = UNITS / 30_000; @@ -180,7 +182,7 @@ pub type TrustedTeleporters = (xcm_builder::Case,); pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; - type XcmSender = TestSendXcm; + type XcmSender = TestXcmRouter; type AssetTransactor = LocalAssetTransactor; type OriginConverter = LocalOriginConverter; type IsReserve = (); @@ -215,7 +217,7 @@ impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type UniversalLocation = UniversalLocation; type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; - type XcmRouter = TestSendXcm; + type XcmRouter = TestXcmRouter; // Anyone can execute XCM messages locally... type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; type XcmExecuteFilter = Nothing; diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain.rs b/polkadot/xcm/xcm-simulator/example/src/parachain.rs index c155ed5ab63..41e62596392 100644 --- a/polkadot/xcm/xcm-simulator/example/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/example/src/parachain.rs @@ -40,10 +40,10 @@ use polkadot_parachain_primitives::primitives::{ use xcm::{latest::prelude::*, VersionedXcm}; use xcm_builder::{ Account32Hash, AccountId32Aliases, AllowUnpaidExecutionFrom, ConvertedConcreteId, - EnsureXcmOrigin, FixedRateOfFungible, FixedWeightBounds, FrameTransactionalProcessor, - FungibleAdapter, IsConcrete, NativeAsset, NoChecking, NonFungiblesAdapter, ParentIsPreset, - SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, + EnsureDecodableXcm, EnsureXcmOrigin, FixedRateOfFungible, FixedWeightBounds, + FrameTransactionalProcessor, FungibleAdapter, IsConcrete, NativeAsset, NoChecking, + NonFungiblesAdapter, ParentIsPreset, SiblingParachainConvertsVia, SignedAccountId32AsNative, + SignedToAccountId32, SovereignSignedViaLocation, }; use xcm_executor::{ traits::{ConvertLocation, JustTry}, @@ -212,7 +212,7 @@ pub type LocalAssetTransactor = ( >, ); -pub type XcmRouter = super::ParachainXcmRouter; +pub type XcmRouter = EnsureDecodableXcm>; pub type Barrier = AllowUnpaidExecutionFrom; parameter_types! { diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs index 4e8f1f68ebd..b41df3cfa2b 100644 --- a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs @@ -36,9 +36,9 @@ use xcm::latest::prelude::*; use xcm_builder::{ Account32Hash, AccountId32Aliases, AllowUnpaidExecutionFrom, AsPrefixedGeneralIndex, ChildParachainAsNative, ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, - ConvertedConcreteId, FixedRateOfFungible, FixedWeightBounds, FrameTransactionalProcessor, - FungibleAdapter, IsConcrete, NoChecking, NonFungiblesAdapter, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, + ConvertedConcreteId, EnsureDecodableXcm, FixedRateOfFungible, FixedWeightBounds, + FrameTransactionalProcessor, FungibleAdapter, IsConcrete, NoChecking, NonFungiblesAdapter, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, }; use xcm_executor::{traits::JustTry, Config, XcmExecutor}; @@ -168,7 +168,7 @@ parameter_types! 
{ pub const MaxAssetsIntoHolding: u32 = 64; } -pub type XcmRouter = super::RelayChainXcmRouter; +pub type XcmRouter = EnsureDecodableXcm; pub type Barrier = AllowUnpaidExecutionFrom; pub struct XcmConfig; -- GitLab From eda5e5c31f9bffafd6afd6d14fb95001a10dba9a Mon Sep 17 00:00:00 2001 From: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Date: Tue, 23 Apr 2024 14:53:20 +0200 Subject: [PATCH 058/107] Fix Stuck Collator Funds (#4229) Fixes https://github.com/paritytech/polkadot-sdk/issues/4206 In #1340 one of the storage types was changed from `Candidates` to `CandidateList`. Since the actual key includes the hash of this value, all of the candidates stored here are (a) "missing" and (b) unable to unreserve their candidacy bond. This migration kills the storage values and refunds the deposit held for each candidate. --------- Signed-off-by: georgepisaltu Co-authored-by: georgepisaltu <52418509+georgepisaltu@users.noreply.github.com> Co-authored-by: Oliver Tale-Yazdi Co-authored-by: georgepisaltu --- cumulus/pallets/collator-selection/Cargo.toml | 3 +- cumulus/pallets/collator-selection/src/lib.rs | 2 +- .../collator-selection/src/migration.rs | 233 +++++++++++++++++- .../assets/asset-hub-rococo/src/lib.rs | 2 +- .../assets/asset-hub-westend/src/lib.rs | 2 +- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 2 +- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 2 +- .../collectives-westend/src/lib.rs | 2 +- .../contracts/contracts-rococo/src/lib.rs | 2 + .../coretime/coretime-rococo/src/lib.rs | 1 + .../coretime/coretime-westend/src/lib.rs | 1 + .../runtimes/people/people-rococo/src/lib.rs | 1 + .../runtimes/people/people-westend/src/lib.rs | 1 + prdoc/pr_4229.prdoc | 10 + 14 files changed, 256 insertions(+), 8 deletions(-) create mode 100644 prdoc/pr_4229.prdoc diff --git a/cumulus/pallets/collator-selection/Cargo.toml b/cumulus/pallets/collator-selection/Cargo.toml index c04d9e1403e..25ca2fe057b 100644 --- a/cumulus/pallets/collator-selection/Cargo.toml +++ b/cumulus/pallets/collator-selection/Cargo.toml @@ -27,6 +27,7 @@ sp-staking = { path = "../../../substrate/primitives/staking", default-features frame-support = { path = "../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../substrate/frame/system", default-features = false } pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } +pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } pallet-session = { path = "../../../substrate/frame/session", default-features = false } frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } @@ -38,7 +39,6 @@ sp-tracing = { path = "../../../substrate/primitives/tracing" } sp-runtime = { path = "../../../substrate/primitives/runtime" } pallet-timestamp = { path = "../../../substrate/frame/timestamp" } sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura" } -pallet-balances = { path = "../../../substrate/frame/balances" } pallet-aura = { path = "../../../substrate/frame/aura" } [features] @@ -59,6 +59,7 @@ std = [ "frame-system/std", "log/std", "pallet-authorship/std", + "pallet-balances/std", "pallet-session/std", "rand/std", "scale-info/std", diff --git a/cumulus/pallets/collator-selection/src/lib.rs b/cumulus/pallets/collator-selection/src/lib.rs index 17bbe2591d4..2fa38436752 100644 --- a/cumulus/pallets/collator-selection/src/lib.rs +++ b/cumulus/pallets/collator-selection/src/lib.rs @@ 
-121,7 +121,7 @@ pub mod pallet { use sp_std::vec::Vec; /// The in-code storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); type BalanceOf<T> = <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance; diff --git a/cumulus/pallets/collator-selection/src/migration.rs b/cumulus/pallets/collator-selection/src/migration.rs index 5dc2fba4279..425acdd8bfb 100644 --- a/cumulus/pallets/collator-selection/src/migration.rs +++ b/cumulus/pallets/collator-selection/src/migration.rs @@ -17,9 +17,107 @@ //! A module that is responsible for migration of storage for Collator Selection. use super::*; -use frame_support::traits::OnRuntimeUpgrade; +use frame_support::traits::{OnRuntimeUpgrade, UncheckedOnRuntimeUpgrade}; use log; +/// Migrate to v2. Should have been part of <https://github.com/paritytech/polkadot-sdk/pull/1340>. +pub mod v2 { + use super::*; + use frame_support::{ + pallet_prelude::*, + storage_alias, + traits::{Currency, ReservableCurrency}, + }; + use sp_runtime::traits::{Saturating, Zero}; + #[cfg(feature = "try-runtime")] + use sp_std::vec::Vec; + + /// [`UncheckedMigrationToV2`] wrapped in a + /// [`VersionedMigration`](frame_support::migrations::VersionedMigration), ensuring the + /// migration is only performed when on-chain version is 1. + pub type MigrationToV2<T> = frame_support::migrations::VersionedMigration< + 1, + 2, + UncheckedMigrationToV2<T>, + Pallet<T>, + <T as frame_system::Config>::DbWeight, + >; + + #[storage_alias] + pub type Candidates<T: Config> = StorageValue< + Pallet<T>, + BoundedVec<CandidateInfo<<T as frame_system::Config>::AccountId, <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance>, <T as Config>::MaxCandidates>, + ValueQuery, + >; + + /// Migrate to V2. + pub struct UncheckedMigrationToV2<T>(sp_std::marker::PhantomData<T>); + impl<T: Config + pallet_balances::Config> UncheckedOnRuntimeUpgrade for UncheckedMigrationToV2<T> { + fn on_runtime_upgrade() -> Weight { + let mut weight = Weight::zero(); + let mut count: u64 = 0; + // candidates who exist under the old `Candidates` key + let candidates = Candidates::<T>::take(); + + // New candidates who have registered since the upgrade. Under normal circumstances, + // this should not exist because the migration should be applied when the upgrade + // happens. But in Polkadot/Kusama we messed this up, and people registered under + // `CandidateList` while their funds were locked in `Candidates`. + let new_candidate_list = CandidateList::<T>::get(); + if new_candidate_list.len().is_zero() { + // The new list is empty, so this is essentially being applied correctly. We just + // put the candidates into the new storage item. + CandidateList::<T>::put(&candidates); + // 1 write for the new list + weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 1)); + } else { + // Oops, the runtime upgraded without the migration. There are new candidates in + // `CandidateList`. So, let's just refund the old ones and assume they have already + // started participating in the new system.
+ for candidate in candidates { + let err = T::Currency::unreserve(&candidate.who, candidate.deposit); + if err > Zero::zero() { + log::error!( + target: LOG_TARGET, + "{:?} balance was unable to be unreserved from {:?}", + err, &candidate.who, + ); + } + count.saturating_inc(); + } + weight.saturating_accrue( + <::WeightInfo as pallet_balances::WeightInfo>::force_unreserve().saturating_mul(count.into()), + ); + } + + log::info!( + target: LOG_TARGET, + "Unreserved locked bond of {} candidates, upgraded storage to version 2", + count, + ); + + weight.saturating_accrue(T::DbWeight::get().reads_writes(3, 2)); + weight + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::DispatchError> { + let number_of_candidates = Candidates::::get().to_vec().len(); + Ok((number_of_candidates as u32).encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_number_of_candidates: Vec) -> Result<(), sp_runtime::DispatchError> { + let new_number_of_candidates = Candidates::::get().to_vec().len(); + assert_eq!( + new_number_of_candidates, 0 as usize, + "after migration, the candidates map should be empty" + ); + Ok(()) + } + } +} + /// Version 1 Migration /// This migration ensures that any existing `Invulnerables` storage lists are sorted. pub mod v1 { @@ -90,3 +188,136 @@ pub mod v1 { } } } + +#[cfg(all(feature = "try-runtime", test))] +mod tests { + use super::*; + use crate::{ + migration::v2::Candidates, + mock::{new_test_ext, Balances, Test}, + }; + use frame_support::{ + traits::{Currency, ReservableCurrency, StorageVersion}, + BoundedVec, + }; + use sp_runtime::traits::ConstU32; + + #[test] + fn migrate_to_v2_with_new_candidates() { + new_test_ext().execute_with(|| { + let storage_version = StorageVersion::new(1); + storage_version.put::>(); + + let one = 1u64; + let two = 2u64; + let three = 3u64; + let deposit = 10u64; + + // Set balance to 100 + Balances::make_free_balance_be(&one, 100u64); + Balances::make_free_balance_be(&two, 100u64); + Balances::make_free_balance_be(&three, 100u64); + + // Reservations: 10 for the "old" candidacy and 10 for the "new" + Balances::reserve(&one, 10u64).unwrap(); // old + Balances::reserve(&two, 20u64).unwrap(); // old + new + Balances::reserve(&three, 10u64).unwrap(); // new + + // Candidate info + let candidate_one = CandidateInfo { who: one, deposit }; + let candidate_two = CandidateInfo { who: two, deposit }; + let candidate_three = CandidateInfo { who: three, deposit }; + + // Storage lists + let bounded_candidates = + BoundedVec::, ConstU32<20>>::try_from(vec![ + candidate_one.clone(), + candidate_two.clone(), + ]) + .expect("it works"); + let bounded_candidate_list = + BoundedVec::, ConstU32<20>>::try_from(vec![ + candidate_two.clone(), + candidate_three.clone(), + ]) + .expect("it works"); + + // Set storage + Candidates::::put(bounded_candidates); + CandidateList::::put(bounded_candidate_list.clone()); + + // Sanity check + assert_eq!(Balances::free_balance(one), 90); + assert_eq!(Balances::free_balance(two), 80); + assert_eq!(Balances::free_balance(three), 90); + + // Run migration + v2::MigrationToV2::::on_runtime_upgrade(); + + let new_storage_version = StorageVersion::get::>(); + assert_eq!(new_storage_version, 2); + + // 10 should have been unreserved from the old candidacy + assert_eq!(Balances::free_balance(one), 100); + assert_eq!(Balances::free_balance(two), 90); + assert_eq!(Balances::free_balance(three), 90); + // The storage item should be gone + assert!(Candidates::::get().is_empty()); + // The new storage 
item should be preserved + assert_eq!(CandidateList::::get(), bounded_candidate_list); + }); + } + + #[test] + fn migrate_to_v2_without_new_candidates() { + new_test_ext().execute_with(|| { + let storage_version = StorageVersion::new(1); + storage_version.put::>(); + + let one = 1u64; + let two = 2u64; + let deposit = 10u64; + + // Set balance to 100 + Balances::make_free_balance_be(&one, 100u64); + Balances::make_free_balance_be(&two, 100u64); + + // Reservations + Balances::reserve(&one, 10u64).unwrap(); // old + Balances::reserve(&two, 10u64).unwrap(); // old + + // Candidate info + let candidate_one = CandidateInfo { who: one, deposit }; + let candidate_two = CandidateInfo { who: two, deposit }; + + // Storage lists + let bounded_candidates = + BoundedVec::, ConstU32<20>>::try_from(vec![ + candidate_one.clone(), + candidate_two.clone(), + ]) + .expect("it works"); + + // Set storage + Candidates::::put(bounded_candidates.clone()); + + // Sanity check + assert_eq!(Balances::free_balance(one), 90); + assert_eq!(Balances::free_balance(two), 90); + + // Run migration + v2::MigrationToV2::::on_runtime_upgrade(); + + let new_storage_version = StorageVersion::get::>(); + assert_eq!(new_storage_version, 2); + + // Nothing changes deposit-wise + assert_eq!(Balances::free_balance(one), 90); + assert_eq!(Balances::free_balance(two), 90); + // The storage item should be gone + assert!(Candidates::::get().is_empty()); + // The new storage item should have the info now + assert_eq!(CandidateList::::get(), bounded_candidates); + }); + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 201647ac2eb..15173480463 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -973,10 +973,10 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. #[allow(deprecated)] pub type Migrations = ( - pallet_collator_selection::migration::v1::MigrateToV1, InitStorageVersions, // unreleased cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, + pallet_collator_selection::migration::v2::MigrationToV2, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 78c83cf6922..64127c80b6d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -963,7 +963,7 @@ pub type Migrations = ( // v9420 pallet_nfts::migration::v1::MigrateToV1, // unreleased - pallet_collator_selection::migration::v1::MigrateToV1, + pallet_collator_selection::migration::v2::MigrationToV2, // unreleased pallet_multisig::migrations::v1::MigrateToV1, // unreleased diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 1eac813b10c..109b081f937 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -139,7 +139,7 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. 
pub type Migrations = ( - pallet_collator_selection::migration::v1::MigrateToV1, + pallet_collator_selection::migration::v2::MigrationToV2, pallet_multisig::migrations::v1::MigrateToV1, InitStorageVersions, cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index b4ea2c79f64..cf09a1acc54 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -118,7 +118,7 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. pub type Migrations = ( - pallet_collator_selection::migration::v1::MigrateToV1, + pallet_collator_selection::migration::v2::MigrationToV2, pallet_multisig::migrations::v1::MigrateToV1, InitStorageVersions, // unreleased diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index c599ba37f12..7274e9acdcd 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -722,7 +722,7 @@ pub type UncheckedExtrinsic = /// `OnRuntimeUpgrade`. Included migrations must be idempotent. type Migrations = ( // unreleased - pallet_collator_selection::migration::v1::MigrateToV1, + pallet_collator_selection::migration::v2::MigrationToV2, // unreleased cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, // permanent diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index efa26fcbc22..988195d88d8 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -98,6 +98,8 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. pub type Migrations = ( + pallet_collator_selection::migration::v1::MigrateToV1, + pallet_collator_selection::migration::v2::MigrationToV2, cumulus_pallet_parachain_system::migration::Migration, cumulus_pallet_xcmp_queue::migration::v2::MigrationToV2, cumulus_pallet_xcmp_queue::migration::v3::MigrationToV3, diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index ad065ee3477..895890da7dd 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -108,6 +108,7 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. pub type Migrations = ( + pallet_collator_selection::migration::v2::MigrationToV2, cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, pallet_broker::migration::MigrateV0ToV1, // permanent diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 0f074226861..9d080087d5d 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -108,6 +108,7 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. 
pub type Migrations = ( + pallet_collator_selection::migration::v2::MigrationToV2, cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, pallet_broker::migration::MigrateV0ToV1, // permanent diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 3cd085fec63..4a57bad01c8 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -102,6 +102,7 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. pub type Migrations = ( + pallet_collator_selection::migration::v2::MigrationToV2, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 307ab90a477..22e8fd57d3c 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -102,6 +102,7 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. pub type Migrations = ( + pallet_collator_selection::migration::v2::MigrationToV2, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); diff --git a/prdoc/pr_4229.prdoc b/prdoc/pr_4229.prdoc new file mode 100644 index 00000000000..05af8e062a3 --- /dev/null +++ b/prdoc/pr_4229.prdoc @@ -0,0 +1,10 @@ +title: "Fix Stuck Collator Funds" + +doc: + - audience: Runtime Dev + description: | + Fixes stuck collator funds by providing a migration that should have been in PR 1340. + +crates: + - name: pallet-collator-selection + bump: patch -- GitLab From ffbce2a817ec2e7c8b7ce49f7ed6794584f19667 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 23 Apr 2024 17:37:24 +0200 Subject: [PATCH 059/107] pallet_broker: Let `start_sales` calculate and request the correct core count (#4221) --- prdoc/pr_4221.prdoc | 15 ++++++++ substrate/frame/broker/src/benchmarking.rs | 10 ++++-- .../frame/broker/src/dispatchable_impls.rs | 10 +++++- substrate/frame/broker/src/lib.rs | 23 +++++------- substrate/frame/broker/src/tests.rs | 36 ++++++++++++++----- 5 files changed, 68 insertions(+), 26 deletions(-) create mode 100644 prdoc/pr_4221.prdoc diff --git a/prdoc/pr_4221.prdoc b/prdoc/pr_4221.prdoc new file mode 100644 index 00000000000..e4941cce892 --- /dev/null +++ b/prdoc/pr_4221.prdoc @@ -0,0 +1,15 @@ +title: "pallet_broker::start_sales: Take `extra_cores` and not total cores" + +doc: + - audience: Runtime User + description: | + Change `pallet_broker::start_sales` to take `extra_cores` and not total cores. + It will calculate the total number of cores to offer based on number of + reservations plus number of leases plus `extra_cores`. Internally it will + also notify the relay chain of the required number of cores. 
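  As an editorial illustration of the new semantics (mirroring the `start_sales_sets_correct_core_count` test added later in this patch): with three leases and one pool reservation already registered, `start_sales(price, 5)` requests nine cores from the relay chain.

```rust
// Sketch based on the new test: three leases plus one reservation are
// registered, then sales are started with five extra cores, so the pallet
// requests 3 + 1 + 5 = 9 cores via `request_core_count`.
Broker::do_set_lease(1, 100).unwrap();
Broker::do_set_lease(2, 100).unwrap();
Broker::do_set_lease(3, 100).unwrap();
Broker::do_reserve(Schedule::truncate_from(vec![ScheduleItem {
    assignment: Pool,
    mask: CoreMask::complete(),
}]))
.unwrap();
Broker::do_start_sales(5, 5).unwrap();
```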
+ + Thus, starting the first sales with `pallet-broker` requires less brain power ;) + +crates: +- name: pallet-broker + bump: minor diff --git a/substrate/frame/broker/src/benchmarking.rs b/substrate/frame/broker/src/benchmarking.rs index 1fc1c3a101a..7533e3dc68c 100644 --- a/substrate/frame/broker/src/benchmarking.rs +++ b/substrate/frame/broker/src/benchmarking.rs @@ -189,11 +189,15 @@ mod benches { let config = new_config_record::(); Configuration::::put(config.clone()); + let mut extra_cores = n; + // Assume Reservations to be filled for worst case - setup_reservations::(T::MaxReservedCores::get()); + setup_reservations::(extra_cores.min(T::MaxReservedCores::get())); + extra_cores = extra_cores.saturating_sub(T::MaxReservedCores::get()); // Assume Leases to be filled for worst case - setup_leases::(T::MaxLeasedCores::get(), 1, 10); + setup_leases::(extra_cores.min(T::MaxLeasedCores::get()), 1, 10); + extra_cores = extra_cores.saturating_sub(T::MaxLeasedCores::get()); let latest_region_begin = Broker::::latest_timeslice_ready_to_commit(&config); @@ -203,7 +207,7 @@ mod benches { T::AdminOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; #[extrinsic_call] - _(origin as T::RuntimeOrigin, initial_price, n.try_into().unwrap()); + _(origin as T::RuntimeOrigin, initial_price, extra_cores.try_into().unwrap()); assert!(SaleInfo::::get().is_some()); assert_last_event::( diff --git a/substrate/frame/broker/src/dispatchable_impls.rs b/substrate/frame/broker/src/dispatchable_impls.rs index cb7393cc9e3..45a0a514c30 100644 --- a/substrate/frame/broker/src/dispatchable_impls.rs +++ b/substrate/frame/broker/src/dispatchable_impls.rs @@ -70,8 +70,16 @@ impl Pallet { Ok(()) } - pub(crate) fn do_start_sales(price: BalanceOf, core_count: CoreIndex) -> DispatchResult { + pub(crate) fn do_start_sales(price: BalanceOf, extra_cores: CoreIndex) -> DispatchResult { let config = Configuration::::get().ok_or(Error::::Uninitialized)?; + + // Determine the core count + let core_count = Leases::::decode_len().unwrap_or(0) as CoreIndex + + Reservations::::decode_len().unwrap_or(0) as CoreIndex + + extra_cores; + + Self::do_request_core_count(core_count)?; + let commit_timeslice = Self::latest_timeslice_ready_to_commit(&config); let status = StatusRecord { core_count, diff --git a/substrate/frame/broker/src/lib.rs b/substrate/frame/broker/src/lib.rs index 1ef9e59f018..d59c4c9c6b2 100644 --- a/substrate/frame/broker/src/lib.rs +++ b/substrate/frame/broker/src/lib.rs @@ -559,27 +559,22 @@ pub mod pallet { /// /// - `origin`: Must be Root or pass `AdminOrigin`. /// - `initial_price`: The price of Bulk Coretime in the first sale. - /// - `total_core_count`: This is the total number of cores the relay chain should have - /// after the sale concludes. + /// - `extra_cores`: Number of extra cores that should be requested on top of the cores + /// required for `Reservations` and `Leases`. /// - /// NOTE: This function does not actually request that new core count from the relay chain. - /// You need to make sure to call `request_core_count` afterwards to bring the relay chain - /// in sync. - /// - /// When to call the function depends on the new core count. If it is larger than what it - /// was before, you can call it immediately or even before `start_sales` as non allocated - /// cores will just be `Idle`. If you are actually reducing the number of cores, you should - /// call `request_core_count`, right before the next sale, to avoid shutting down tasks too - /// early. 
+ /// This will call [`Self::request_core_count`] internally to set the correct core count on + /// the relay chain. #[pallet::call_index(4)] - #[pallet::weight(T::WeightInfo::start_sales((*total_core_count).into()))] + #[pallet::weight(T::WeightInfo::start_sales( + T::MaxLeasedCores::get() + T::MaxReservedCores::get() + *extra_cores as u32 + ))] pub fn start_sales( origin: OriginFor, initial_price: BalanceOf, - total_core_count: CoreIndex, + extra_cores: CoreIndex, ) -> DispatchResultWithPostInfo { T::AdminOrigin::ensure_origin_or_root(origin)?; - Self::do_start_sales(initial_price, total_core_count)?; + Self::do_start_sales(initial_price, extra_cores)?; Ok(Pays::No.into()) } diff --git a/substrate/frame/broker/src/tests.rs b/substrate/frame/broker/src/tests.rs index c573b6c55a2..f929f0d50dc 100644 --- a/substrate/frame/broker/src/tests.rs +++ b/substrate/frame/broker/src/tests.rs @@ -329,7 +329,7 @@ fn nft_metadata_works() { fn migration_works() { TestExt::new().endow(1, 1000).execute_with(|| { assert_ok!(Broker::do_set_lease(1000, 8)); - assert_ok!(Broker::do_start_sales(100, 2)); + assert_ok!(Broker::do_start_sales(100, 1)); // Sale is for regions from TS4..7 // Not ending in this sale period. @@ -385,7 +385,7 @@ fn instapool_payouts_work() { TestExt::new().endow(1, 1000).execute_with(|| { let item = ScheduleItem { assignment: Pool, mask: CoreMask::complete() }; assert_ok!(Broker::do_reserve(Schedule::truncate_from(vec![item]))); - assert_ok!(Broker::do_start_sales(100, 3)); + assert_ok!(Broker::do_start_sales(100, 2)); advance_to(2); let region = Broker::do_purchase(1, u64::max_value()).unwrap(); assert_ok!(Broker::do_pool(region, None, 2, Final)); @@ -411,7 +411,7 @@ fn instapool_partial_core_payouts_work() { TestExt::new().endow(1, 1000).execute_with(|| { let item = ScheduleItem { assignment: Pool, mask: CoreMask::complete() }; assert_ok!(Broker::do_reserve(Schedule::truncate_from(vec![item]))); - assert_ok!(Broker::do_start_sales(100, 2)); + assert_ok!(Broker::do_start_sales(100, 1)); advance_to(2); let region = Broker::do_purchase(1, u64::max_value()).unwrap(); let (region1, region2) = @@ -477,7 +477,7 @@ fn initialize_with_system_paras_works() { ScheduleItem { assignment: Task(4u32), mask: 0x00000_00000_00000_fffff.into() }, ]; assert_ok!(Broker::do_reserve(Schedule::truncate_from(items))); - assert_ok!(Broker::do_start_sales(100, 2)); + assert_ok!(Broker::do_start_sales(100, 0)); advance_to(10); assert_eq!( CoretimeTrace::get(), @@ -510,7 +510,7 @@ fn initialize_with_leased_slots_works() { TestExt::new().execute_with(|| { assert_ok!(Broker::do_set_lease(1000, 6)); assert_ok!(Broker::do_set_lease(1001, 7)); - assert_ok!(Broker::do_start_sales(100, 2)); + assert_ok!(Broker::do_start_sales(100, 0)); advance_to(18); let end_hint = None; assert_eq!( @@ -925,7 +925,7 @@ fn leases_can_be_renewed() { assert_ok!(Broker::do_set_lease(2001, 9)); assert_eq!(Leases::::get().len(), 1); // Start the sales with only one core for this lease. - assert_ok!(Broker::do_start_sales(100, 1)); + assert_ok!(Broker::do_start_sales(100, 0)); // Advance to sale period 1, we should get an AllowedRenewal for task 2001 for the next // sale. @@ -1018,7 +1018,7 @@ fn short_leases_cannot_be_renewed() { assert_ok!(Broker::do_set_lease(2001, 3)); assert_eq!(Leases::::get().len(), 1); // Start the sales with one core for this lease. - assert_ok!(Broker::do_start_sales(100, 1)); + assert_ok!(Broker::do_start_sales(100, 0)); // The lease is removed. 
assert_eq!(Leases::::get().len(), 0); @@ -1290,7 +1290,7 @@ fn renewal_works_leases_ended_before_start_sales() { )); // This intializes the first sale and the period 0. - assert_ok!(Broker::do_start_sales(100, 2)); + assert_ok!(Broker::do_start_sales(100, 0)); assert_noop!(Broker::do_renew(1, 1), Error::::Unavailable); assert_noop!(Broker::do_renew(1, 0), Error::::Unavailable); @@ -1408,3 +1408,23 @@ fn renewal_works_leases_ended_before_start_sales() { ); }); } + +#[test] +fn start_sales_sets_correct_core_count() { + TestExt::new().endow(1, 1000).execute_with(|| { + advance_to(1); + + Broker::do_set_lease(1, 100).unwrap(); + Broker::do_set_lease(2, 100).unwrap(); + Broker::do_set_lease(3, 100).unwrap(); + Broker::do_reserve(Schedule::truncate_from(vec![ScheduleItem { + assignment: Pool, + mask: CoreMask::complete(), + }])) + .unwrap(); + + Broker::do_start_sales(5, 5).unwrap(); + + System::assert_has_event(Event::::CoreCountRequested { core_count: 9 }.into()); + }) +} -- GitLab From 0a56d071c75856aacb2bf90dd8aaf29399a28e69 Mon Sep 17 00:00:00 2001 From: gupnik Date: Wed, 24 Apr 2024 11:25:54 +0530 Subject: [PATCH 060/107] Adds ability to trigger tasks via unsigned transactions (#4075) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR updates the `validate_unsigned` hook for `frame_system` to allow valid tasks to be submitted as unsigned transactions. It also updates the task example to be able to submit such transactions via an off-chain worker. --------- Co-authored-by: Bastian Köcher --- prdoc/pr_4075.prdoc | 19 +++++++++++ substrate/frame/examples/tasks/src/lib.rs | 38 +++++++++++++++++++-- substrate/frame/examples/tasks/src/mock.rs | 21 ++++++++++++ substrate/frame/examples/tasks/src/tests.rs | 30 ++++++++++++++++ substrate/frame/support/src/lib.rs | 3 ++ substrate/frame/support/src/traits/tasks.rs | 4 +++ substrate/frame/system/src/lib.rs | 16 +++++++-- 7 files changed, 126 insertions(+), 5 deletions(-) create mode 100644 prdoc/pr_4075.prdoc diff --git a/prdoc/pr_4075.prdoc b/prdoc/pr_4075.prdoc new file mode 100644 index 00000000000..05e54073b6c --- /dev/null +++ b/prdoc/pr_4075.prdoc @@ -0,0 +1,19 @@ +title: Adds ability to trigger tasks via unsigned transactions + +doc: + - audience: Runtime Dev + description: | + This PR updates the `validate_unsigned` hook for `frame_system` to allow valid tasks + to be submitted as unsigned transactions. It also updates the task example to be able to + submit such transactions via an off-chain worker. + + Note that `is_valid` call on a task MUST be cheap with minimal to no storage reads. + Else, it can make the blockchain vulnerable to DoS attacks. + + Further, these tasks will be executed in a random order. + +crates: + - name: frame-system + bump: patch + - name: pallet-example-tasks + bump: minor diff --git a/substrate/frame/examples/tasks/src/lib.rs b/substrate/frame/examples/tasks/src/lib.rs index c65d8095bcf..1908a235ba1 100644 --- a/substrate/frame/examples/tasks/src/lib.rs +++ b/substrate/frame/examples/tasks/src/lib.rs @@ -19,6 +19,9 @@ #![cfg_attr(not(feature = "std"), no_std)] use frame_support::dispatch::DispatchResult; +use frame_system::offchain::SendTransactionTypes; +#[cfg(feature = "experimental")] +use frame_system::offchain::SubmitTransaction; // Re-export pallet items so that they can be accessed from the crate namespace. 
pub use pallet::*; @@ -31,10 +34,14 @@ mod benchmarking; pub mod weights; pub use weights::*; +#[cfg(feature = "experimental")] +const LOG_TARGET: &str = "pallet-example-tasks"; + #[frame_support::pallet(dev_mode)] pub mod pallet { use super::*; use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; #[pallet::error] pub enum Error { @@ -59,9 +66,36 @@ pub mod pallet { } } + #[pallet::hooks] + impl Hooks> for Pallet { + #[cfg(feature = "experimental")] + fn offchain_worker(_block_number: BlockNumberFor) { + if let Some(key) = Numbers::::iter_keys().next() { + // Create a valid task + let task = Task::::AddNumberIntoTotal { i: key }; + let runtime_task = ::RuntimeTask::from(task); + let call = frame_system::Call::::do_task { task: runtime_task.into() }; + + // Submit the task as an unsigned transaction + let res = + SubmitTransaction::>::submit_unsigned_transaction( + call.into(), + ); + match res { + Ok(_) => log::info!(target: LOG_TARGET, "Submitted the task."), + Err(e) => log::error!(target: LOG_TARGET, "Error submitting task: {:?}", e), + } + } + } + } + #[pallet::config] - pub trait Config: frame_system::Config { - type RuntimeTask: frame_support::traits::Task; + pub trait Config: + SendTransactionTypes> + frame_system::Config + { + type RuntimeTask: frame_support::traits::Task + + IsType<::RuntimeTask> + + From>; type WeightInfo: WeightInfo; } diff --git a/substrate/frame/examples/tasks/src/mock.rs b/substrate/frame/examples/tasks/src/mock.rs index 76ac9e76bff..33912bb5269 100644 --- a/substrate/frame/examples/tasks/src/mock.rs +++ b/substrate/frame/examples/tasks/src/mock.rs @@ -20,6 +20,7 @@ use crate::{self as tasks_example}; use frame_support::derive_impl; +use sp_runtime::testing::TestXt; pub type AccountId = u32; pub type Balance = u32; @@ -32,12 +33,32 @@ frame_support::construct_runtime!( } ); +pub type Extrinsic = TestXt; + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { type Block = Block; } +impl frame_system::offchain::SendTransactionTypes for Runtime +where + RuntimeCall: From, +{ + type OverarchingCall = RuntimeCall; + type Extrinsic = Extrinsic; +} + impl tasks_example::Config for Runtime { type RuntimeTask = RuntimeTask; type WeightInfo = (); } + +pub fn advance_to(b: u64) { + #[cfg(feature = "experimental")] + use frame_support::traits::Hooks; + while System::block_number() < b { + System::set_block_number(System::block_number() + 1); + #[cfg(feature = "experimental")] + TasksExample::offchain_worker(System::block_number()); + } +} diff --git a/substrate/frame/examples/tasks/src/tests.rs b/substrate/frame/examples/tasks/src/tests.rs index fc3c69f4aef..6c8acb0194b 100644 --- a/substrate/frame/examples/tasks/src/tests.rs +++ b/substrate/frame/examples/tasks/src/tests.rs @@ -19,7 +19,11 @@ #![cfg(test)] use crate::{mock::*, Numbers}; +#[cfg(feature = "experimental")] +use codec::Decode; use frame_support::traits::Task; +#[cfg(feature = "experimental")] +use sp_core::offchain::{testing, OffchainWorkerExt, TransactionPoolExt}; use sp_runtime::BuildStorage; #[cfg(feature = "experimental")] @@ -130,3 +134,29 @@ fn task_execution_fails_for_invalid_task() { ); }); } + +#[cfg(feature = "experimental")] +#[test] +fn task_with_offchain_worker() { + let (offchain, _offchain_state) = testing::TestOffchainExt::new(); + let (pool, pool_state) = testing::TestTransactionPoolExt::new(); + + let mut t = sp_io::TestExternalities::default(); + t.register_extension(OffchainWorkerExt::new(offchain)); + 
t.register_extension(TransactionPoolExt::new(pool)); + + t.execute_with(|| { + advance_to(1); + assert!(pool_state.read().transactions.is_empty()); + + Numbers::<Runtime>::insert(0, 10); + assert_eq!(crate::Total::<Runtime>::get(), (0, 0)); + + advance_to(2); + + let tx = pool_state.write().transactions.pop().unwrap(); + assert!(pool_state.read().transactions.is_empty()); + let tx = Extrinsic::decode(&mut &*tx).unwrap(); + assert_eq!(tx.signature, None); + }); +} diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 984a7f7537f..7eddea1259d 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -2465,6 +2465,9 @@ pub mod pallet_macros { /// Finally, the `RuntimeTask` can then be used by a script or off-chain worker to create and /// submit such tasks via an extrinsic defined in `frame_system` called `do_task`. /// + /// When submitted as unsigned transactions (for example via an off-chain worker), note + /// that the tasks will be executed in a random order. + /// /// ## Example #[doc = docify::embed!("src/tests/tasks.rs", tasks_example)] /// Now, this can be executed as follows: diff --git a/substrate/frame/support/src/traits/tasks.rs b/substrate/frame/support/src/traits/tasks.rs index 24f3430cf50..42b837e5597 100644 --- a/substrate/frame/support/src/traits/tasks.rs +++ b/substrate/frame/support/src/traits/tasks.rs @@ -46,6 +46,10 @@ pub trait Task: Sized + FullCodec + TypeInfo + Clone + Debug + PartialEq + Eq { fn iter() -> Self::Enumeration; /// Checks if a particular instance of this `Task` variant is a valid piece of work. + /// + /// This is used to validate tasks for unsigned execution. Hence, it MUST be cheap + /// with minimal to no storage reads. Else, it can make the blockchain vulnerable + /// to DoS attacks. fn is_valid(&self) -> bool; /// Performs the work for this particular `Task` variant. diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 184f27b61ed..30df4dcfd43 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -741,9 +741,7 @@ pub mod pallet { #[cfg(feature = "experimental")] #[pallet::call_index(8)] #[pallet::weight(task.weight())] - pub fn do_task(origin: OriginFor<T>, task: T::RuntimeTask) -> DispatchResultWithPostInfo { - ensure_signed(origin)?; - + pub fn do_task(_origin: OriginFor<T>, task: T::RuntimeTask) -> DispatchResultWithPostInfo { if !task.is_valid() { return Err(Error::<T>::InvalidTask.into()) } @@ -1032,6 +1030,18 @@ pub mod pallet { }) } } + #[cfg(feature = "experimental")] + if let Call::do_task { ref task } = call { + if task.is_valid() { + return Ok(ValidTransaction { + priority: u64::max_value(), + requires: Vec::new(), + provides: vec![T::Hashing::hash_of(&task.encode()).as_ref().to_vec()], + longevity: TransactionLongevity::max_value(), + propagate: true, + }) + } + } Err(InvalidTransaction::Call.into()) } } -- GitLab From 9a0049d0da59b8b842f64fae441b34dba3408430 Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Wed, 24 Apr 2024 09:15:39 +0300 Subject: [PATCH 061/107] Plumbing to increase pvf workers configuration based on chain id (#4252) Part of https://github.com/paritytech/polkadot-sdk/issues/4126. We want to safely increase `execute_workers_max_num` gradually from chain to chain and assess whether there are any negative impacts.
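As an editorial illustration: the new knobs surface both as CLI flags (`--execute-workers-max-num`, `--prepare-workers-soft-max-num`, `--prepare-workers-hard-max-num`) and as optional fields on `NewFullParams`. A sketch for embedders, where `params` is an otherwise fully initialised `NewFullParams` value (hypothetical variable name):

```rust
// `None` keeps the chain-dependent defaults chosen by the service;
// `Some(n)` forces the value regardless of the chain id.
params.execute_workers_max_num = Some(4);
params.prepare_workers_soft_max_num = None;
params.prepare_workers_hard_max_num = None;
```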
This PR performs the necessary plumbing to be able to increase it based on
the chain ID. It increases the number of execution workers from 2 to 4 on
test networks, but leaves Kusama and Polkadot unchanged until we gather more
data.

---------

Signed-off-by: Alexandru Gheorghe
---
 .../src/lib.rs                                |  3 +++
 polkadot/cli/src/cli.rs                       | 17 ++++++++++++++++
 polkadot/cli/src/command.rs                   |  3 +++
 .../node/core/candidate-validation/src/lib.rs | 13 ++++++++++++
 .../benches/host_prepare_rococo_runtime.rs    |  3 +++
 polkadot/node/core/pvf/src/host.rs            |  9 ++++++---
 polkadot/node/core/pvf/tests/it/main.rs       |  3 +++
 polkadot/node/service/src/lib.rs              | 20 +++++++++++++++++++
 polkadot/node/test/service/src/lib.rs         |  6 ++++++
 .../adder/collator/src/main.rs                |  3 +++
 .../undying/collator/src/main.rs              |  3 +++
 prdoc/pr_4252.prdoc                           | 15 ++++++++++++++
 12 files changed, 95 insertions(+), 3 deletions(-)
 create mode 100644 prdoc/pr_4252.prdoc

diff --git a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs
index 6ea02b2e7c1..578b942776d 100644
--- a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs
+++ b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs
@@ -312,6 +312,9 @@ fn build_polkadot_full_node(
 			overseer_message_channel_capacity_override: None,
 			malus_finality_delay: None,
 			hwbench,
+			execute_workers_max_num: None,
+			prepare_workers_hard_max_num: None,
+			prepare_workers_soft_max_num: None,
 		},
 	)?;
 
diff --git a/polkadot/cli/src/cli.rs b/polkadot/cli/src/cli.rs
index 3737942e6e5..3e5a6ccdd3c 100644
--- a/polkadot/cli/src/cli.rs
+++ b/polkadot/cli/src/cli.rs
@@ -131,6 +131,23 @@ pub struct RunCmd {
 	#[arg(long, value_name = "PATH")]
 	pub workers_path: Option,
 
+	/// Override the maximum number of pvf execute workers.
+	///
+	/// **Dangerous!** Do not touch unless explicitly advised to.
+	#[arg(long)]
+	pub execute_workers_max_num: Option,
+	/// Override the maximum number of pvf workers that can be spawned in the pvf prepare
+	/// pool for tasks with the priority below critical.
+	///
+	/// **Dangerous!** Do not touch unless explicitly advised to.
+
+	#[arg(long)]
+	pub prepare_workers_soft_max_num: Option,
+	/// Override the absolute number of pvf workers that can be spawned in the pvf prepare pool.
+	///
+	/// **Dangerous!** Do not touch unless explicitly advised to.
+	#[arg(long)]
+	pub prepare_workers_hard_max_num: Option,
 	/// TESTING ONLY: disable the version check between nodes and workers.
 	#[arg(long, hide = true)]
 	pub disable_worker_version_check: bool,
diff --git a/polkadot/cli/src/command.rs b/polkadot/cli/src/command.rs
index 6af93a75638..f5ee538e8ce 100644
--- a/polkadot/cli/src/command.rs
+++ b/polkadot/cli/src/command.rs
@@ -253,6 +253,9 @@ where
 					.overseer_channel_capacity_override,
 				malus_finality_delay: maybe_malus_finality_delay,
 				hwbench,
+				execute_workers_max_num: cli.run.execute_workers_max_num,
+				prepare_workers_hard_max_num: cli.run.prepare_workers_hard_max_num,
+				prepare_workers_soft_max_num: cli.run.prepare_workers_soft_max_num,
 			},
 		)
 		.map(|full| full.task_manager)?;
diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs
index 8663dc43835..08881dad196 100644
--- a/polkadot/node/core/candidate-validation/src/lib.rs
+++ b/polkadot/node/core/candidate-validation/src/lib.rs
@@ -100,6 +100,13 @@ pub struct Config {
 	pub prep_worker_path: PathBuf,
 	/// Path to the execution worker binary
 	pub exec_worker_path: PathBuf,
+	/// The maximum number of pvf execution workers.
+ pub pvf_execute_workers_max_num: usize, + /// The maximum number of pvf workers that can be spawned in the pvf prepare pool for tasks + /// with the priority below critical. + pub pvf_prepare_workers_soft_max_num: usize, + /// The absolute number of pvf workers that can be spawned in the pvf prepare pool. + pub pvf_prepare_workers_hard_max_num: usize, } /// The candidate validation subsystem. @@ -224,6 +231,9 @@ async fn run( secure_validator_mode, prep_worker_path, exec_worker_path, + pvf_execute_workers_max_num, + pvf_prepare_workers_soft_max_num, + pvf_prepare_workers_hard_max_num, }: Config, ) -> SubsystemResult<()> { let (validation_host, task) = polkadot_node_core_pvf::start( @@ -233,6 +243,9 @@ async fn run( secure_validator_mode, prep_worker_path, exec_worker_path, + pvf_execute_workers_max_num, + pvf_prepare_workers_soft_max_num, + pvf_prepare_workers_hard_max_num, ), pvf_metrics, ) diff --git a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs index 2aea21361a3..97a03e6596d 100644 --- a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs +++ b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs @@ -48,6 +48,9 @@ impl TestHost { false, prepare_worker_path, execute_worker_path, + 2, + 1, + 2, ); f(&mut config); let (host, task) = start(config, Metrics::default()).await.unwrap(); diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index 2d180fc5929..4065598a3ac 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -188,6 +188,9 @@ impl Config { secure_validator_mode: bool, prepare_worker_program_path: PathBuf, execute_worker_program_path: PathBuf, + execute_workers_max_num: usize, + prepare_workers_soft_max_num: usize, + prepare_workers_hard_max_num: usize, ) -> Self { Self { cache_path, @@ -196,12 +199,12 @@ impl Config { prepare_worker_program_path, prepare_worker_spawn_timeout: Duration::from_secs(3), - prepare_workers_soft_max_num: 1, - prepare_workers_hard_max_num: 2, + prepare_workers_soft_max_num, + prepare_workers_hard_max_num, execute_worker_program_path, execute_worker_spawn_timeout: Duration::from_secs(3), - execute_workers_max_num: 2, + execute_workers_max_num, } } } diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index 16ef23c69ca..56cc681aff3 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -63,6 +63,9 @@ impl TestHost { false, prepare_worker_path, execute_worker_path, + 2, + 1, + 2, ); f(&mut config); let (host, task) = start(config, Metrics::default()).await.unwrap(); diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 22231c84b1d..e5c29172099 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -643,6 +643,13 @@ pub struct NewFullParams { pub workers_path: Option, /// Optional custom names for the prepare and execute workers. pub workers_names: Option<(String, String)>, + /// An optional number of the maximum number of pvf execute workers. + pub execute_workers_max_num: Option, + /// An optional maximum number of pvf workers that can be spawned in the pvf prepare pool for + /// tasks with the priority below critical. + pub prepare_workers_soft_max_num: Option, + /// An optional absolute number of pvf workers that can be spawned in the pvf prepare pool. 
+	pub prepare_workers_hard_max_num: Option,
 	pub overseer_gen: OverseerGenerator,
 	pub overseer_message_channel_capacity_override: Option,
 	#[allow(dead_code)]
@@ -738,6 +745,9 @@ pub fn new_full<
 		overseer_message_channel_capacity_override,
 		malus_finality_delay: _malus_finality_delay,
 		hwbench,
+		execute_workers_max_num,
+		prepare_workers_soft_max_num,
+		prepare_workers_hard_max_num,
 	}: NewFullParams,
 ) -> Result {
 	use polkadot_node_network_protocol::request_response::IncomingRequest;
@@ -943,6 +953,16 @@ pub fn new_full<
 				secure_validator_mode,
 				prep_worker_path,
 				exec_worker_path,
+				pvf_execute_workers_max_num: execute_workers_max_num.unwrap_or_else(
+					|| match config.chain_spec.identify_chain() {
+						// The intention is to gradually increase this configuration from 2 to 4,
+						// chain by chain, until it reaches the production chains.
+						Chain::Polkadot | Chain::Kusama => 2,
+						Chain::Rococo | Chain::Westend | Chain::Unknown => 4,
+					},
+				),
+				pvf_prepare_workers_soft_max_num: prepare_workers_soft_max_num.unwrap_or(1),
+				pvf_prepare_workers_hard_max_num: prepare_workers_hard_max_num.unwrap_or(2),
 			})
 		} else {
 			None
diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs
index d313c193334..87fbc7c20f3 100644
--- a/polkadot/node/test/service/src/lib.rs
+++ b/polkadot/node/test/service/src/lib.rs
@@ -97,6 +97,9 @@ pub fn new_full(
 					overseer_message_channel_capacity_override: None,
 					malus_finality_delay: None,
 					hwbench: None,
+					execute_workers_max_num: None,
+					prepare_workers_hard_max_num: None,
+					prepare_workers_soft_max_num: None,
 				},
 			),
 		sc_network::config::NetworkBackendType::Litep2p =>
@@ -116,6 +119,9 @@ pub fn new_full(
 					overseer_message_channel_capacity_override: None,
 					malus_finality_delay: None,
 					hwbench: None,
+					execute_workers_max_num: None,
+					prepare_workers_hard_max_num: None,
+					prepare_workers_soft_max_num: None,
 				},
 			),
 	}
diff --git a/polkadot/parachain/test-parachains/adder/collator/src/main.rs b/polkadot/parachain/test-parachains/adder/collator/src/main.rs
index fec90fc41cd..e8588274df2 100644
--- a/polkadot/parachain/test-parachains/adder/collator/src/main.rs
+++ b/polkadot/parachain/test-parachains/adder/collator/src/main.rs
@@ -95,6 +95,9 @@ fn main() -> Result<()> {
 				overseer_message_channel_capacity_override: None,
 				malus_finality_delay: None,
 				hwbench: None,
+				execute_workers_max_num: None,
+				prepare_workers_hard_max_num: None,
+				prepare_workers_soft_max_num: None,
 			},
 		)
 		.map_err(|e| e.to_string())?;
diff --git a/polkadot/parachain/test-parachains/undying/collator/src/main.rs b/polkadot/parachain/test-parachains/undying/collator/src/main.rs
index 45f21e7b859..7198a831a47 100644
--- a/polkadot/parachain/test-parachains/undying/collator/src/main.rs
+++ b/polkadot/parachain/test-parachains/undying/collator/src/main.rs
@@ -97,6 +97,9 @@ fn main() -> Result<()> {
 				overseer_message_channel_capacity_override: None,
 				malus_finality_delay: None,
 				hwbench: None,
+				execute_workers_max_num: None,
+				prepare_workers_hard_max_num: None,
+				prepare_workers_soft_max_num: None,
 			},
 		)
 		.map_err(|e| e.to_string())?;
diff --git a/prdoc/pr_4252.prdoc b/prdoc/pr_4252.prdoc
new file mode 100644
index 00000000000..22987b46845
--- /dev/null
+++ b/prdoc/pr_4252.prdoc
@@ -0,0 +1,15 @@
+title: "Add logic to increase pvf workers based on chain"
+
+doc:
+  - audience: Node Operator
+    description: |
+      New logic and CLI parameters were added to allow increasing the number of pvf
+      workers based on the chain ID.
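For embedders that construct the node through `NewFullParams`, all three knobs
are optional and `None` keeps the defaults shown above. A minimal sketch,
assuming an already-populated `base_params` value (hypothetical variable
name):

```rust
// Sketch: opting into explicit PVF worker limits via `NewFullParams`.
// `None` keeps the defaults (chain-based for execute workers, 1/2 for prepare).
let params = NewFullParams {
    execute_workers_max_num: Some(4),   // explicit override beats the chain-based default
    prepare_workers_soft_max_num: None, // keep the default: 1
    prepare_workers_hard_max_num: None, // keep the default: 2
    ..base_params
};
```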
+
+crates:
+  - name: polkadot-node-core-candidate-validation
+    bump: minor
+  - name: polkadot-cli
+    bump: minor
+  - name: polkadot-service
+    bump: minor
-- 
GitLab


From e0584a153df63ff138d12764085422ed06de548a Mon Sep 17 00:00:00 2001
From: Adrian Catangiu
Date: Wed, 24 Apr 2024 11:44:42 +0300
Subject: [PATCH 062/107] pallet-xcm::transfer_assets_using_type() supports
 custom actions on destination (#4260)

Change `transfer_assets_using_type()` to not assume `DepositAsset` as the
intended use of the assets on the destination. Instead, it provides the
caller with the ability to specify custom XCM that will be executed on the
`dest` chain as the last step of the transfer, thus allowing custom use cases
for the transferred assets. E.g. some are used/swapped/etc. there, while some
are sent further to yet another chain.

Note: this is a follow-up on
https://github.com/paritytech/polkadot-sdk/pull/3695, bringing in an API
change for `transfer_assets_using_type()`. This is ok as the previous version
has not yet been released. Thus, its first release will include the new API
proposed by this PR.

This allows use cases such as:
https://forum.polkadot.network/t/managing-sas-on-multiple-reserve-chains-for-same-asset/7538/4

BTW: all this pallet-xcm asset transfers code will be massively reduced once
we have https://github.com/paritytech/xcm-format/pull/54

---------

Signed-off-by: Adrian Catangiu
---
 .../tests/assets/asset-hub-rococo/src/lib.rs  |   5 +-
 ...ssets_transfers.rs => hybrid_transfers.rs} | 203 +++++++++++++++++-
 .../assets/asset-hub-rococo/src/tests/mod.rs  |   2 +-
 .../src/tests/reserve_transfer.rs             |   2 +-
 .../tests/assets/asset-hub-westend/src/lib.rs |   5 +-
 ...ssets_transfers.rs => hybrid_transfers.rs} | 203 +++++++++++++++++-
 .../assets/asset-hub-westend/src/tests/mod.rs |   2 +-
 .../src/tests/reserve_transfer.rs             |   2 +-
 .../src/tests/asset_transfers.rs              |   8 +-
 .../src/tests/asset_transfers.rs              |   8 +-
 polkadot/xcm/pallet-xcm/src/lib.rs            | 196 +++++++++--------
 prdoc/pr_3695.prdoc                           |   9 +-
 12 files changed, 530 insertions(+), 115 deletions(-)
 rename cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/{foreign_assets_transfers.rs => hybrid_transfers.rs} (76%)
 rename cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/{foreign_assets_transfers.rs => hybrid_transfers.rs} (76%)

diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs
index 322c6cf1f22..2bd388bee40 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs
@@ -70,7 +70,9 @@ mod imports {
 		LocalReservableFromAssetHub as PenpalLocalReservableFromAssetHub,
 		LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub,
 	};
-	pub use rococo_runtime::xcm_config::XcmConfig as RococoXcmConfig;
+	pub use rococo_runtime::xcm_config::{
+		UniversalLocation as RococoUniversalLocation, XcmConfig as RococoXcmConfig,
+	};
 
 	pub const ASSET_ID: u32 = 3;
 	pub const ASSET_MIN_BALANCE: u128 = 1000;
@@ -83,6 +85,7 @@ mod imports {
 	pub type ParaToSystemParaTest = Test;
 	pub type ParaToParaThroughRelayTest = Test;
 	pub type ParaToParaThroughAHTest = Test;
+	pub type RelayToParaThroughAHTest = Test;
 }
 
 #[cfg(test)]
diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/foreign_assets_transfers.rs
b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs similarity index 76% rename from cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/foreign_assets_transfers.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs index 6bdf89e6f27..edaaa998a9c 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/foreign_assets_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs @@ -54,14 +54,18 @@ fn para_to_para_assethub_hop_assertions(t: ParaToParaThroughAHTest) { fn ah_to_para_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { let fee_idx = t.args.fee_asset_item as usize; let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); - ::PolkadotXcm::transfer_assets_using_type( + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }]); + ::PolkadotXcm::transfer_assets_using_type_and_then( t.signed_origin, bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), bx!(TransferType::LocalReserve), bx!(fee.id.into()), bx!(TransferType::LocalReserve), + bx!(VersionedXcm::from(custom_xcm_on_dest)), t.args.weight_limit, ) } @@ -69,14 +73,18 @@ fn ah_to_para_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { fn para_to_ah_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult { let fee_idx = t.args.fee_asset_item as usize; let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); - ::PolkadotXcm::transfer_assets_using_type( + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }]); + ::PolkadotXcm::transfer_assets_using_type_and_then( t.signed_origin, bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), bx!(TransferType::DestinationReserve), bx!(fee.id.into()), bx!(TransferType::DestinationReserve), + bx!(VersionedXcm::from(custom_xcm_on_dest)), t.args.weight_limit, ) } @@ -85,14 +93,18 @@ fn para_to_para_transfer_assets_through_ah(t: ParaToParaThroughAHTest) -> Dispat let fee_idx = t.args.fee_asset_item as usize; let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); let asset_hub_location: Location = PenpalA::sibling_location_of(AssetHubRococo::para_id()); - ::PolkadotXcm::transfer_assets_using_type( + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }]); + ::PolkadotXcm::transfer_assets_using_type_and_then( t.signed_origin, bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), bx!(TransferType::RemoteReserve(asset_hub_location.clone().into())), bx!(fee.id.into()), bx!(TransferType::RemoteReserve(asset_hub_location.into())), + bx!(VersionedXcm::from(custom_xcm_on_dest)), t.args.weight_limit, ) } @@ -100,14 +112,18 @@ fn para_to_para_transfer_assets_through_ah(t: ParaToParaThroughAHTest) -> Dispat fn para_to_asset_hub_teleport_foreign_assets(t: ParaToSystemParaTest) -> DispatchResult { let fee_idx = t.args.fee_asset_item as usize; let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); - ::PolkadotXcm::transfer_assets_using_type( + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + 
assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }]); + ::PolkadotXcm::transfer_assets_using_type_and_then( t.signed_origin, bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), bx!(TransferType::Teleport), bx!(fee.id.into()), bx!(TransferType::DestinationReserve), + bx!(VersionedXcm::from(custom_xcm_on_dest)), t.args.weight_limit, ) } @@ -115,14 +131,18 @@ fn para_to_asset_hub_teleport_foreign_assets(t: ParaToSystemParaTest) -> Dispatc fn asset_hub_to_para_teleport_foreign_assets(t: SystemParaToParaTest) -> DispatchResult { let fee_idx = t.args.fee_asset_item as usize; let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); - ::PolkadotXcm::transfer_assets_using_type( + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }]); + ::PolkadotXcm::transfer_assets_using_type_and_then( t.signed_origin, bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), bx!(TransferType::Teleport), bx!(fee.id.into()), bx!(TransferType::LocalReserve), + bx!(VersionedXcm::from(custom_xcm_on_dest)), t.args.weight_limit, ) } @@ -626,3 +646,166 @@ fn bidirectional_teleport_foreign_asset_between_para_and_asset_hub_using_explici asset_hub_to_para_teleport_foreign_assets, ); } + +// =============================================================== +// ===== Transfer - Native Asset - Relay->AssetHub->Parachain ==== +// =============================================================== +/// Transfers of native asset Relay to Parachain (using AssetHub reserve). Parachains want to avoid +/// managing SAs on all system chains, thus want all their DOT-in-reserve to be held in their +/// Sovereign Account on Asset Hub. 
+#[test] +fn transfer_native_asset_from_relay_to_para_through_asset_hub() { + // Init values for Relay + let destination = Rococo::child_location_of(PenpalA::para_id()); + let sender = RococoSender::get(); + let amount_to_send: Balance = ROCOCO_ED * 1000; + + // Init values for Parachain + let relay_native_asset_location = RelayLocation::get(); + let receiver = PenpalAReceiver::get(); + + // Init Test + let test_args = TestContext { + sender, + receiver: receiver.clone(), + args: TestArgs::new_relay(destination.clone(), receiver.clone(), amount_to_send), + }; + let mut test = RelayToParaThroughAHTest::new(test_args); + + let sov_penpal_on_ah = AssetHubRococo::sovereign_account_id_of( + AssetHubRococo::sibling_location_of(PenpalA::para_id()), + ); + // Query initial balances + let sender_balance_before = test.sender.balance; + let sov_penpal_on_ah_before = AssetHubRococo::execute_with(|| { + ::Balances::free_balance(sov_penpal_on_ah.clone()) + }); + let receiver_assets_before = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(relay_native_asset_location.clone(), &receiver) + }); + + fn relay_assertions(t: RelayToParaThroughAHTest) { + type RuntimeEvent = ::RuntimeEvent; + Rococo::assert_xcm_pallet_attempted_complete(None); + assert_expected_events!( + Rococo, + vec![ + // Amount to teleport is withdrawn from Sender + RuntimeEvent::Balances(pallet_balances::Event::Burned { who, amount }) => { + who: *who == t.sender.account_id, + amount: *amount == t.args.amount, + }, + // Amount to teleport is deposited in Relay's `CheckAccount` + RuntimeEvent::Balances(pallet_balances::Event::Minted { who, amount }) => { + who: *who == ::XcmPallet::check_account(), + amount: *amount == t.args.amount, + }, + ] + ); + } + fn asset_hub_assertions(_: RelayToParaThroughAHTest) { + type RuntimeEvent = ::RuntimeEvent; + let sov_penpal_on_ah = AssetHubRococo::sovereign_account_id_of( + AssetHubRococo::sibling_location_of(PenpalA::para_id()), + ); + assert_expected_events!( + AssetHubRococo, + vec![ + // Deposited to receiver parachain SA + RuntimeEvent::Balances( + pallet_balances::Event::Minted { who, .. } + ) => { + who: *who == sov_penpal_on_ah, + }, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + } + fn penpal_assertions(t: RelayToParaThroughAHTest) { + type RuntimeEvent = ::RuntimeEvent; + let expected_id = + t.args.assets.into_inner().first().unwrap().id.0.clone().try_into().unwrap(); + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. 
}) => { + asset_id: *asset_id == expected_id, + owner: *owner == t.receiver.account_id, + }, + ] + ); + } + fn transfer_assets_dispatchable(t: RelayToParaThroughAHTest) -> DispatchResult { + let fee_idx = t.args.fee_asset_item as usize; + let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); + let asset_hub_location = Rococo::child_location_of(AssetHubRococo::para_id()); + let context = RococoUniversalLocation::get(); + + // reanchor fees to the view of destination (Penpal) + let mut remote_fees = fee.clone().reanchored(&t.args.dest, &context).unwrap(); + if let Fungible(ref mut amount) = remote_fees.fun { + // we already spent some fees along the way, just use half of what we started with + *amount = *amount / 2; + } + let xcm_on_final_dest = Xcm::<()>(vec![ + BuyExecution { fees: remote_fees, weight_limit: t.args.weight_limit.clone() }, + DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }, + ]); + + // reanchor final dest (Penpal) to the view of hop (Asset Hub) + let mut dest = t.args.dest.clone(); + dest.reanchor(&asset_hub_location, &context).unwrap(); + // on Asset Hub, forward assets to Penpal + let xcm_on_hop = Xcm::<()>(vec![DepositReserveAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + dest, + xcm: xcm_on_final_dest, + }]); + + // First leg is a teleport, from there a local-reserve-transfer to final dest + ::XcmPallet::transfer_assets_using_type_and_then( + t.signed_origin, + bx!(asset_hub_location.into()), + bx!(t.args.assets.into()), + bx!(TransferType::Teleport), + bx!(fee.id.into()), + bx!(TransferType::Teleport), + bx!(VersionedXcm::from(xcm_on_hop)), + t.args.weight_limit, + ) + } + + // Set assertions and dispatchables + test.set_assertion::(relay_assertions); + test.set_assertion::(asset_hub_assertions); + test.set_assertion::(penpal_assertions); + test.set_dispatchable::(transfer_assets_dispatchable); + test.assert(); + + // Query final balances + let sender_balance_after = test.sender.balance; + let sov_penpal_on_ah_after = AssetHubRococo::execute_with(|| { + ::Balances::free_balance(sov_penpal_on_ah) + }); + let receiver_assets_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(relay_native_asset_location, &receiver) + }); + + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_balance_after < sender_balance_before - amount_to_send); + // SA on AH balance is increased + assert!(sov_penpal_on_ah_after > sov_penpal_on_ah_before); + // Receiver's asset balance is increased + assert!(receiver_assets_after > receiver_assets_before); + // Receiver's asset balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_assets_after < receiver_assets_before + amount_to_send); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs index 346af308238..138ce419757 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-mod foreign_assets_transfers; +mod hybrid_transfers; mod reserve_transfer; mod send; mod set_xcm_versions; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs index 5aef70f5cbf..8b9fedcd494 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -574,7 +574,7 @@ fn reserve_transfer_native_asset_from_relay_to_para() { let sender = RococoSender::get(); let amount_to_send: Balance = ROCOCO_ED * 1000; - // Init values fot Parachain + // Init values for Parachain let relay_native_asset_location = RelayLocation::get(); let receiver = PenpalAReceiver::get(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index e687251c14f..1c4a0ef4c8d 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -74,7 +74,9 @@ mod imports { LocalReservableFromAssetHub as PenpalLocalReservableFromAssetHub, LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub, }; - pub use westend_runtime::xcm_config::XcmConfig as WestendXcmConfig; + pub use westend_runtime::xcm_config::{ + UniversalLocation as WestendUniversalLocation, XcmConfig as WestendXcmConfig, + }; pub const ASSET_ID: u32 = 3; pub const ASSET_MIN_BALANCE: u128 = 1000; @@ -87,6 +89,7 @@ mod imports { pub type ParaToSystemParaTest = Test; pub type ParaToParaThroughRelayTest = Test; pub type ParaToParaThroughAHTest = Test; + pub type RelayToParaThroughAHTest = Test; } #[cfg(test)] diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/foreign_assets_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs similarity index 76% rename from cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/foreign_assets_transfers.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs index 8cfda37c84c..d39c72c7c5f 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/foreign_assets_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs @@ -54,14 +54,18 @@ fn para_to_para_assethub_hop_assertions(t: ParaToParaThroughAHTest) { fn ah_to_para_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { let fee_idx = t.args.fee_asset_item as usize; let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); - ::PolkadotXcm::transfer_assets_using_type( + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }]); + ::PolkadotXcm::transfer_assets_using_type_and_then( t.signed_origin, bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), bx!(TransferType::LocalReserve), bx!(fee.id.into()), bx!(TransferType::LocalReserve), + bx!(VersionedXcm::from(custom_xcm_on_dest)), t.args.weight_limit, ) } @@ 
-69,14 +73,18 @@ fn ah_to_para_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { fn para_to_ah_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult { let fee_idx = t.args.fee_asset_item as usize; let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); - ::PolkadotXcm::transfer_assets_using_type( + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }]); + ::PolkadotXcm::transfer_assets_using_type_and_then( t.signed_origin, bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), bx!(TransferType::DestinationReserve), bx!(fee.id.into()), bx!(TransferType::DestinationReserve), + bx!(VersionedXcm::from(custom_xcm_on_dest)), t.args.weight_limit, ) } @@ -85,14 +93,18 @@ fn para_to_para_transfer_assets_through_ah(t: ParaToParaThroughAHTest) -> Dispat let fee_idx = t.args.fee_asset_item as usize; let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); let asset_hub_location: Location = PenpalA::sibling_location_of(AssetHubWestend::para_id()); - ::PolkadotXcm::transfer_assets_using_type( + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }]); + ::PolkadotXcm::transfer_assets_using_type_and_then( t.signed_origin, bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), bx!(TransferType::RemoteReserve(asset_hub_location.clone().into())), bx!(fee.id.into()), bx!(TransferType::RemoteReserve(asset_hub_location.into())), + bx!(VersionedXcm::from(custom_xcm_on_dest)), t.args.weight_limit, ) } @@ -100,14 +112,18 @@ fn para_to_para_transfer_assets_through_ah(t: ParaToParaThroughAHTest) -> Dispat fn para_to_asset_hub_teleport_foreign_assets(t: ParaToSystemParaTest) -> DispatchResult { let fee_idx = t.args.fee_asset_item as usize; let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); - ::PolkadotXcm::transfer_assets_using_type( + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }]); + ::PolkadotXcm::transfer_assets_using_type_and_then( t.signed_origin, bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), bx!(TransferType::Teleport), bx!(fee.id.into()), bx!(TransferType::DestinationReserve), + bx!(VersionedXcm::from(custom_xcm_on_dest)), t.args.weight_limit, ) } @@ -115,14 +131,18 @@ fn para_to_asset_hub_teleport_foreign_assets(t: ParaToSystemParaTest) -> Dispatc fn asset_hub_to_para_teleport_foreign_assets(t: SystemParaToParaTest) -> DispatchResult { let fee_idx = t.args.fee_asset_item as usize; let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); - ::PolkadotXcm::transfer_assets_using_type( + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }]); + ::PolkadotXcm::transfer_assets_using_type_and_then( t.signed_origin, bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), bx!(TransferType::Teleport), bx!(fee.id.into()), bx!(TransferType::LocalReserve), + bx!(VersionedXcm::from(custom_xcm_on_dest)), t.args.weight_limit, ) } @@ -627,3 +647,166 @@ fn bidirectional_teleport_foreign_asset_between_para_and_asset_hub_using_explici asset_hub_to_para_teleport_foreign_assets, ); } + +// 
=============================================================== +// ===== Transfer - Native Asset - Relay->AssetHub->Parachain ==== +// =============================================================== +/// Transfers of native asset Relay to Parachain (using AssetHub reserve). Parachains want to avoid +/// managing SAs on all system chains, thus want all their DOT-in-reserve to be held in their +/// Sovereign Account on Asset Hub. +#[test] +fn transfer_native_asset_from_relay_to_para_through_asset_hub() { + // Init values for Relay + let destination = Westend::child_location_of(PenpalA::para_id()); + let sender = WestendSender::get(); + let amount_to_send: Balance = WESTEND_ED * 1000; + + // Init values for Parachain + let relay_native_asset_location = RelayLocation::get(); + let receiver = PenpalAReceiver::get(); + + // Init Test + let test_args = TestContext { + sender, + receiver: receiver.clone(), + args: TestArgs::new_relay(destination.clone(), receiver.clone(), amount_to_send), + }; + let mut test = RelayToParaThroughAHTest::new(test_args); + + let sov_penpal_on_ah = AssetHubWestend::sovereign_account_id_of( + AssetHubWestend::sibling_location_of(PenpalA::para_id()), + ); + // Query initial balances + let sender_balance_before = test.sender.balance; + let sov_penpal_on_ah_before = AssetHubWestend::execute_with(|| { + ::Balances::free_balance(sov_penpal_on_ah.clone()) + }); + let receiver_assets_before = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(relay_native_asset_location.clone(), &receiver) + }); + + fn relay_assertions(t: RelayToParaThroughAHTest) { + type RuntimeEvent = ::RuntimeEvent; + Westend::assert_xcm_pallet_attempted_complete(None); + assert_expected_events!( + Westend, + vec![ + // Amount to teleport is withdrawn from Sender + RuntimeEvent::Balances(pallet_balances::Event::Burned { who, amount }) => { + who: *who == t.sender.account_id, + amount: *amount == t.args.amount, + }, + // Amount to teleport is deposited in Relay's `CheckAccount` + RuntimeEvent::Balances(pallet_balances::Event::Minted { who, amount }) => { + who: *who == ::XcmPallet::check_account(), + amount: *amount == t.args.amount, + }, + ] + ); + } + fn asset_hub_assertions(_: RelayToParaThroughAHTest) { + type RuntimeEvent = ::RuntimeEvent; + let sov_penpal_on_ah = AssetHubWestend::sovereign_account_id_of( + AssetHubWestend::sibling_location_of(PenpalA::para_id()), + ); + assert_expected_events!( + AssetHubWestend, + vec![ + // Deposited to receiver parachain SA + RuntimeEvent::Balances( + pallet_balances::Event::Minted { who, .. } + ) => { + who: *who == sov_penpal_on_ah, + }, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + } + fn penpal_assertions(t: RelayToParaThroughAHTest) { + type RuntimeEvent = ::RuntimeEvent; + let expected_id = + t.args.assets.into_inner().first().unwrap().id.0.clone().try_into().unwrap(); + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. 
}) => { + asset_id: *asset_id == expected_id, + owner: *owner == t.receiver.account_id, + }, + ] + ); + } + fn transfer_assets_dispatchable(t: RelayToParaThroughAHTest) -> DispatchResult { + let fee_idx = t.args.fee_asset_item as usize; + let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); + let asset_hub_location = Westend::child_location_of(AssetHubWestend::para_id()); + let context = WestendUniversalLocation::get(); + + // reanchor fees to the view of destination (Penpal) + let mut remote_fees = fee.clone().reanchored(&t.args.dest, &context).unwrap(); + if let Fungible(ref mut amount) = remote_fees.fun { + // we already spent some fees along the way, just use half of what we started with + *amount = *amount / 2; + } + let xcm_on_final_dest = Xcm::<()>(vec![ + BuyExecution { fees: remote_fees, weight_limit: t.args.weight_limit.clone() }, + DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }, + ]); + + // reanchor final dest (Penpal) to the view of hop (Asset Hub) + let mut dest = t.args.dest.clone(); + dest.reanchor(&asset_hub_location, &context).unwrap(); + // on Asset Hub, forward assets to Penpal + let xcm_on_hop = Xcm::<()>(vec![DepositReserveAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + dest, + xcm: xcm_on_final_dest, + }]); + + // First leg is a teleport, from there a local-reserve-transfer to final dest + ::XcmPallet::transfer_assets_using_type_and_then( + t.signed_origin, + bx!(asset_hub_location.into()), + bx!(t.args.assets.into()), + bx!(TransferType::Teleport), + bx!(fee.id.into()), + bx!(TransferType::Teleport), + bx!(VersionedXcm::from(xcm_on_hop)), + t.args.weight_limit, + ) + } + + // Set assertions and dispatchables + test.set_assertion::(relay_assertions); + test.set_assertion::(asset_hub_assertions); + test.set_assertion::(penpal_assertions); + test.set_dispatchable::(transfer_assets_dispatchable); + test.assert(); + + // Query final balances + let sender_balance_after = test.sender.balance; + let sov_penpal_on_ah_after = AssetHubWestend::execute_with(|| { + ::Balances::free_balance(sov_penpal_on_ah) + }); + let receiver_assets_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(relay_native_asset_location, &receiver) + }); + + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_balance_after < sender_balance_before - amount_to_send); + // SA on AH balance is increased + assert!(sov_penpal_on_ah_after > sov_penpal_on_ah_before); + // Receiver's asset balance is increased + assert!(receiver_assets_after > receiver_assets_before); + // Receiver's asset balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_assets_after < receiver_assets_before + amount_to_send); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs index e463e21e9e5..bf013697b4c 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs @@ -14,7 +14,7 @@ // limitations under the License. 
mod fellowship_treasury; -mod foreign_assets_transfers; +mod hybrid_transfers; mod reserve_transfer; mod send; mod set_xcm_versions; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs index df01eb0d48a..65d013a0eec 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -574,7 +574,7 @@ fn reserve_transfer_native_asset_from_relay_to_para() { let sender = WestendSender::get(); let amount_to_send: Balance = WESTEND_ED * 1000; - // Init values fot Parachain + // Init values for Parachain let relay_native_asset_location = RelayLocation::get(); let receiver = PenpalAReceiver::get(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs index 69d625be280..87fb70e4de2 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs @@ -60,15 +60,19 @@ fn send_asset_from_penpal_rococo_through_local_asset_hub_to_westend_asset_hub( AccountId32Junction { network: None, id: AssetHubWestendReceiver::get().into() }.into(); let assets: Assets = (id.clone(), transfer_amount).into(); let fees_id: AssetId = id.into(); + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(assets.len() as u32)), + beneficiary, + }]); - ::PolkadotXcm::transfer_assets_using_type( + ::PolkadotXcm::transfer_assets_using_type_and_then( signed_origin, bx!(destination.into()), - bx!(beneficiary.into()), bx!(assets.clone().into()), bx!(TransferType::RemoteReserve(local_asset_hub.clone().into())), bx!(fees_id.into()), bx!(TransferType::RemoteReserve(local_asset_hub.into())), + bx!(VersionedXcm::from(custom_xcm_on_dest)), WeightLimit::Unlimited, ) })); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs index 3a8ce7d43f3..597e77d9049 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs @@ -59,15 +59,19 @@ fn send_asset_from_penpal_westend_through_local_asset_hub_to_rococo_asset_hub( AccountId32Junction { network: None, id: AssetHubRococoReceiver::get().into() }.into(); let assets: Assets = (id.clone(), transfer_amount).into(); let fees_id: AssetId = id.into(); + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(assets.len() as u32)), + beneficiary, + }]); - ::PolkadotXcm::transfer_assets_using_type( + ::PolkadotXcm::transfer_assets_using_type_and_then( signed_origin, bx!(destination.into()), - bx!(beneficiary.into()), bx!(assets.into()), bx!(TransferType::RemoteReserve(local_asset_hub.clone().into())), bx!(fees_id.into()), bx!(TransferType::RemoteReserve(local_asset_hub.into())), + bx!(VersionedXcm::from(custom_xcm_on_dest)), 
WeightLimit::Unlimited,
 		)
 	}));
diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs
index 698ec6998b4..f6c301d5b04 100644
--- a/polkadot/xcm/pallet-xcm/src/lib.rs
+++ b/polkadot/xcm/pallet-xcm/src/lib.rs
@@ -45,7 +45,7 @@ use sp_runtime::{
 		AccountIdConversion, BadOrigin, BlakeTwo256, BlockNumberProvider, Dispatchable, Hash,
 		Saturating, Zero,
 	},
-	RuntimeDebug,
+	Either, RuntimeDebug,
 };
 use sp_std::{boxed::Box, marker::PhantomData, prelude::*, result::Result, vec};
 use xcm::{latest::QueryResponseInfo, prelude::*};
@@ -1311,7 +1311,7 @@
 			Self::do_transfer_assets(
 				origin,
 				dest,
-				beneficiary,
+				Either::Left(beneficiary),
 				assets,
 				assets_transfer_type,
 				fee_asset_item,
@@ -1421,50 +1421,60 @@
 		/// - `TransferType::Teleport`: burn local assets and forward XCM to `dest` chain to
 		///   mint/teleport assets and deposit them to `beneficiary`.
 		///
-		/// Fee payment on the source, destination and all intermediary hops, is specified through
-		/// `fees_id`, but make sure enough of the specified `fees_id` asset is included in the
-		/// given list of `assets`. `fees_id` should be enough to pay for `weight_limit`. If more
-		/// weight is needed than `weight_limit`, then the operation will fail and the sent assets
-		/// may be at risk.
+		/// On the destination chain, as well as any intermediary hops, `BuyExecution` is used to
+		/// buy execution using transferred `assets` identified by `remote_fees_id`.
+		/// Make sure enough of the specified `remote_fees_id` asset is included in the given list
+		/// of `assets`. `remote_fees_id` should be enough to pay for `weight_limit`. If more weight
+		/// is needed than `weight_limit`, then the operation will fail and the sent assets may be
+		/// at risk.
+		///
+		/// `remote_fees_id` may use different transfer type than rest of `assets` and can be
+		/// specified through `fees_transfer_type`.
 		///
-		/// `fees_id` may use different transfer type than rest of `assets` and can be specified
-		/// through `fees_transfer_type`.
+		/// The caller needs to specify what should happen to the transferred assets once they reach
+		/// the `dest` chain. This is done through the `custom_xcm_on_dest` parameter, which
+		/// contains the instructions to execute on `dest` as a final step.
+		/// This is usually as simple as:
+		/// `Xcm(vec![DepositAsset { assets: Wild(AllCounted(assets.len())), beneficiary }])`,
+		/// but could be something more exotic like sending the `assets` even further.
 		///
 		/// - `origin`: Must be capable of withdrawing the `assets` and executing XCM.
 		/// - `dest`: Destination context for the assets. Will typically be `[Parent,
 		///   Parachain(..)]` to send from parachain to parachain, or `[Parachain(..)]` to send from
 		///   relay to parachain, or `(parents: 2, (GlobalConsensus(..), ..))` to send from
 		///   parachain across a bridge to another ecosystem destination.
-		/// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will
-		///   generally be an `AccountId32` value.
 		/// - `assets`: The assets to be withdrawn. This should include the assets used to pay the
 		///   fee on the `dest` (and possibly reserve) chains.
 		/// - `assets_transfer_type`: The XCM `TransferType` used to transfer the `assets`.
-		/// - `fees_id`: One of the included `assets` to be be used to pay fees.
+		/// - `remote_fees_id`: One of the included `assets` to be used to pay fees.
 		/// - `fees_transfer_type`: The XCM `TransferType` used to transfer the `fees` assets.
+ /// - `custom_xcm_on_dest`: The XCM to be executed on `dest` chain as the last step of the + /// transfer, which also determines what happens to the assets on the destination chain. /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. #[pallet::call_index(15)] #[pallet::weight(T::WeightInfo::transfer_assets())] - pub fn transfer_assets_using_type( + pub fn transfer_assets_using_type_and_then( origin: OriginFor, dest: Box, - beneficiary: Box, assets: Box, assets_transfer_type: Box, - fees_id: Box, + remote_fees_id: Box, fees_transfer_type: Box, + custom_xcm_on_dest: Box>, weight_limit: WeightLimit, ) -> DispatchResult { let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; let dest: Location = (*dest).try_into().map_err(|()| Error::::BadVersion)?; - let beneficiary: Location = - (*beneficiary).try_into().map_err(|()| Error::::BadVersion)?; let assets: Assets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; - let fees_id: AssetId = (*fees_id).try_into().map_err(|()| Error::::BadVersion)?; + let fees_id: AssetId = + (*remote_fees_id).try_into().map_err(|()| Error::::BadVersion)?; + let remote_xcm: Xcm<()> = + (*custom_xcm_on_dest).try_into().map_err(|()| Error::::BadVersion)?; log::debug!( - target: "xcm::pallet_xcm::transfer_assets_using_type", - "origin {:?}, dest {:?}, beneficiary {:?}, assets {:?} through {:?}, fees-id {:?} through {:?}", - origin_location, dest, beneficiary, assets, assets_transfer_type, fees_id, fees_transfer_type, + target: "xcm::pallet_xcm::transfer_assets_using_type_and_then", + "origin {origin_location:?}, dest {dest:?}, assets {assets:?} through {assets_transfer_type:?}, \ + remote_fees_id {fees_id:?} through {fees_transfer_type:?}, \ + custom_xcm_on_dest {remote_xcm:?}, weight-limit {weight_limit:?}", ); let assets = assets.into_inner(); @@ -1475,7 +1485,7 @@ pub mod pallet { Self::do_transfer_assets( origin_location, dest, - beneficiary, + Either::Right(remote_xcm), assets, *assets_transfer_type, fee_asset_index, @@ -1650,7 +1660,7 @@ impl Pallet { let (local_xcm, remote_xcm) = Self::build_xcm_transfer_type( origin.clone(), dest.clone(), - beneficiary, + Either::Left(beneficiary), assets, assets_transfer_type, FeesHandling::Batched { fees }, @@ -1692,7 +1702,7 @@ impl Pallet { let (local_xcm, remote_xcm) = Self::build_xcm_transfer_type( origin_location.clone(), dest.clone(), - beneficiary, + Either::Left(beneficiary), assets, TransferType::Teleport, FeesHandling::Batched { fees }, @@ -1704,7 +1714,7 @@ impl Pallet { fn do_transfer_assets( origin: Location, dest: Location, - beneficiary: Location, + beneficiary: Either>, mut assets: Vec, assets_transfer_type: TransferType, fee_asset_index: usize, @@ -1770,7 +1780,7 @@ impl Pallet { fn build_xcm_transfer_type( origin: Location, dest: Location, - beneficiary: Location, + beneficiary: Either>, assets: Vec, transfer_type: TransferType, fees: FeesHandling, @@ -1782,57 +1792,51 @@ impl Pallet { fees_handling {:?}, weight_limit: {:?}", origin, dest, beneficiary, assets, transfer_type, fees, weight_limit, ); - Ok(match transfer_type { - TransferType::LocalReserve => { - let (local, remote) = Self::local_reserve_transfer_programs( - origin.clone(), - dest.clone(), - beneficiary, - assets, - fees, - weight_limit, - )?; - (local, Some(remote)) - }, - TransferType::DestinationReserve => { - let (local, remote) = Self::destination_reserve_transfer_programs( - origin.clone(), - dest.clone(), - beneficiary, - assets, - fees, - weight_limit, - )?; - (local, Some(remote)) - 
}, + match transfer_type { + TransferType::LocalReserve => Self::local_reserve_transfer_programs( + origin.clone(), + dest.clone(), + beneficiary, + assets, + fees, + weight_limit, + ) + .map(|(local, remote)| (local, Some(remote))), + TransferType::DestinationReserve => Self::destination_reserve_transfer_programs( + origin.clone(), + dest.clone(), + beneficiary, + assets, + fees, + weight_limit, + ) + .map(|(local, remote)| (local, Some(remote))), TransferType::RemoteReserve(reserve) => { let fees = match fees { FeesHandling::Batched { fees } => fees, _ => return Err(Error::::InvalidAssetUnsupportedReserve.into()), }; - let local = Self::remote_reserve_transfer_program( + Self::remote_reserve_transfer_program( origin.clone(), reserve.try_into().map_err(|()| Error::::BadVersion)?, - dest.clone(), beneficiary, - assets, - fees, - weight_limit, - )?; - (local, None) - }, - TransferType::Teleport => { - let (local, remote) = Self::teleport_assets_program( - origin.clone(), dest.clone(), - beneficiary, assets, fees, weight_limit, - )?; - (local, Some(remote)) + ) + .map(|local| (local, None)) }, - }) + TransferType::Teleport => Self::teleport_assets_program( + origin.clone(), + dest.clone(), + beneficiary, + assets, + fees, + weight_limit, + ) + .map(|(local, remote)| (local, Some(remote))), + } } fn execute_xcm_transfer( @@ -1947,7 +1951,7 @@ impl Pallet { fn local_reserve_transfer_programs( origin: Location, dest: Location, - beneficiary: Location, + beneficiary: Either>, assets: Vec, fees: FeesHandling, weight_limit: WeightLimit, @@ -1980,10 +1984,16 @@ impl Pallet { ]); // handle fees Self::add_fees_to_xcm(dest, fees, weight_limit, &mut local_execute_xcm, &mut xcm_on_dest)?; - // deposit all remaining assets in holding to `beneficiary` location - xcm_on_dest - .inner_mut() - .push(DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }); + + // Use custom XCM on remote chain, or just default to depositing everything to beneficiary. + let custom_remote_xcm = match beneficiary { + Either::Right(custom_xcm) => custom_xcm, + Either::Left(beneficiary) => { + // deposit all remaining assets in holding to `beneficiary` location + Xcm(vec![DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }]) + }, + }; + xcm_on_dest.0.extend(custom_remote_xcm.into_iter()); Ok((local_execute_xcm, xcm_on_dest)) } @@ -2022,7 +2032,7 @@ impl Pallet { fn destination_reserve_transfer_programs( origin: Location, dest: Location, - beneficiary: Location, + beneficiary: Either>, assets: Vec, fees: FeesHandling, weight_limit: WeightLimit, @@ -2058,10 +2068,15 @@ impl Pallet { // handle fees Self::add_fees_to_xcm(dest, fees, weight_limit, &mut local_execute_xcm, &mut xcm_on_dest)?; - // deposit all remaining assets in holding to `beneficiary` location - xcm_on_dest - .inner_mut() - .push(DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }); + // Use custom XCM on remote chain, or just default to depositing everything to beneficiary. 
+ let custom_remote_xcm = match beneficiary { + Either::Right(custom_xcm) => custom_xcm, + Either::Left(beneficiary) => { + // deposit all remaining assets in holding to `beneficiary` location + Xcm(vec![DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }]) + }, + }; + xcm_on_dest.0.extend(custom_remote_xcm.into_iter()); Ok((local_execute_xcm, xcm_on_dest)) } @@ -2070,8 +2085,8 @@ impl Pallet { fn remote_reserve_transfer_program( origin: Location, reserve: Location, + beneficiary: Either>, dest: Location, - beneficiary: Location, assets: Vec, fees: Asset, weight_limit: WeightLimit, @@ -2096,10 +2111,17 @@ impl Pallet { // identifies `dest` as seen by `reserve` let dest = dest.reanchored(&reserve, &context).map_err(|_| Error::::CannotReanchor)?; // xcm to be executed at dest - let xcm_on_dest = Xcm(vec![ - BuyExecution { fees: dest_fees, weight_limit: weight_limit.clone() }, - DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }, - ]); + let mut xcm_on_dest = + Xcm(vec![BuyExecution { fees: dest_fees, weight_limit: weight_limit.clone() }]); + // Use custom XCM on remote chain, or just default to depositing everything to beneficiary. + let custom_xcm_on_dest = match beneficiary { + Either::Right(custom_xcm) => custom_xcm, + Either::Left(beneficiary) => { + // deposit all remaining assets in holding to `beneficiary` location + Xcm(vec![DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }]) + }, + }; + xcm_on_dest.0.extend(custom_xcm_on_dest.into_iter()); // xcm to be executed on reserve let xcm_on_reserve = Xcm(vec![ BuyExecution { fees: reserve_fees, weight_limit }, @@ -2171,7 +2193,7 @@ impl Pallet { fn teleport_assets_program( origin: Location, dest: Location, - beneficiary: Location, + beneficiary: Either>, assets: Vec, fees: FeesHandling, weight_limit: WeightLimit, @@ -2231,10 +2253,16 @@ impl Pallet { ]); // handle fees Self::add_fees_to_xcm(dest, fees, weight_limit, &mut local_execute_xcm, &mut xcm_on_dest)?; - // deposit all remaining assets in holding to `beneficiary` location - xcm_on_dest - .inner_mut() - .push(DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }); + + // Use custom XCM on remote chain, or just default to depositing everything to beneficiary. + let custom_remote_xcm = match beneficiary { + Either::Right(custom_xcm) => custom_xcm, + Either::Left(beneficiary) => { + // deposit all remaining assets in holding to `beneficiary` location + Xcm(vec![DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }]) + }, + }; + xcm_on_dest.0.extend(custom_remote_xcm.into_iter()); Ok((local_execute_xcm, xcm_on_dest)) } diff --git a/prdoc/pr_3695.prdoc b/prdoc/pr_3695.prdoc index 2c2c2b2e691..cc54fb240cd 100644 --- a/prdoc/pr_3695.prdoc +++ b/prdoc/pr_3695.prdoc @@ -6,7 +6,7 @@ title: "pallet-xcm: add new extrinsic for asset transfers using explicit reserve doc: - audience: Runtime User description: | - pallet-xcm has a new extrinsic `transfer_assets_using_type` for transferring + pallet-xcm has a new extrinsic `transfer_assets_using_type_and_then` for transferring assets from local chain to destination chain using an explicit XCM transfer types for transferring the assets and the fees: - `TransferType::LocalReserve`: transfer assets to sovereign account of destination @@ -33,6 +33,13 @@ doc: Same when transferring bridged assets back across the bridge, the local bridging parachain must be used as the explicit reserve location. 
+      The new method takes a `custom_xcm_on_dest` parameter allowing the caller to specify
+      what should happen to the transferred assets once they reach
+      the `dest` chain. The `custom_xcm_on_dest` parameter should contain the instructions
+      to execute on `dest` as a final step. Usually as simple as:
+      `Xcm(vec![DepositAsset { assets: Wild(AllCounted(assets.len())), beneficiary }])`,
+      but could be something more exotic like sending the `assets` even further.
+
 crates:
   - name: pallet-xcm
     bump: minor
-- 
GitLab


From c594b10a803e218f63c1bd97d2b27454efb4e852 Mon Sep 17 00:00:00 2001
From: Alexander Kalankhodzhaev
Date: Wed, 24 Apr 2024 16:30:47 +0700
Subject: [PATCH 063/107] Remove unnecessary cloning (#4263)

Seems like Externalities already [return a
vector](https://github.com/paritytech/polkadot-sdk/blob/ffbce2a817ec2e7c8b7ce49f7ed6794584f19667/substrate/primitives/externalities/src/lib.rs#L86),
so calling `to_vec` on a vector just results in unneeded copying.

Co-authored-by: Liam Aharon
---
 substrate/primitives/io/src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/substrate/primitives/io/src/lib.rs b/substrate/primitives/io/src/lib.rs
index ec32b729033..c8675a9a90b 100644
--- a/substrate/primitives/io/src/lib.rs
+++ b/substrate/primitives/io/src/lib.rs
@@ -182,7 +182,7 @@ impl From for KillStorageResult {
 pub trait Storage {
 	/// Returns the data for `key` in the storage or `None` if the key can not be found.
 	fn get(&self, key: &[u8]) -> Option {
-		self.storage(key).map(|s| bytes::Bytes::from(s.to_vec()))
+		self.storage(key).map(bytes::Bytes::from)
 	}
 
 	/// Get `key` from storage, placing the value into `value_out` and return the number of
-- 
GitLab


From 8dc0b337889ef0d227c0a95681b340ee0e80a297 Mon Sep 17 00:00:00 2001
From: Serban Iorga
Date: Wed, 24 Apr 2024 16:26:25 +0300
Subject: [PATCH 064/107] [BEEFY] Return valid signatures when verifying
 commitment (#4259)

Trying to split parts of https://github.com/paritytech/polkadot-sdk/pull/1903
into smaller PRs. For https://github.com/paritytech/polkadot-sdk/pull/1903 it
would help if `verify_with_validator_set()` returned the list of valid
authority-signature pairs, since after the verification we need to send them
in the equivocation proof.

---
 .../consensus/beefy/src/justification.rs      | 57 ++++++---------
 .../consensus/beefy/src/commitment.rs         | 70 +++++++++++++++++--
 .../primitives/consensus/beefy/src/lib.rs     |  2 +-
 3 files changed, 88 insertions(+), 41 deletions(-)

diff --git a/substrate/client/consensus/beefy/src/justification.rs b/substrate/client/consensus/beefy/src/justification.rs
index 7f1b9e5237c..886368c9d7c 100644
--- a/substrate/client/consensus/beefy/src/justification.rs
+++ b/substrate/client/consensus/beefy/src/justification.rs
@@ -16,12 +16,11 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see .
-use crate::keystore::BeefyKeystore;
-use codec::{DecodeAll, Encode};
+use codec::DecodeAll;
 use sp_consensus::Error as ConsensusError;
 use sp_consensus_beefy::{
 	ecdsa_crypto::{AuthorityId, Signature},
-	ValidatorSet, ValidatorSetId, VersionedFinalityProof,
+	BeefySignatureHasher, KnownSignature, ValidatorSet, ValidatorSetId, VersionedFinalityProof,
 };
 use sp_runtime::traits::{Block as BlockT, NumberFor};
@@ -45,46 +44,31 @@ pub(crate) fn decode_and_verify_finality_proof<Block: BlockT>(
 ) -> Result<BeefyVersionedFinalityProof<Block>, (ConsensusError, u32)> {
 	let proof = <BeefyVersionedFinalityProof<Block>>::decode_all(&mut &*encoded)
 		.map_err(|_| (ConsensusError::InvalidJustification, 0))?;
-	verify_with_validator_set::<Block>(target_number, validator_set, &proof).map(|_| proof)
+	verify_with_validator_set::<Block>(target_number, validator_set, &proof)?;
+	Ok(proof)
 }
 
 /// Verify the Beefy finality proof against the validator set at the block it was generated.
-pub(crate) fn verify_with_validator_set<Block: BlockT>(
+pub(crate) fn verify_with_validator_set<'a, Block: BlockT>(
 	target_number: NumberFor<Block>,
-	validator_set: &ValidatorSet<AuthorityId>,
-	proof: &BeefyVersionedFinalityProof<Block>,
-) -> Result<(), (ConsensusError, u32)> {
-	let mut signatures_checked = 0u32;
+	validator_set: &'a ValidatorSet<AuthorityId>,
+	proof: &'a BeefyVersionedFinalityProof<Block>,
+) -> Result<Vec<KnownSignature<&'a AuthorityId, &'a Signature>>, (ConsensusError, u32)> {
 	match proof {
 		VersionedFinalityProof::V1(signed_commitment) => {
-			if signed_commitment.signatures.len() != validator_set.len() ||
-				signed_commitment.commitment.validator_set_id != validator_set.id() ||
-				signed_commitment.commitment.block_number != target_number
-			{
-				return Err((ConsensusError::InvalidJustification, 0))
-			}
-
-			// Arrangement of signatures in the commitment should be in the same order
-			// as validators for that set.
-			let message = signed_commitment.commitment.encode();
-			let valid_signatures = validator_set
-				.validators()
-				.into_iter()
-				.zip(signed_commitment.signatures.iter())
-				.filter(|(id, signature)| {
-					signature
-						.as_ref()
-						.map(|sig| {
-							signatures_checked += 1;
-							BeefyKeystore::verify(*id, sig, &message[..])
-						})
-						.unwrap_or(false)
-				})
-				.count();
-			if valid_signatures >= crate::round::threshold(validator_set.len()) {
-				Ok(())
+			let signatories = signed_commitment
+				.verify_signatures::<_, BeefySignatureHasher>(target_number, validator_set)
+				.map_err(|checked_signatures| {
+					(ConsensusError::InvalidJustification, checked_signatures)
+				})?;
+
+			if signatories.len() >= crate::round::threshold(validator_set.len()) {
+				Ok(signatories)
 			} else {
-				Err((ConsensusError::InvalidJustification, signatures_checked))
+				Err((
+					ConsensusError::InvalidJustification,
+					signed_commitment.signature_count() as u32,
+				))
 			}
 		},
 	}
@@ -92,6 +76,7 @@ pub(crate) fn verify_with_validator_set<Block: BlockT>(
 
 #[cfg(test)]
 pub(crate) mod tests {
+	use codec::Encode;
 	use sp_consensus_beefy::{
 		known_payloads, test_utils::Keyring, Commitment, Payload, SignedCommitment,
 		VersionedFinalityProof,
diff --git a/substrate/primitives/consensus/beefy/src/commitment.rs b/substrate/primitives/consensus/beefy/src/commitment.rs
index 4fd9e1b0a6e..8d3a6c6aa90 100644
--- a/substrate/primitives/consensus/beefy/src/commitment.rs
+++ b/substrate/primitives/consensus/beefy/src/commitment.rs
@@ -19,8 +19,30 @@
 use alloc::{vec, vec::Vec};
 use codec::{Decode, Encode, Error, Input};
 use core::cmp;
 use scale_info::TypeInfo;
+use sp_application_crypto::RuntimeAppPublic;
+use sp_runtime::traits::Hash;
+
+use crate::{BeefyAuthorityId, Payload, ValidatorSet, ValidatorSetId};
+
+/// A commitment signature, accompanied by the id of the validator that it belongs to.
+#[derive(Debug)]
+pub struct KnownSignature<TAuthorityId, TSignature> {
+	/// The signing validator.
+	pub validator_id: TAuthorityId,
+	/// The signature.
+	pub signature: TSignature,
+}
 
-use crate::{Payload, ValidatorSetId};
+impl<TAuthorityId: Clone, TSignature: Clone> KnownSignature<&TAuthorityId, &TSignature> {
+	/// Creates a `KnownSignature<TAuthorityId, TSignature>` from a
+	/// `KnownSignature<&TAuthorityId, &TSignature>`.
+	pub fn to_owned(&self) -> KnownSignature<TAuthorityId, TSignature> {
+		KnownSignature {
+			validator_id: self.validator_id.clone(),
+			signature: self.signature.clone(),
+		}
+	}
+}
 
 /// A commitment signed by GRANDPA validators as part of BEEFY protocol.
 ///
@@ -113,9 +135,49 @@ impl<TBlockNumber, TSignature> core::fmt::Display
 
 impl<TBlockNumber, TSignature> SignedCommitment<TBlockNumber, TSignature> {
 	/// Return the number of collected signatures.
-	pub fn no_of_signatures(&self) -> usize {
+	pub fn signature_count(&self) -> usize {
 		self.signatures.iter().filter(|x| x.is_some()).count()
 	}
+
+	/// Verify all the commitment signatures against the validator set that was active
+	/// at the block where the commitment was generated.
+	///
+	/// Returns the valid validator-signature pairs if the commitment can be verified.
+	pub fn verify_signatures<'a, TAuthorityId, MsgHash>(
+		&'a self,
+		target_number: TBlockNumber,
+		validator_set: &'a ValidatorSet<TAuthorityId>,
+	) -> Result<Vec<KnownSignature<&'a TAuthorityId, &'a TSignature>>, u32>
+	where
+		TBlockNumber: Clone + Encode + PartialEq,
+		TAuthorityId: RuntimeAppPublic<Signature = TSignature> + BeefyAuthorityId<MsgHash>,
+		MsgHash: Hash,
+	{
+		if self.signatures.len() != validator_set.len() ||
+			self.commitment.validator_set_id != validator_set.id() ||
+			self.commitment.block_number != target_number
+		{
+			return Err(0)
+		}
+
+		// Arrangement of signatures in the commitment should be in the same order
+		// as validators for that set.
+		let encoded_commitment = self.commitment.encode();
+		let signatories: Vec<_> = validator_set
+			.validators()
+			.into_iter()
+			.zip(self.signatures.iter())
+			.filter_map(|(id, maybe_signature)| {
+				let signature = maybe_signature.as_ref()?;
+				match BeefyAuthorityId::verify(id, signature, &encoded_commitment) {
+					true => Some(KnownSignature { validator_id: id, signature }),
+					false => None,
+				}
+			})
+			.collect();
+
+		Ok(signatories)
+	}
 }
 
 /// Type to be used to denote placement of signatures
@@ -439,13 +501,13 @@ mod tests {
 			commitment,
 			signatures: vec![None, None, Some(sigs.0), Some(sigs.1)],
 		};
-		assert_eq!(signed.no_of_signatures(), 2);
+		assert_eq!(signed.signature_count(), 2);
 
 		// when
 		signed.signatures[2] = None;
 
 		// then
-		assert_eq!(signed.no_of_signatures(), 1);
+		assert_eq!(signed.signature_count(), 1);
 	}
 
 	#[test]
diff --git a/substrate/primitives/consensus/beefy/src/lib.rs b/substrate/primitives/consensus/beefy/src/lib.rs
index 70978ca559d..6f644c5f790 100644
--- a/substrate/primitives/consensus/beefy/src/lib.rs
+++ b/substrate/primitives/consensus/beefy/src/lib.rs
@@ -43,7 +43,7 @@ pub mod witness;
 #[cfg(feature = "std")]
 pub mod test_utils;
 
-pub use commitment::{Commitment, SignedCommitment, VersionedFinalityProof};
+pub use commitment::{Commitment, KnownSignature, SignedCommitment, VersionedFinalityProof};
 pub use payload::{known_payloads, BeefyPayloadId, Payload, PayloadProvider};
 
 use alloc::vec::Vec;
-- 
GitLab

From ac473cfa7b32d9a2ae9c2445de772a9cc4c460a1 Mon Sep 17 00:00:00 2001
From: Branislav Kontur
Date: Wed, 24 Apr 2024 16:23:23 +0200
Subject: [PATCH 065/107] `AllowHrmpNotificationsFromRelayChain` barrier for
 HRMP notifications from the relaychain (#4156)

This PR:
- introduces `AllowHrmpNotificationsFromRelayChain` barrier for allowing
HRMP notifications just from the relay chain (to fulfill safety assumptions -
[see](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/src/v4/mod.rs#L532)) - sets it up for all testnet SP parachains Continuation of: https://github.com/paritytech/polkadot-sdk/pull/3696 --- .../assets/asset-hub-rococo/src/xcm_config.rs | 24 ++- .../asset-hub-westend/src/xcm_config.rs | 10 +- .../bridge-hub-rococo/src/xcm_config.rs | 15 +- .../bridge-hub-westend/src/xcm_config.rs | 18 +- .../collectives-westend/src/xcm_config.rs | 19 +- .../contracts-rococo/src/xcm_config.rs | 18 +- .../coretime-rococo/src/xcm_config.rs | 16 +- .../coretime-westend/src/xcm_config.rs | 16 +- .../people/people-rococo/src/xcm_config.rs | 16 +- .../people/people-westend/src/xcm_config.rs | 16 +- .../runtimes/testing/penpal/src/xcm_config.rs | 19 +- .../testing/rococo-parachain/src/lib.rs | 8 +- polkadot/runtime/common/src/lib.rs | 2 +- polkadot/xcm/xcm-builder/src/barriers.rs | 35 ++++ polkadot/xcm/xcm-builder/src/lib.rs | 9 +- .../xcm/xcm-builder/src/tests/barriers.rs | 194 +++++++++++++----- prdoc/pr_4156.prdoc | 13 ++ 17 files changed, 309 insertions(+), 139 deletions(-) create mode 100644 prdoc/pr_4156.prdoc diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index dbf27fb39ac..a73c1cc33ea 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -49,17 +49,17 @@ use testnet_parachains_constants::rococo::snowbridge::{ }; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, - DenyThenTry, DescribeAllTerminal, DescribeFamily, EnsureXcmOrigin, FrameTransactionalProcessor, - FungibleAdapter, FungiblesAdapter, GlobalConsensusParachainConvertsFor, HashedDescription, - IsConcrete, LocalMint, NetworkExportTableItem, NoChecking, NonFungiblesAdapter, - ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, - SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, - SovereignPaidRemoteExporter, SovereignSignedViaLocation, StartsWith, - StartsWithExplicitGlobalConsensus, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, - XcmFeeToAccount, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily, + EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, FungiblesAdapter, + GlobalConsensusParachainConvertsFor, HashedDescription, IsConcrete, LocalMint, + NetworkExportTableItem, NoChecking, NonFungiblesAdapter, ParentAsSuperuser, ParentIsPreset, + RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignPaidRemoteExporter, + SovereignSignedViaLocation, StartsWith, StartsWithExplicitGlobalConsensus, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -285,6 +285,8 @@ pub type Barrier = TrailingSetTopicAsId< )>, // Subscriptions for version tracking are OK. 
AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. + AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index ed8a58af396..d610bfd768c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -45,10 +45,10 @@ use polkadot_runtime_common::xcm_sender::ExponentialPrice; use sp_runtime::traits::{AccountIdConversion, ConvertInto}; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, - DenyThenTry, DescribeFamily, DescribePalletTerminal, EnsureXcmOrigin, - FrameTransactionalProcessor, FungibleAdapter, FungiblesAdapter, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + DenyReserveTransferToRelayChain, DenyThenTry, DescribeFamily, DescribePalletTerminal, + EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, FungiblesAdapter, GlobalConsensusParachainConvertsFor, HashedDescription, IsConcrete, LocalMint, NetworkExportTableItem, NoChecking, NonFungiblesAdapter, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, @@ -299,6 +299,8 @@ pub type Barrier = TrailingSetTopicAsId< )>, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. 
+ AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index f354ccce21f..bd1445bee22 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -46,12 +46,13 @@ use testnet_parachains_constants::rococo::snowbridge::EthereumNetwork; use xcm::latest::prelude::*; use xcm_builder::{ deposit_or_burn_fee, AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, - AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, - DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FrameTransactionalProcessor, - FungibleAdapter, HandleFee, IsConcrete, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, - SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, - UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeToAccount, + AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, + AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, + FrameTransactionalProcessor, FungibleAdapter, HandleFee, IsConcrete, ParentAsSuperuser, + ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + XcmFeeToAccount, }; use xcm_executor::{ traits::{FeeManager, FeeReason, FeeReason::Export, TransactAsset}, @@ -150,6 +151,8 @@ pub type Barrier = TrailingSetTopicAsId< )>, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. 
+ AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs index 31c37c8ffab..f147cd9653f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs @@ -38,14 +38,14 @@ use polkadot_runtime_common::xcm_sender::ExponentialPrice; use sp_runtime::traits::AccountIdConversion; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, - DenyThenTry, EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, IsConcrete, - ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, - SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, - XcmFeeToAccount, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FrameTransactionalProcessor, + FungibleAdapter, IsConcrete, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, + SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, + SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, + UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -139,6 +139,8 @@ pub type Barrier = TrailingSetTopicAsId< )>, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. 
+ AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs index 4449284b8aa..84697c3e363 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs @@ -35,14 +35,15 @@ use polkadot_runtime_common::xcm_sender::ExponentialPrice; use westend_runtime_constants::xcm as xcm_constants; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, - DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, FrameTransactionalProcessor, FungibleAdapter, - IsConcrete, LocatableAssetId, OriginToPluralityVoice, ParentAsSuperuser, ParentIsPreset, - RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, - XcmFeeManagerFromComponents, XcmFeeToAccount, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, + FrameTransactionalProcessor, FungibleAdapter, IsConcrete, LocatableAssetId, + OriginToPluralityVoice, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, + SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, + SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, + UsingComponents, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -156,6 +157,8 @@ pub type Barrier = TrailingSetTopicAsId< AllowExplicitUnpaidExecutionFrom, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. 
+ AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs index 9132b4e1760..ac15ac5b0f0 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs @@ -38,14 +38,14 @@ use sp_runtime::traits::AccountIdConversion; use testnet_parachains_constants::rococo::currency::CENTS; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, - DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, FrameTransactionalProcessor, FungibleAdapter, - IsConcrete, NativeAsset, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, - SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, - UsingComponents, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, - XcmFeeToAccount, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, + FrameTransactionalProcessor, FungibleAdapter, IsConcrete, NativeAsset, ParentAsSuperuser, + ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -149,6 +149,8 @@ pub type Barrier = TrailingSetTopicAsId< )>, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. 
+ AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs index 3e71730e015..9095b5b1caa 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs @@ -39,13 +39,13 @@ use polkadot_runtime_common::xcm_sender::ExponentialPrice; use sp_runtime::traits::AccountIdConversion; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, - DenyThenTry, EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, IsConcrete, - NonFungibleAdapter, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, - SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, - UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FrameTransactionalProcessor, + FungibleAdapter, IsConcrete, NonFungibleAdapter, ParentAsSuperuser, ParentIsPreset, + RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -156,6 +156,8 @@ pub type Barrier = TrailingSetTopicAsId< AllowExplicitUnpaidExecutionFrom, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. 
+ AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs index fc7ecf1e61c..defc57e2d7f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs @@ -39,13 +39,13 @@ use polkadot_runtime_common::xcm_sender::ExponentialPrice; use sp_runtime::traits::AccountIdConversion; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, - DenyThenTry, EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, IsConcrete, - NonFungibleAdapter, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, - SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, - UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FrameTransactionalProcessor, + FungibleAdapter, IsConcrete, NonFungibleAdapter, ParentAsSuperuser, ParentIsPreset, + RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -164,6 +164,8 @@ pub type Barrier = TrailingSetTopicAsId< AllowExplicitUnpaidExecutionFrom<(ParentOrParentsPlurality, FellowsPlurality)>, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. 
+ AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs index e4e4fa1b2c4..101d9a180e5 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs @@ -36,13 +36,13 @@ use polkadot_parachain_primitives::primitives::Sibling; use sp_runtime::traits::AccountIdConversion; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, - DenyThenTry, DescribeTerminus, EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, - HashedDescription, IsConcrete, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, - SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, - UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + DenyReserveTransferToRelayChain, DenyThenTry, DescribeTerminus, EnsureXcmOrigin, + FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsConcrete, ParentAsSuperuser, + ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -165,6 +165,8 @@ pub type Barrier = TrailingSetTopicAsId< AllowExplicitUnpaidExecutionFrom, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. 
+ AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs index 590f23f6853..0a903f91505 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs @@ -36,13 +36,13 @@ use polkadot_parachain_primitives::primitives::Sibling; use sp_runtime::traits::AccountIdConversion; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, - DenyThenTry, DescribeTerminus, EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, - HashedDescription, IsConcrete, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, - SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, - UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + DenyReserveTransferToRelayChain, DenyThenTry, DescribeTerminus, EnsureXcmOrigin, + FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsConcrete, ParentAsSuperuser, + ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -173,6 +173,8 @@ pub type Barrier = TrailingSetTopicAsId< AllowExplicitUnpaidExecutionFrom<(ParentOrParentsPlurality, FellowsPlurality)>, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. 
+ AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index 6832e2f4f44..711041f6d6e 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -43,14 +43,15 @@ use polkadot_runtime_common::{impls::ToAuthor, xcm_sender::ExponentialPrice}; use sp_runtime::traits::{AccountIdConversion, ConvertInto, Identity, TryConvertInto}; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowKnownQueryResponses, AllowSubscriptionsFrom, - AllowTopLevelPaidExecutionFrom, AsPrefixedGeneralIndex, ConvertedConcreteId, EnsureXcmOrigin, - FixedWeightBounds, FrameTransactionalProcessor, FungibleAdapter, FungiblesAdapter, IsConcrete, - LocalMint, NativeAsset, NoChecking, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, - SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, StartsWith, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, - XcmFeeManagerFromComponents, XcmFeeToAccount, + AccountId32Aliases, AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, + AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, AsPrefixedGeneralIndex, + ConvertedConcreteId, EnsureXcmOrigin, FixedWeightBounds, FrameTransactionalProcessor, + FungibleAdapter, FungiblesAdapter, IsConcrete, LocalMint, NativeAsset, NoChecking, + ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, + SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, StartsWith, TakeWeightCredit, TrailingSetTopicAsId, + UsingComponents, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + XcmFeeToAccount, }; use xcm_executor::{traits::JustTry, XcmExecutor}; @@ -217,6 +218,8 @@ pub type Barrier = TrailingSetTopicAsId<( AllowTopLevelPaidExecutionFrom, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. + AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 0ae93d1577c..11da6adb819 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -74,9 +74,9 @@ use parachains_common::{ AccountId, AssetIdForTrustBackedAssets, Signature, }; use xcm_builder::{ - AllowKnownQueryResponses, AllowSubscriptionsFrom, AsPrefixedGeneralIndex, ConvertedConcreteId, - FrameTransactionalProcessor, FungiblesAdapter, LocalMint, TrailingSetTopicAsId, - WithUniqueTopic, + AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, + AsPrefixedGeneralIndex, ConvertedConcreteId, FrameTransactionalProcessor, FungiblesAdapter, + LocalMint, TrailingSetTopicAsId, WithUniqueTopic, }; use xcm_executor::traits::JustTry; @@ -444,6 +444,8 @@ pub type Barrier = TrailingSetTopicAsId<( AllowKnownQueryResponses, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. + AllowHrmpNotificationsFromRelayChain, )>; parameter_types! 
{

diff --git a/polkadot/runtime/common/src/lib.rs b/polkadot/runtime/common/src/lib.rs
index 65161764ccd..60cc684149b 100644
--- a/polkadot/runtime/common/src/lib.rs
+++ b/polkadot/runtime/common/src/lib.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
 
-//! Common runtime code for Polkadot and Kusama.
+//! Common runtime code for the Relay Chain, e.g. Rococo, Westend, Polkadot, Kusama ...
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
diff --git a/polkadot/xcm/xcm-builder/src/barriers.rs b/polkadot/xcm/xcm-builder/src/barriers.rs
index c0b328f38e9..11e9122f9a1 100644
--- a/polkadot/xcm/xcm-builder/src/barriers.rs
+++ b/polkadot/xcm/xcm-builder/src/barriers.rs
@@ -399,6 +399,41 @@ impl<T: Contains<Location>> ShouldExecute for AllowSubscriptionsFrom<T> {
 	}
 }
 
+/// Allows execution for the Relay Chain origin (represented as `Location::parent()`) if it is just
+/// a straight `HrmpNewChannelOpenRequest`, `HrmpChannelAccepted`, or `HrmpChannelClosing`
+/// instruction.
+///
+/// Note: This barrier fulfills safety recommendations for the mentioned instructions - see their
+/// documentation.
+pub struct AllowHrmpNotificationsFromRelayChain;
+impl ShouldExecute for AllowHrmpNotificationsFromRelayChain {
+	fn should_execute<RuntimeCall>(
+		origin: &Location,
+		instructions: &mut [Instruction<RuntimeCall>],
+		_max_weight: Weight,
+		_properties: &mut Properties,
+	) -> Result<(), ProcessMessageError> {
+		log::trace!(
+			target: "xcm::barriers",
+			"AllowHrmpNotificationsFromRelayChain origin: {:?}, instructions: {:?}, max_weight: {:?}, properties: {:?}",
+			origin, instructions, _max_weight, _properties,
+		);
+		// accept only the Relay Chain
+		ensure!(matches!(origin.unpack(), (1, [])), ProcessMessageError::Unsupported);
+		// accept only HRMP notifications and nothing else
+		instructions
+			.matcher()
+			.assert_remaining_insts(1)?
+			.match_next_inst(|inst| match inst {
+				HrmpNewChannelOpenRequest { .. } |
+				HrmpChannelAccepted { .. } |
+				HrmpChannelClosing { .. } => Ok(()),
+				_ => Err(ProcessMessageError::BadFormat),
+			})?;
+		Ok(())
+	}
+}
+
 /// Deny executing the XCM if it matches any of the Deny filter regardless of anything else.
 /// If it passes the Deny, and matches one of the Allow cases then it is let through.
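/// For instance (an illustrative composition, not a prescription), a runtime may use
/// `DenyThenTry<DenyReserveTransferToRelayChain, TakeWeightCredit>` to reject any reserve
/// transfer to the Relay Chain first, and only consult the allow rule if that check passes.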
pub struct DenyThenTry<Deny, Allow>(PhantomData<Deny>, PhantomData<Allow>)
diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs
index cdc663a0cc9..977da9a55de 100644
--- a/polkadot/xcm/xcm-builder/src/lib.rs
+++ b/polkadot/xcm/xcm-builder/src/lib.rs
@@ -35,10 +35,11 @@ pub use asset_conversion::{
 mod barriers;
 pub use barriers::{
-	AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom,
-	AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom, DenyReserveTransferToRelayChain,
-	DenyThenTry, IsChildSystemParachain, IsParentsOnly, IsSiblingSystemParachain,
-	RespectSuspension, TakeWeightCredit, TrailingSetTopicAsId, WithComputedOrigin,
+	AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain,
+	AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom,
+	AllowUnpaidExecutionFrom, DenyReserveTransferToRelayChain, DenyThenTry, IsChildSystemParachain,
+	IsParentsOnly, IsSiblingSystemParachain, RespectSuspension, TakeWeightCredit,
+	TrailingSetTopicAsId, WithComputedOrigin,
 };
 
 mod controller;
diff --git a/polkadot/xcm/xcm-builder/src/tests/barriers.rs b/polkadot/xcm/xcm-builder/src/tests/barriers.rs
index 6516263f57a..665b5febc61 100644
--- a/polkadot/xcm/xcm-builder/src/tests/barriers.rs
+++ b/polkadot/xcm/xcm-builder/src/tests/barriers.rs
@@ -315,56 +315,150 @@ fn allow_subscriptions_from_should_work() {
 	// allow only parent
 	AllowSubsFrom::set(vec![Location::parent()]);
 
-	let valid_xcm_1 = Xcm::<TestCall>(vec![SubscribeVersion {
-		query_id: 42,
-		max_response_weight: Weight::from_parts(5000, 5000),
-	}]);
-	let valid_xcm_2 = Xcm::<TestCall>(vec![UnsubscribeVersion]);
-	let invalid_xcm_1 = Xcm::<TestCall>(vec![
-		SetAppendix(Xcm(vec![])),
-		SubscribeVersion { query_id: 42, max_response_weight: Weight::from_parts(5000, 5000) },
-	]);
-	let invalid_xcm_2 = Xcm::<TestCall>(vec![
-		SubscribeVersion { query_id: 42, max_response_weight: Weight::from_parts(5000, 5000) },
-		SetTopic([0; 32]),
-	]);
+	// closure for (xcm, origin) testing with `AllowSubscriptionsFrom`
+	let assert_should_execute = |mut xcm: Vec<Instruction<TestCall>>, origin, expected_result| {
+		assert_eq!(
+			AllowSubscriptionsFrom::<IsInVec<AllowSubsFrom>>::should_execute(
+				&origin,
+				&mut xcm,
+				Weight::from_parts(10, 10),
+				&mut props(Weight::zero()),
+			),
+			expected_result
+		);
+	};
+
+	// invalid origin
+	assert_should_execute(
+		vec![SubscribeVersion {
+			query_id: Default::default(),
+			max_response_weight: Default::default(),
+		}],
+		Parachain(1).into_location(),
+		Err(ProcessMessageError::Unsupported),
+	);
+	assert_should_execute(
+		vec![UnsubscribeVersion],
+		Parachain(1).into_location(),
+		Err(ProcessMessageError::Unsupported),
+	);
 
-	let test_data = vec![
-		(
-			valid_xcm_1.clone(),
-			Parachain(1).into_location(),
-			// not allowed origin
-			Err(ProcessMessageError::Unsupported),
-		),
-		(valid_xcm_1, Location::parent(), Ok(())),
-		(
-			valid_xcm_2.clone(),
-			Parachain(1).into_location(),
-			// not allowed origin
-			Err(ProcessMessageError::Unsupported),
-		),
-		(valid_xcm_2, Location::parent(), Ok(())),
-		(
-			invalid_xcm_1,
-			Location::parent(),
-			// invalid XCM
-			Err(ProcessMessageError::BadFormat),
-		),
-		(
-			invalid_xcm_2,
-			Location::parent(),
-			// invalid XCM
-			Err(ProcessMessageError::BadFormat),
-		),
-	];
-
-	for (mut message, origin, expected_result) in test_data {
-		let r = AllowSubscriptionsFrom::<IsInVec<AllowSubsFrom>>::should_execute(
-			&origin,
-			message.inner_mut(),
-			Weight::from_parts(10, 10),
-			&mut props(Weight::zero()),
+	// invalid XCM (unexpected instruction before)
+	assert_should_execute(
+		vec![
+			SetAppendix(Xcm(vec![])),
+			SubscribeVersion {
+				query_id: Default::default(),
+				max_response_weight: Default::default(),
+			},
+		],
+		Location::parent(),
+		Err(ProcessMessageError::BadFormat),
+	);
+	assert_should_execute(
+		vec![SetAppendix(Xcm(vec![])), UnsubscribeVersion],
+		Location::parent(),
+		Err(ProcessMessageError::BadFormat),
+	);
+	// invalid XCM (unexpected instruction after)
+	assert_should_execute(
+		vec![
+			SubscribeVersion {
+				query_id: Default::default(),
+				max_response_weight: Default::default(),
+			},
+			SetTopic([0; 32]),
+		],
+		Location::parent(),
+		Err(ProcessMessageError::BadFormat),
+	);
+	assert_should_execute(
+		vec![UnsubscribeVersion, SetTopic([0; 32])],
+		Location::parent(),
+		Err(ProcessMessageError::BadFormat),
+	);
+	// invalid XCM (unexpected instruction)
+	assert_should_execute(
+		vec![SetAppendix(Xcm(vec![]))],
+		Location::parent(),
+		Err(ProcessMessageError::BadFormat),
+	);
+
+	// ok
+	assert_should_execute(
+		vec![SubscribeVersion {
+			query_id: Default::default(),
+			max_response_weight: Default::default(),
+		}],
+		Location::parent(),
+		Ok(()),
+	);
+	assert_should_execute(vec![UnsubscribeVersion], Location::parent(), Ok(()));
+}
+
+#[test]
+fn allow_hrmp_notifications_from_relay_chain_should_work() {
+	// closure for (xcm, origin) testing with `AllowHrmpNotificationsFromRelayChain`
+	let assert_should_execute = |mut xcm: Vec<Instruction<TestCall>>, origin, expected_result| {
+		assert_eq!(
+			AllowHrmpNotificationsFromRelayChain::should_execute(
+				&origin,
+				&mut xcm,
+				Weight::from_parts(10, 10),
+				&mut props(Weight::zero()),
+			),
+			expected_result
 		);
-		assert_eq!(r, expected_result, "Failed for origin: {origin:?} and message: {message:?}");
-	}
+	};
+
+	// invalid origin
+	assert_should_execute(
+		vec![HrmpChannelAccepted { recipient: Default::default() }],
+		Location::new(1, [Parachain(1)]),
+		Err(ProcessMessageError::Unsupported),
+	);
+
+	// invalid XCM (unexpected instruction before)
+	assert_should_execute(
+		vec![SetAppendix(Xcm(vec![])), HrmpChannelAccepted { recipient: Default::default() }],
+		Location::parent(),
+		Err(ProcessMessageError::BadFormat),
+	);
+	// invalid XCM (unexpected instruction after)
+	assert_should_execute(
+		vec![HrmpChannelAccepted { recipient: Default::default() }, SetTopic([0; 32])],
+		Location::parent(),
+		Err(ProcessMessageError::BadFormat),
+	);
+	// invalid XCM (unexpected instruction)
+	assert_should_execute(
+		vec![SetAppendix(Xcm(vec![]))],
+		Location::parent(),
+		Err(ProcessMessageError::BadFormat),
+	);
+
+	// ok
+	assert_should_execute(
+		vec![HrmpChannelAccepted { recipient: Default::default() }],
+		Location::parent(),
+		Ok(()),
+	);
+	assert_should_execute(
+		vec![HrmpNewChannelOpenRequest {
+			max_capacity: Default::default(),
+			sender: Default::default(),
+			max_message_size: Default::default(),
+		}],
+		Location::parent(),
+		Ok(()),
+	);
+	assert_should_execute(
+		vec![HrmpChannelClosing {
+			recipient: Default::default(),
+			sender: Default::default(),
+			initiator: Default::default(),
+		}],
+		Location::parent(),
+		Ok(()),
+	);
 }
diff --git a/prdoc/pr_4156.prdoc b/prdoc/pr_4156.prdoc
new file mode 100644
index 00000000000..fc09a4e0df4
--- /dev/null
+++ b/prdoc/pr_4156.prdoc
@@ -0,0 +1,13 @@
+title: "`AllowHrmpNotificationsFromRelayChain` barrier for HRMP notifications from the relaychain"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      A new barrier, `AllowHrmpNotificationsFromRelayChain`, has been added.
+      This barrier can be utilized to ensure that HRMP notifications originate solely from the Relay Chain.
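A condensed sketch of the wiring which the runtime diffs above apply (tuple members vary per runtime, and `AllowedSubscribers` is a placeholder filter type, not a real one):

pub type Barrier = TrailingSetTopicAsId<(
	TakeWeightCredit,
	// ... runtime-specific allow rules ...
	// Subscriptions for version tracking are OK.
	AllowSubscriptionsFrom<AllowedSubscribers>,
	// HRMP notifications from the relay chain are OK.
	AllowHrmpNotificationsFromRelayChain,
)>;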
+      If your runtime relies on these notifications,
+      you can include this barrier in the runtime's barrier type for `xcm_executor::Config`.
+
+crates:
+- name: staging-xcm-builder
+  bump: minor
-- 
GitLab

From d29c3636fac2cccadd774d467a2b891daca4d283 Mon Sep 17 00:00:00 2001
From: Javier Bullrich
Date: Wed, 24 Apr 2024 16:54:07 +0200
Subject: [PATCH 066/107] Updated review-bot to obtain number from event
 (#4271)

It seems that `review-trigger` is not uploading the artifact that is used by
`review-bot`, so I changed the PR number to be obtained from the event that
triggered this action.

I also took the liberty of replacing `tibdex/github-app-token` with
`actions/create-github-app-token`, which is GitHub's official action.
---
 .github/workflows/review-bot.yml     | 14 +++++---------
 .github/workflows/review-trigger.yml | 17 ++---------------
 2 files changed, 7 insertions(+), 24 deletions(-)

diff --git a/.github/workflows/review-bot.yml b/.github/workflows/review-bot.yml
index 5b036115b23..fb877357b23 100644
--- a/.github/workflows/review-bot.yml
+++ b/.github/workflows/review-bot.yml
@@ -11,22 +11,18 @@ jobs:
     runs-on: ubuntu-latest
     environment: master
     steps:
-      - name: Extract content of artifact
-        id: number
-        uses: Bullrich/extract-text-from-artifact@v1.0.0
-        with:
-          artifact-name: pr_number
       - name: Generate token
         id: app_token
-        uses: tibdex/github-app-token@v1
+        uses: actions/create-github-app-token@v1.9.3
         with:
-          app_id: ${{ secrets.REVIEW_APP_ID }}
-          private_key: ${{ secrets.REVIEW_APP_KEY }}
+          app-id: ${{ secrets.REVIEW_APP_ID }}
+          private-key: ${{ secrets.REVIEW_APP_KEY }}
       - name: "Evaluates PR reviews and assigns reviewers"
         uses: paritytech/review-bot@v2.4.0
         with:
          repo-token: ${{ steps.app_token.outputs.token }}
          team-token: ${{ steps.app_token.outputs.token }}
          checks-token: ${{ steps.app_token.outputs.token }}
-          pr-number: ${{ steps.number.outputs.content }}
+          # This is extracted from the triggering event
+          pr-number: ${{ github.event.workflow_run.pull_requests[0].number }}
          request-reviewers: true
diff --git a/.github/workflows/review-trigger.yml b/.github/workflows/review-trigger.yml
index 7f7d9d36278..6437be161d3 100644
--- a/.github/workflows/review-trigger.yml
+++ b/.github/workflows/review-trigger.yml
@@ -45,7 +45,7 @@ jobs:
           # We request them to review again
           echo $REVIEWERS | gh api --method POST repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/requested_reviewers --input -
-
+
           echo "::error::Project needs to be reviewed again"
           exit 1
         env:
           GH_TOKEN: ${{ github.token }}
       - name: Comment requirements
         # If the previous step failed and github-actions hasn't commented yet we comment instructions
         if: failure() && !contains(fromJson(steps.comments.outputs.bodies), 'Review required! Latest push from author must always be reviewed')
-        run: |
+        run: |
           gh pr comment ${{ github.event.pull_request.number }} --repo ${{ github.repository }} --body "Review required!
Latest push from author must always be reviewed" env: GH_TOKEN: ${{ github.token }} COMMENTS: ${{ steps.comments.outputs.users }} - - name: Get PR number - env: - PR_NUMBER: ${{ github.event.pull_request.number }} - run: | - echo "Saving PR number: $PR_NUMBER" - mkdir -p ./pr - echo $PR_NUMBER > ./pr/pr_number - - uses: actions/upload-artifact@v3 - name: Save PR number - with: - name: pr_number - path: pr/ - retention-days: 5 -- GitLab From 4f3d43a0c4e75caf73c1034a85590f81a9ae3809 Mon Sep 17 00:00:00 2001 From: Francisco Aguirre Date: Wed, 24 Apr 2024 17:49:33 +0200 Subject: [PATCH 067/107] Revert `execute_blob` and `send_blob` (#4266) Revert "pallet-xcm: Deprecate `execute` and `send` in favor of `execute_blob` and `send_blob` (#3749)" This reverts commit feee773d15d5237765b520b03854d46652181de5. --------- Co-authored-by: Adrian Catangiu Co-authored-by: Javier Bullrich --- Cargo.lock | 1 - .../emulated/chains/relays/westend/Cargo.toml | 1 + .../emulated/common/src/impls.rs | 6 +- .../assets/asset-hub-rococo/src/tests/send.rs | 8 +- .../assets/asset-hub-rococo/src/tests/swap.rs | 4 +- .../asset-hub-westend/src/tests/send.rs | 8 +- .../asset-hub-westend/src/tests/swap.rs | 4 +- .../bridge-hub-rococo/src/tests/send_xcm.rs | 7 +- .../bridge-hub-rococo/src/tests/snowbridge.rs | 18 +- .../bridges/bridge-hub-westend/Cargo.toml | 1 - .../bridge-hub-westend/src/tests/send_xcm.rs | 7 +- .../src/weights/pallet_xcm.rs | 112 ++++------ .../src/weights/pallet_xcm.rs | 30 --- .../src/weights/pallet_xcm.rs | 108 ++++----- .../src/weights/pallet_xcm.rs | 108 ++++----- .../src/weights/pallet_xcm.rs | 106 ++++----- .../coretime-rococo/src/weights/pallet_xcm.rs | 102 +++------ .../src/weights/pallet_xcm.rs | 102 +++------ .../people-rococo/src/weights/pallet_xcm.rs | 102 +++------ .../people-westend/src/weights/pallet_xcm.rs | 102 +++------ polkadot/runtime/rococo/src/impls.rs | 9 +- .../runtime/rococo/src/weights/pallet_xcm.rs | 110 ++++------ polkadot/runtime/westend/src/impls.rs | 9 +- .../runtime/westend/src/weights/pallet_xcm.rs | 108 ++++----- polkadot/xcm/pallet-xcm/src/benchmarking.rs | 29 --- polkadot/xcm/pallet-xcm/src/lib.rs | 205 +++++------------- polkadot/xcm/pallet-xcm/src/tests/mod.rs | 72 +++--- polkadot/xcm/src/lib.rs | 3 - polkadot/xcm/src/v4/mod.rs | 20 +- polkadot/xcm/xcm-builder/src/controller.rs | 39 ++-- polkadot/xcm/xcm-builder/src/lib.rs | 2 +- .../src/parachain/contracts_config.rs | 14 +- .../frame/contracts/mock-network/src/tests.rs | 33 ++- substrate/frame/contracts/src/lib.rs | 3 - substrate/frame/contracts/src/wasm/runtime.rs | 54 ++++- substrate/frame/contracts/uapi/src/host.rs | 2 +- 36 files changed, 602 insertions(+), 1047 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 62479cce2a0..ad7729d4b30 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2222,7 +2222,6 @@ dependencies = [ "pallet-message-queue", "pallet-xcm", "parachains-common", - "parity-scale-codec", "rococo-westend-system-emulated-network", "sp-runtime", "staging-xcm", diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml index 12a3ad60e0e..20aedb50e6a 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml @@ -11,6 +11,7 @@ publish = false workspace = true [dependencies] + # Substrate sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = 
false } sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs index c8a2f097abe..8f2789eb2f3 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs @@ -360,7 +360,7 @@ macro_rules! impl_send_transact_helpers_for_relay_chain { recipient: $crate::impls::ParaId, call: $crate::impls::DoubleEncoded<()> ) { - use $crate::impls::{bx, Chain, RelayChain, Encode}; + use $crate::impls::{bx, Chain, RelayChain}; ::execute_with(|| { let root_origin = ::RuntimeOrigin::root(); @@ -368,10 +368,10 @@ macro_rules! impl_send_transact_helpers_for_relay_chain { let xcm = $crate::impls::xcm_transact_unpaid_execution(call, $crate::impls::OriginKind::Superuser); // Send XCM `Transact` - $crate::impls::assert_ok!(]>::XcmPallet::send_blob( + $crate::impls::assert_ok!(]>::XcmPallet::send( root_origin, bx!(destination.into()), - xcm.encode().try_into().unwrap(), + bx!(xcm), )); Self::assert_xcm_pallet_sent(); }); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs index 1d120f1dc4c..364fbd0d439 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs @@ -75,10 +75,10 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_system_assets_works() { )]); PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send_blob( + assert_ok!(::PolkadotXcm::send( root_origin, bx!(system_para_destination), - xcm.encode().try_into().unwrap(), + bx!(xcm), )); PenpalA::assert_xcm_pallet_sent(); @@ -159,10 +159,10 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { )]); PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send_blob( + assert_ok!(::PolkadotXcm::send( root_origin, bx!(system_para_destination), - xcm.encode().try_into().unwrap(), + bx!(xcm), )); PenpalA::assert_xcm_pallet_sent(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs index 919e0080ba6..ec48e400ff5 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs @@ -372,10 +372,10 @@ fn pay_xcm_fee_with_some_asset_swapped_for_native() { penpal.clone(), ); - assert_ok!(::PolkadotXcm::send_blob( + assert_ok!(::PolkadotXcm::send( penpal_root, bx!(asset_hub_location), - xcm.encode().try_into().unwrap(), + bx!(xcm), )); PenpalA::assert_xcm_pallet_sent(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs index f218b539c38..eb0e985cc0c 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs @@ -75,10 +75,10 @@ fn 
send_xcm_from_para_to_system_para_paying_fee_with_system_assets_works() { )]); PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send_blob( + assert_ok!(::PolkadotXcm::send( root_origin, bx!(system_para_destination), - xcm.encode().try_into().unwrap(), + bx!(xcm), )); PenpalA::assert_xcm_pallet_sent(); @@ -159,10 +159,10 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { )]); PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send_blob( + assert_ok!(::PolkadotXcm::send( root_origin, bx!(system_para_destination), - xcm.encode().try_into().unwrap(), + bx!(xcm), )); PenpalA::assert_xcm_pallet_sent(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs index 31f763be637..f6b65809886 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs @@ -371,10 +371,10 @@ fn pay_xcm_fee_with_some_asset_swapped_for_native() { penpal.clone(), ); - assert_ok!(::PolkadotXcm::send_blob( + assert_ok!(::PolkadotXcm::send( penpal_root, bx!(asset_hub_location), - xcm.encode().try_into().unwrap(), + bx!(xcm), )); PenpalA::assert_xcm_pallet_sent(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs index 4bd041dc03f..a1d871cdb61 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs @@ -14,7 +14,6 @@ // limitations under the License. 
use crate::tests::*; -use codec::Encode; #[test] fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable() { @@ -27,7 +26,7 @@ fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable let remote_xcm = Xcm(vec![ClearOrigin]); - let xcm = VersionedXcm::from(Xcm::<()>(vec![ + let xcm = VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit, check_origin }, ExportMessage { network: WestendId.into(), @@ -39,10 +38,10 @@ fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable // Rococo Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Rococo::execute_with(|| { - assert_ok!(::XcmPallet::send_blob( + assert_ok!(::XcmPallet::send( sudo_origin, bx!(destination), - xcm.encode().try_into().unwrap(), + bx!(xcm), )); type RuntimeEvent = ::RuntimeEvent; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index e332eb5bfda..d0c02e61134 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -82,7 +82,7 @@ fn create_agent() { let create_agent_call = SnowbridgeControl::Control(ControlCall::CreateAgent {}); // Construct XCM to create an agent for para 1001 - let remote_xcm = VersionedXcm::from(Xcm::<()>(vec![ + let remote_xcm = VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), Transact { @@ -95,10 +95,10 @@ fn create_agent() { // Rococo Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Rococo::execute_with(|| { - assert_ok!(::XcmPallet::send_blob( + assert_ok!(::XcmPallet::send( sudo_origin, bx!(destination), - remote_xcm.encode().try_into().unwrap(), + bx!(remote_xcm), )); type RuntimeEvent = ::RuntimeEvent; @@ -140,7 +140,7 @@ fn create_channel() { let create_agent_call = SnowbridgeControl::Control(ControlCall::CreateAgent {}); // Construct XCM to create an agent for para 1001 - let create_agent_xcm = VersionedXcm::from(Xcm::<()>(vec![ + let create_agent_xcm = VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), Transact { @@ -153,7 +153,7 @@ fn create_channel() { let create_channel_call = SnowbridgeControl::Control(ControlCall::CreateChannel { mode: OperatingMode::Normal }); // Construct XCM to create a channel for para 1001 - let create_channel_xcm = VersionedXcm::from(Xcm::<()>(vec![ + let create_channel_xcm = VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), Transact { @@ -166,16 +166,16 @@ fn create_channel() { // Rococo Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Rococo::execute_with(|| { - assert_ok!(::XcmPallet::send_blob( + assert_ok!(::XcmPallet::send( sudo_origin.clone(), bx!(destination.clone()), - create_agent_xcm.encode().try_into().unwrap(), + bx!(create_agent_xcm), )); - assert_ok!(::XcmPallet::send_blob( + assert_ok!(::XcmPallet::send( sudo_origin, bx!(destination), - create_channel_xcm.encode().try_into().unwrap(), + bx!(create_channel_xcm), )); type RuntimeEvent = ::RuntimeEvent; diff --git 
a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml index 3aa2e2bcbe0..6aebf8862d6 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml @@ -11,7 +11,6 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.0" } # Substrate frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs index f69747c1770..b01be5e8dc8 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs @@ -14,7 +14,6 @@ // limitations under the License. use crate::tests::*; -use codec::Encode; #[test] fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable() { @@ -27,7 +26,7 @@ fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable let remote_xcm = Xcm(vec![ClearOrigin]); - let xcm = VersionedXcm::from(Xcm::<()>(vec![ + let xcm = VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit, check_origin }, ExportMessage { network: RococoId, @@ -39,10 +38,10 @@ fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable // Westend Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Westend::execute_with(|| { - assert_ok!(::XcmPallet::send_blob( + assert_ok!(::XcmPallet::send( sudo_origin, bx!(destination), - xcm.encode().try_into().unwrap(), + bx!(xcm), )); type RuntimeEvent = ::RuntimeEvent; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs index e0e231d7da2..51b6543bae8 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,30 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 21_224_000 picoseconds. 
- Weight::from_parts(21_821_000, 0) - .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 21_474_000 picoseconds. - Weight::from_parts(22_072_000, 0) + // Minimum execution time: 22_136_000 picoseconds. + Weight::from_parts(22_518_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -112,8 +90,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 90_677_000 picoseconds. - Weight::from_parts(93_658_000, 0) + // Minimum execution time: 92_277_000 picoseconds. + Weight::from_parts(94_843_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -140,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `400` // Estimated: `6196` - // Minimum execution time: 116_767_000 picoseconds. - Weight::from_parts(118_843_000, 0) + // Minimum execution time: 120_110_000 picoseconds. + Weight::from_parts(122_968_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(5)) @@ -170,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `496` // Estimated: `6208` - // Minimum execution time: 137_983_000 picoseconds. - Weight::from_parts(141_396_000, 0) + // Minimum execution time: 143_116_000 picoseconds. + Weight::from_parts(147_355_000, 0) .saturating_add(Weight::from_parts(0, 6208)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) @@ -186,24 +164,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. 
- Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_232_000 picoseconds. - Weight::from_parts(6_507_000, 0) + // Minimum execution time: 6_517_000 picoseconds. + Weight::from_parts(6_756_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -213,8 +181,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_884_000 picoseconds. - Weight::from_parts(2_016_000, 0) + // Minimum execution time: 1_894_000 picoseconds. + Weight::from_parts(2_024_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -240,8 +208,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 26_637_000 picoseconds. - Weight::from_parts(27_616_000, 0) + // Minimum execution time: 27_314_000 picoseconds. + Weight::from_parts(28_787_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -266,8 +234,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 28_668_000 picoseconds. - Weight::from_parts(29_413_000, 0) + // Minimum execution time: 29_840_000 picoseconds. + Weight::from_parts(30_589_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -278,8 +246,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_990_000 picoseconds. - Weight::from_parts(2_114_000, 0) + // Minimum execution time: 1_893_000 picoseconds. + Weight::from_parts(2_017_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -289,8 +257,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `159` // Estimated: `13524` - // Minimum execution time: 18_856_000 picoseconds. - Weight::from_parts(19_430_000, 0) + // Minimum execution time: 19_211_000 picoseconds. + Weight::from_parts(19_552_000, 0) .saturating_add(Weight::from_parts(0, 13524)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -301,8 +269,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `163` // Estimated: `13528` - // Minimum execution time: 19_068_000 picoseconds. - Weight::from_parts(19_434_000, 0) + // Minimum execution time: 19_177_000 picoseconds. + Weight::from_parts(19_704_000, 0) .saturating_add(Weight::from_parts(0, 13528)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -313,8 +281,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `16013` - // Minimum execution time: 21_055_000 picoseconds. - Weight::from_parts(21_379_000, 0) + // Minimum execution time: 20_449_000 picoseconds. 
+ Weight::from_parts(21_075_000, 0) .saturating_add(Weight::from_parts(0, 16013)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -336,8 +304,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 25_736_000 picoseconds. - Weight::from_parts(26_423_000, 0) + // Minimum execution time: 26_578_000 picoseconds. + Weight::from_parts(27_545_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -348,8 +316,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `11096` - // Minimum execution time: 11_853_000 picoseconds. - Weight::from_parts(12_215_000, 0) + // Minimum execution time: 11_646_000 picoseconds. + Weight::from_parts(11_944_000, 0) .saturating_add(Weight::from_parts(0, 11096)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -359,8 +327,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `170` // Estimated: `13535` - // Minimum execution time: 19_418_000 picoseconds. - Weight::from_parts(19_794_000, 0) + // Minimum execution time: 19_301_000 picoseconds. + Weight::from_parts(19_664_000, 0) .saturating_add(Weight::from_parts(0, 13535)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -383,8 +351,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `13577` - // Minimum execution time: 34_719_000 picoseconds. - Weight::from_parts(35_260_000, 0) + // Minimum execution time: 35_715_000 picoseconds. + Weight::from_parts(36_915_000, 0) .saturating_add(Weight::from_parts(0, 13577)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) @@ -397,8 +365,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 4_937_000 picoseconds. - Weight::from_parts(5_203_000, 0) + // Minimum execution time: 4_871_000 picoseconds. + Weight::from_parts(5_066_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -409,8 +377,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 26_064_000 picoseconds. - Weight::from_parts(26_497_000, 0) + // Minimum execution time: 25_150_000 picoseconds. + Weight::from_parts(26_119_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -421,8 +389,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 37_132_000 picoseconds. - Weight::from_parts(37_868_000, 0) + // Minimum execution time: 38_248_000 picoseconds. 
+ Weight::from_parts(39_122_000, 0) .saturating_add(Weight::from_parts(0, 3625)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs index a36c25f9604..be3d7661ab3 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs @@ -70,28 +70,6 @@ impl pallet_xcm::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 21_164_000 picoseconds. - Weight::from_parts(21_656_000, 0) - .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -184,14 +162,6 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(7_791_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_585_000 picoseconds. - Weight::from_parts(7_897_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs index adfaa9ea202..a732e1a5734 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,30 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 18_732_000 picoseconds. - Weight::from_parts(19_386_000, 0) - .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 18_943_000 picoseconds. - Weight::from_parts(19_455_000, 0) + // Minimum execution time: 18_513_000 picoseconds. + Weight::from_parts(19_156_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -112,8 +90,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3593` - // Minimum execution time: 88_917_000 picoseconds. - Weight::from_parts(91_611_000, 0) + // Minimum execution time: 88_096_000 picoseconds. + Weight::from_parts(89_732_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -148,8 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3593` - // Minimum execution time: 88_587_000 picoseconds. - Weight::from_parts(90_303_000, 0) + // Minimum execution time: 88_239_000 picoseconds. 
+ Weight::from_parts(89_729_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -164,24 +142,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_856_000 picoseconds. - Weight::from_parts(6_202_000, 0) + // Minimum execution time: 5_955_000 picoseconds. + Weight::from_parts(6_266_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -191,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_797_000 picoseconds. - Weight::from_parts(1_970_000, 0) + // Minimum execution time: 1_868_000 picoseconds. + Weight::from_parts(1_961_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -218,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 24_479_000 picoseconds. - Weight::from_parts(25_058_000, 0) + // Minimum execution time: 24_388_000 picoseconds. + Weight::from_parts(25_072_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -244,8 +212,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 27_282_000 picoseconds. - Weight::from_parts(27_924_000, 0) + // Minimum execution time: 26_762_000 picoseconds. + Weight::from_parts(27_631_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -256,8 +224,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_801_000 picoseconds. - Weight::from_parts(1_988_000, 0) + // Minimum execution time: 1_856_000 picoseconds. + Weight::from_parts(2_033_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -267,8 +235,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 16_509_000 picoseconds. - Weight::from_parts(16_939_000, 0) + // Minimum execution time: 17_718_000 picoseconds. 
+ Weight::from_parts(18_208_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -279,8 +247,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_140_000 picoseconds. - Weight::from_parts(16_843_000, 0) + // Minimum execution time: 17_597_000 picoseconds. + Weight::from_parts(18_090_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -291,8 +259,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 18_160_000 picoseconds. - Weight::from_parts(18_948_000, 0) + // Minimum execution time: 19_533_000 picoseconds. + Weight::from_parts(20_164_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -314,8 +282,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 24_409_000 picoseconds. - Weight::from_parts(25_261_000, 0) + // Minimum execution time: 24_958_000 picoseconds. + Weight::from_parts(25_628_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -326,8 +294,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 10_848_000 picoseconds. - Weight::from_parts(11_241_000, 0) + // Minimum execution time: 12_209_000 picoseconds. + Weight::from_parts(12_612_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -337,8 +305,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 16_609_000 picoseconds. - Weight::from_parts(17_044_000, 0) + // Minimum execution time: 17_844_000 picoseconds. + Weight::from_parts(18_266_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -361,8 +329,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 32_500_000 picoseconds. - Weight::from_parts(33_475_000, 0) + // Minimum execution time: 34_131_000 picoseconds. + Weight::from_parts(34_766_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) @@ -375,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_484_000 picoseconds. - Weight::from_parts(3_673_000, 0) + // Minimum execution time: 3_525_000 picoseconds. + Weight::from_parts(3_724_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -387,8 +355,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 25_225_000 picoseconds. - Weight::from_parts(25_731_000, 0) + // Minimum execution time: 24_975_000 picoseconds. 
+ Weight::from_parts(25_517_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -399,8 +367,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 33_961_000 picoseconds. - Weight::from_parts(34_818_000, 0) + // Minimum execution time: 33_761_000 picoseconds. + Weight::from_parts(34_674_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs index 9cf4c61466a..a78ff2355ef 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,30 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 19_702_000 picoseconds. - Weight::from_parts(20_410_000, 0) - .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 19_525_000 picoseconds. - Weight::from_parts(20_071_000, 0) + // Minimum execution time: 19_527_000 picoseconds. 
+ Weight::from_parts(19_839_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -112,8 +90,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `107` // Estimated: `3593` - // Minimum execution time: 91_793_000 picoseconds. - Weight::from_parts(93_761_000, 0) + // Minimum execution time: 90_938_000 picoseconds. + Weight::from_parts(92_822_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -148,8 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `107` // Estimated: `3593` - // Minimum execution time: 91_819_000 picoseconds. - Weight::from_parts(93_198_000, 0) + // Minimum execution time: 90_133_000 picoseconds. + Weight::from_parts(92_308_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -164,24 +142,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_183_000 picoseconds. - Weight::from_parts(6_598_000, 0) + // Minimum execution time: 6_205_000 picoseconds. + Weight::from_parts(6_595_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -191,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_987_000 picoseconds. - Weight::from_parts(2_076_000, 0) + // Minimum execution time: 1_927_000 picoseconds. + Weight::from_parts(2_062_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -218,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 25_375_000 picoseconds. - Weight::from_parts(26_165_000, 0) + // Minimum execution time: 25_078_000 picoseconds. + Weight::from_parts(25_782_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -244,8 +212,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 28_167_000 picoseconds. - Weight::from_parts(28_792_000, 0) + // Minimum execution time: 28_188_000 picoseconds. 
+ Weight::from_parts(28_826_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -256,8 +224,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_039_000 picoseconds. - Weight::from_parts(2_211_000, 0) + // Minimum execution time: 1_886_000 picoseconds. + Weight::from_parts(1_991_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -267,8 +235,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 17_127_000 picoseconds. - Weight::from_parts(17_519_000, 0) + // Minimum execution time: 17_443_000 picoseconds. + Weight::from_parts(17_964_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -279,8 +247,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_701_000 picoseconds. - Weight::from_parts(17_250_000, 0) + // Minimum execution time: 17_357_000 picoseconds. + Weight::from_parts(18_006_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -291,8 +259,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 18_795_000 picoseconds. - Weight::from_parts(19_302_000, 0) + // Minimum execution time: 18_838_000 picoseconds. + Weight::from_parts(19_688_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -314,8 +282,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 25_007_000 picoseconds. - Weight::from_parts(25_786_000, 0) + // Minimum execution time: 25_517_000 picoseconds. + Weight::from_parts(26_131_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -326,8 +294,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 11_534_000 picoseconds. - Weight::from_parts(11_798_000, 0) + // Minimum execution time: 11_587_000 picoseconds. + Weight::from_parts(11_963_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -337,8 +305,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 17_357_000 picoseconds. - Weight::from_parts(17_629_000, 0) + // Minimum execution time: 17_490_000 picoseconds. + Weight::from_parts(18_160_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -361,8 +329,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 33_487_000 picoseconds. - Weight::from_parts(34_033_000, 0) + // Minimum execution time: 34_088_000 picoseconds. 
+ Weight::from_parts(34_598_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) @@ -375,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_688_000 picoseconds. - Weight::from_parts(3_854_000, 0) + // Minimum execution time: 3_566_000 picoseconds. + Weight::from_parts(3_754_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -387,8 +355,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 26_336_000 picoseconds. - Weight::from_parts(26_873_000, 0) + // Minimum execution time: 25_078_000 picoseconds. + Weight::from_parts(25_477_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -399,8 +367,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 34_633_000 picoseconds. - Weight::from_parts(35_171_000, 0) + // Minimum execution time: 34_661_000 picoseconds. + Weight::from_parts(35_411_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs index 0edd5dfff2b..5d427d85004 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,30 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 21_911_000 picoseconds. 
- Weight::from_parts(22_431_000, 0) - .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 22_143_000 picoseconds. - Weight::from_parts(22_843_000, 0) + // Minimum execution time: 21_813_000 picoseconds. + Weight::from_parts(22_332_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -112,8 +90,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `214` // Estimated: `3679` - // Minimum execution time: 96_273_000 picoseconds. - Weight::from_parts(98_351_000, 0) + // Minimum execution time: 93_243_000 picoseconds. + Weight::from_parts(95_650_000, 0) .saturating_add(Weight::from_parts(0, 3679)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -148,8 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `214` // Estimated: `3679` - // Minimum execution time: 95_571_000 picoseconds. - Weight::from_parts(96_251_000, 0) + // Minimum execution time: 96_199_000 picoseconds. + Weight::from_parts(98_620_000, 0) .saturating_add(Weight::from_parts(0, 3679)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -164,24 +142,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_227_000 picoseconds. - Weight::from_parts(6_419_000, 0) + // Minimum execution time: 6_442_000 picoseconds. 
+ Weight::from_parts(6_682_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -191,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_851_000 picoseconds. - Weight::from_parts(1_940_000, 0) + // Minimum execution time: 1_833_000 picoseconds. + Weight::from_parts(1_973_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -218,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 27_449_000 picoseconds. - Weight::from_parts(28_513_000, 0) + // Minimum execution time: 27_318_000 picoseconds. + Weight::from_parts(28_224_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -244,8 +212,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 29_477_000 picoseconds. - Weight::from_parts(30_251_000, 0) + // Minimum execution time: 29_070_000 picoseconds. + Weight::from_parts(30_205_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -256,8 +224,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_894_000 picoseconds. - Weight::from_parts(2_009_000, 0) + // Minimum execution time: 1_904_000 picoseconds. + Weight::from_parts(2_033_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -267,8 +235,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `159` // Estimated: `13524` - // Minimum execution time: 17_991_000 picoseconds. - Weight::from_parts(18_651_000, 0) + // Minimum execution time: 18_348_000 picoseconds. + Weight::from_parts(18_853_000, 0) .saturating_add(Weight::from_parts(0, 13524)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -279,8 +247,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `163` // Estimated: `13528` - // Minimum execution time: 18_321_000 picoseconds. - Weight::from_parts(18_701_000, 0) + // Minimum execution time: 17_964_000 picoseconds. + Weight::from_parts(18_548_000, 0) .saturating_add(Weight::from_parts(0, 13528)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -291,8 +259,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `16013` - // Minimum execution time: 19_762_000 picoseconds. - Weight::from_parts(20_529_000, 0) + // Minimum execution time: 19_708_000 picoseconds. + Weight::from_parts(20_157_000, 0) .saturating_add(Weight::from_parts(0, 16013)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -314,8 +282,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 26_927_000 picoseconds. - Weight::from_parts(27_629_000, 0) + // Minimum execution time: 26_632_000 picoseconds. 
+ Weight::from_parts(27_314_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -326,8 +294,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `11096` - // Minimum execution time: 11_957_000 picoseconds. - Weight::from_parts(12_119_000, 0) + // Minimum execution time: 11_929_000 picoseconds. + Weight::from_parts(12_304_000, 0) .saturating_add(Weight::from_parts(0, 11096)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -337,8 +305,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `170` // Estimated: `13535` - // Minimum execution time: 17_942_000 picoseconds. - Weight::from_parts(18_878_000, 0) + // Minimum execution time: 18_599_000 picoseconds. + Weight::from_parts(19_195_000, 0) .saturating_add(Weight::from_parts(0, 13535)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -361,8 +329,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `13577` - // Minimum execution time: 35_640_000 picoseconds. - Weight::from_parts(36_340_000, 0) + // Minimum execution time: 35_524_000 picoseconds. + Weight::from_parts(36_272_000, 0) .saturating_add(Weight::from_parts(0, 13577)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) @@ -376,7 +344,7 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Measured: `103` // Estimated: `1588` // Minimum execution time: 4_044_000 picoseconds. - Weight::from_parts(4_229_000, 0) + Weight::from_parts(4_238_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -387,8 +355,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 26_262_000 picoseconds. - Weight::from_parts(26_842_000, 0) + // Minimum execution time: 25_741_000 picoseconds. + Weight::from_parts(26_301_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -399,8 +367,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 36_775_000 picoseconds. - Weight::from_parts(37_265_000, 0) + // Minimum execution time: 35_925_000 picoseconds. + Weight::from_parts(36_978_000, 0) .saturating_add(Weight::from_parts(0, 3625)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs index df0044089c8..c5d315467c1 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -62,28 +62,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 18_767_000 picoseconds. - Weight::from_parts(19_420_000, 0) - .saturating_add(Weight::from_parts(0, 3539)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `74` - // Estimated: `3539` - // Minimum execution time: 19_184_000 picoseconds. - Weight::from_parts(19_695_000, 0) + // Minimum execution time: 35_051_000 picoseconds. + Weight::from_parts(35_200_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -104,8 +84,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 58_120_000 picoseconds. - Weight::from_parts(59_533_000, 0) + // Minimum execution time: 56_235_000 picoseconds. + Weight::from_parts(58_178_000, 0) .saturating_add(Weight::from_parts(0, 3571)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -140,24 +120,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_074_000 picoseconds. - Weight::from_parts(6_398_000, 0) + // Minimum execution time: 6_226_000 picoseconds. 
+ Weight::from_parts(6_403_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -167,8 +137,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_036_000 picoseconds. - Weight::from_parts(2_180_000, 0) + // Minimum execution time: 2_020_000 picoseconds. + Weight::from_parts(2_100_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -192,8 +162,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 25_014_000 picoseconds. - Weight::from_parts(25_374_000, 0) + // Minimum execution time: 24_387_000 picoseconds. + Weight::from_parts(24_814_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -216,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 27_616_000 picoseconds. - Weight::from_parts(28_499_000, 0) + // Minimum execution time: 27_039_000 picoseconds. + Weight::from_parts(27_693_000, 0) .saturating_add(Weight::from_parts(0, 3757)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -228,8 +198,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_061_000 picoseconds. - Weight::from_parts(2_153_000, 0) + // Minimum execution time: 1_920_000 picoseconds. + Weight::from_parts(2_082_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -239,8 +209,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 16_592_000 picoseconds. - Weight::from_parts(16_900_000, 0) + // Minimum execution time: 17_141_000 picoseconds. + Weight::from_parts(17_500_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -251,8 +221,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_694_000 picoseconds. - Weight::from_parts(16_905_000, 0) + // Minimum execution time: 17_074_000 picoseconds. + Weight::from_parts(17_431_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -263,8 +233,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 17_779_000 picoseconds. - Weight::from_parts(18_490_000, 0) + // Minimum execution time: 19_139_000 picoseconds. + Weight::from_parts(19_474_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -284,8 +254,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 24_526_000 picoseconds. - Weight::from_parts(25_182_000, 0) + // Minimum execution time: 24_346_000 picoseconds. 
+ Weight::from_parts(25_318_000, 0) .saturating_add(Weight::from_parts(0, 6082)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -296,8 +266,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 10_467_000 picoseconds. - Weight::from_parts(10_934_000, 0) + // Minimum execution time: 11_777_000 picoseconds. + Weight::from_parts(12_051_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -307,8 +277,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 16_377_000 picoseconds. - Weight::from_parts(17_114_000, 0) + // Minimum execution time: 17_538_000 picoseconds. + Weight::from_parts(17_832_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -329,8 +299,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `13507` - // Minimum execution time: 32_575_000 picoseconds. - Weight::from_parts(33_483_000, 0) + // Minimum execution time: 33_623_000 picoseconds. + Weight::from_parts(34_186_000, 0) .saturating_add(Weight::from_parts(0, 13507)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -343,8 +313,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_604_000 picoseconds. - Weight::from_parts(3_744_000, 0) + // Minimum execution time: 3_363_000 picoseconds. + Weight::from_parts(3_511_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,8 +325,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 23_983_000 picoseconds. - Weight::from_parts(24_404_000, 0) + // Minimum execution time: 23_969_000 picoseconds. + Weight::from_parts(24_347_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -367,8 +337,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 34_446_000 picoseconds. - Weight::from_parts(35_465_000, 0) + // Minimum execution time: 34_071_000 picoseconds. + Weight::from_parts(35_031_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs index a1701c5f1c2..0082db3099d 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -62,28 +62,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 17_681_000 picoseconds. - Weight::from_parts(18_350_000, 0) - .saturating_add(Weight::from_parts(0, 3539)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `74` - // Estimated: `3539` - // Minimum execution time: 18_091_000 picoseconds. - Weight::from_parts(18_327_000, 0) + // Minimum execution time: 18_410_000 picoseconds. + Weight::from_parts(18_657_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -104,8 +84,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 54_943_000 picoseconds. - Weight::from_parts(56_519_000, 0) + // Minimum execution time: 56_616_000 picoseconds. + Weight::from_parts(57_751_000, 0) .saturating_add(Weight::from_parts(0, 3571)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -140,24 +120,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_887_000 picoseconds. - Weight::from_parts(6_101_000, 0) + // Minimum execution time: 6_014_000 picoseconds. 
+ Weight::from_parts(6_412_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -167,8 +137,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_940_000 picoseconds. - Weight::from_parts(2_022_000, 0) + // Minimum execution time: 1_844_000 picoseconds. + Weight::from_parts(1_957_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -192,8 +162,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 23_165_000 picoseconds. - Weight::from_parts(23_800_000, 0) + // Minimum execution time: 24_067_000 picoseconds. + Weight::from_parts(24_553_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -216,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 26_506_000 picoseconds. - Weight::from_parts(27_180_000, 0) + // Minimum execution time: 27_023_000 picoseconds. + Weight::from_parts(27_620_000, 0) .saturating_add(Weight::from_parts(0, 3757)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -228,8 +198,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_868_000 picoseconds. - Weight::from_parts(2_002_000, 0) + // Minimum execution time: 1_866_000 picoseconds. + Weight::from_parts(1_984_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -239,8 +209,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 16_138_000 picoseconds. - Weight::from_parts(16_447_000, 0) + // Minimum execution time: 16_425_000 picoseconds. + Weight::from_parts(16_680_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -251,8 +221,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_099_000 picoseconds. - Weight::from_parts(16_592_000, 0) + // Minimum execution time: 16_171_000 picoseconds. + Weight::from_parts(16_564_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -263,8 +233,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 17_972_000 picoseconds. - Weight::from_parts(18_379_000, 0) + // Minimum execution time: 17_785_000 picoseconds. + Weight::from_parts(18_123_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -284,8 +254,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 23_554_000 picoseconds. - Weight::from_parts(24_446_000, 0) + // Minimum execution time: 23_903_000 picoseconds. 
+ Weight::from_parts(24_769_000, 0) .saturating_add(Weight::from_parts(0, 6082)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -296,8 +266,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 10_541_000 picoseconds. - Weight::from_parts(10_894_000, 0) + // Minimum execution time: 10_617_000 picoseconds. + Weight::from_parts(10_843_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -307,8 +277,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 16_404_000 picoseconds. - Weight::from_parts(16_818_000, 0) + // Minimum execution time: 16_656_000 picoseconds. + Weight::from_parts(17_106_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -329,8 +299,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `13507` - // Minimum execution time: 31_617_000 picoseconds. - Weight::from_parts(32_336_000, 0) + // Minimum execution time: 31_721_000 picoseconds. + Weight::from_parts(32_547_000, 0) .saturating_add(Weight::from_parts(0, 13507)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -343,8 +313,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_328_000 picoseconds. - Weight::from_parts(3_501_000, 0) + // Minimum execution time: 3_439_000 picoseconds. + Weight::from_parts(3_619_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,8 +325,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 23_571_000 picoseconds. - Weight::from_parts(24_312_000, 0) + // Minimum execution time: 24_657_000 picoseconds. + Weight::from_parts(24_971_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -367,8 +337,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 32_879_000 picoseconds. - Weight::from_parts(33_385_000, 0) + // Minimum execution time: 34_028_000 picoseconds. + Weight::from_parts(34_697_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs index ac494fdc719..fabce29b5fd 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -62,28 +62,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 17_935_000 picoseconds. - Weight::from_parts(18_482_000, 0) - .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 18_311_000 picoseconds. - Weight::from_parts(18_850_000, 0) + // Minimum execution time: 17_830_000 picoseconds. + Weight::from_parts(18_411_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -104,8 +84,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 56_182_000 picoseconds. - Weight::from_parts(58_136_000, 0) + // Minimum execution time: 55_456_000 picoseconds. + Weight::from_parts(56_808_000, 0) .saturating_add(Weight::from_parts(0, 3535)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -140,24 +120,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_979_000 picoseconds. - Weight::from_parts(6_289_000, 0) + // Minimum execution time: 5_996_000 picoseconds. 
+ Weight::from_parts(6_154_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -167,8 +137,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_853_000 picoseconds. - Weight::from_parts(2_045_000, 0) + // Minimum execution time: 1_768_000 picoseconds. + Weight::from_parts(1_914_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -192,8 +162,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 23_827_000 picoseconds. - Weight::from_parts(24_493_000, 0) + // Minimum execution time: 24_120_000 picoseconds. + Weight::from_parts(24_745_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -216,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 26_755_000 picoseconds. - Weight::from_parts(27_125_000, 0) + // Minimum execution time: 26_630_000 picoseconds. + Weight::from_parts(27_289_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -228,8 +198,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_898_000 picoseconds. - Weight::from_parts(2_028_000, 0) + // Minimum execution time: 1_821_000 picoseconds. + Weight::from_parts(1_946_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -239,8 +209,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 16_300_000 picoseconds. - Weight::from_parts(16_995_000, 0) + // Minimum execution time: 16_586_000 picoseconds. + Weight::from_parts(16_977_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -251,8 +221,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_495_000 picoseconds. - Weight::from_parts(16_950_000, 0) + // Minimum execution time: 16_923_000 picoseconds. + Weight::from_parts(17_415_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -263,8 +233,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 18_153_000 picoseconds. - Weight::from_parts(18_595_000, 0) + // Minimum execution time: 18_596_000 picoseconds. + Weight::from_parts(18_823_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -284,8 +254,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 23_387_000 picoseconds. - Weight::from_parts(24_677_000, 0) + // Minimum execution time: 23_817_000 picoseconds. 
+ Weight::from_parts(24_520_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -296,8 +266,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 10_939_000 picoseconds. - Weight::from_parts(11_210_000, 0) + // Minimum execution time: 11_042_000 picoseconds. + Weight::from_parts(11_578_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -307,8 +277,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 16_850_000 picoseconds. - Weight::from_parts(17_195_000, 0) + // Minimum execution time: 17_306_000 picoseconds. + Weight::from_parts(17_817_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -329,8 +299,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 31_931_000 picoseconds. - Weight::from_parts(32_494_000, 0) + // Minimum execution time: 32_141_000 picoseconds. + Weight::from_parts(32_954_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -343,8 +313,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_514_000 picoseconds. - Weight::from_parts(3_709_000, 0) + // Minimum execution time: 3_410_000 picoseconds. + Weight::from_parts(3_556_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,8 +325,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 24_863_000 picoseconds. - Weight::from_parts(25_293_000, 0) + // Minimum execution time: 25_021_000 picoseconds. + Weight::from_parts(25_240_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -367,8 +337,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 33_799_000 picoseconds. - Weight::from_parts(34_665_000, 0) + // Minimum execution time: 33_801_000 picoseconds. + Weight::from_parts(34_655_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs index 62a9c802808..c337289243b 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -62,28 +62,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 17_450_000 picoseconds. - Weight::from_parts(17_913_000, 0) - .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 18_082_000 picoseconds. - Weight::from_parts(18_293_000, 0) + // Minimum execution time: 17_856_000 picoseconds. + Weight::from_parts(18_473_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -104,8 +84,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 54_939_000 picoseconds. - Weight::from_parts(55_721_000, 0) + // Minimum execution time: 56_112_000 picoseconds. + Weight::from_parts(57_287_000, 0) .saturating_add(Weight::from_parts(0, 3535)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -140,24 +120,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_789_000 picoseconds. - Weight::from_parts(5_995_000, 0) + // Minimum execution time: 6_186_000 picoseconds. 
+ Weight::from_parts(6_420_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -167,8 +137,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_795_000 picoseconds. - Weight::from_parts(1_924_000, 0) + // Minimum execution time: 1_824_000 picoseconds. + Weight::from_parts(1_999_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -192,8 +162,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 23_445_000 picoseconds. - Weight::from_parts(23_906_000, 0) + // Minimum execution time: 23_833_000 picoseconds. + Weight::from_parts(24_636_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -216,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 26_590_000 picoseconds. - Weight::from_parts(27_056_000, 0) + // Minimum execution time: 26_557_000 picoseconds. + Weight::from_parts(27_275_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -228,8 +198,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_889_000 picoseconds. - Weight::from_parts(1_962_000, 0) + // Minimum execution time: 1_921_000 picoseconds. + Weight::from_parts(2_040_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -239,8 +209,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 16_408_000 picoseconds. - Weight::from_parts(16_877_000, 0) + // Minimum execution time: 16_832_000 picoseconds. + Weight::from_parts(17_312_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -251,8 +221,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_791_000 picoseconds. - Weight::from_parts(17_111_000, 0) + // Minimum execution time: 16_687_000 picoseconds. + Weight::from_parts(17_123_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -263,8 +233,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 18_355_000 picoseconds. - Weight::from_parts(19_110_000, 0) + // Minimum execution time: 18_164_000 picoseconds. + Weight::from_parts(18_580_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -284,8 +254,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 23_354_000 picoseconds. - Weight::from_parts(23_999_000, 0) + // Minimum execution time: 23_577_000 picoseconds. 
+ Weight::from_parts(24_324_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -296,8 +266,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 11_065_000 picoseconds. - Weight::from_parts(11_302_000, 0) + // Minimum execution time: 11_014_000 picoseconds. + Weight::from_parts(11_223_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -307,8 +277,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 16_998_000 picoseconds. - Weight::from_parts(17_509_000, 0) + // Minimum execution time: 16_887_000 picoseconds. + Weight::from_parts(17_361_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -329,8 +299,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 31_068_000 picoseconds. - Weight::from_parts(31_978_000, 0) + // Minimum execution time: 31_705_000 picoseconds. + Weight::from_parts(32_166_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -343,8 +313,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_478_000 picoseconds. - Weight::from_parts(3_595_000, 0) + // Minimum execution time: 3_568_000 picoseconds. + Weight::from_parts(3_669_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,8 +325,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 24_962_000 picoseconds. - Weight::from_parts(25_404_000, 0) + // Minimum execution time: 24_823_000 picoseconds. + Weight::from_parts(25_344_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -367,8 +337,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 32_685_000 picoseconds. - Weight::from_parts(33_592_000, 0) + // Minimum execution time: 34_516_000 picoseconds. 
+		Weight::from_parts(35_478_000, 0)
 			.saturating_add(Weight::from_parts(0, 3555))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
diff --git a/polkadot/runtime/rococo/src/impls.rs b/polkadot/runtime/rococo/src/impls.rs
index cf364b6ac79..ac7100d7858 100644
--- a/polkadot/runtime/rococo/src/impls.rs
+++ b/polkadot/runtime/rococo/src/impls.rs
@@ -167,16 +167,11 @@ where
 			},
 		]);
 
-		let encoded_versioned_xcm =
-			VersionedXcm::V4(program).encode().try_into().map_err(|error| {
-				log::error!(target: "runtime::on_reap_identity", "XCM too large, error: {:?}", error);
-				pallet_xcm::Error::<Runtime>::XcmTooLarge
-			})?;
 		// send
-		let _ = <pallet_xcm::Pallet<Runtime>>::send_blob(
+		let _ = <pallet_xcm::Pallet<Runtime>>::send(
 			RawOrigin::Root.into(),
 			Box::new(VersionedLocation::V4(destination)),
-			encoded_versioned_xcm,
+			Box::new(VersionedXcm::V4(program)),
 		)?;
 		Ok(())
 	}
diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs
index 42972baa1c8..5544ca44658 100644
--- a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs
+++ b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_xcm`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
 
 // Executed Command:
@@ -60,26 +60,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `180`
 		// Estimated: `3645`
-		// Minimum execution time: 24_724_000 picoseconds.
-		Weight::from_parts(25_615_000, 0)
-			.saturating_add(Weight::from_parts(0, 3645))
-			.saturating_add(T::DbWeight::get().reads(4))
-			.saturating_add(T::DbWeight::get().writes(2))
-	}
-	/// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0)
-	/// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `XcmPallet::SupportedVersion` (r:1 w:0)
-	/// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
-	/// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1)
-	/// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	fn send_blob() -> Weight {
-		// Proof Size summary in bytes:
-		// Measured: `180`
-		// Estimated: `3645`
-		// Minimum execution time: 24_709_000 picoseconds.
-		Weight::from_parts(25_326_000, 0)
+		// Minimum execution time: 25_043_000 picoseconds.
+		Weight::from_parts(25_682_000, 0)
 			.saturating_add(Weight::from_parts(0, 3645))
 			.saturating_add(T::DbWeight::get().reads(4))
 			.saturating_add(T::DbWeight::get().writes(2))
@@ -98,8 +80,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `180`
 		// Estimated: `3645`
-		// Minimum execution time: 106_600_000 picoseconds.
-		Weight::from_parts(110_781_000, 0)
+		// Minimum execution time: 107_570_000 picoseconds.
+ Weight::from_parts(109_878_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) @@ -118,8 +100,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `232` // Estimated: `3697` - // Minimum execution time: 103_030_000 picoseconds. - Weight::from_parts(106_018_000, 0) + // Minimum execution time: 106_341_000 picoseconds. + Weight::from_parts(109_135_000, 0) .saturating_add(Weight::from_parts(0, 3697)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) @@ -138,8 +120,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 107_017_000 picoseconds. - Weight::from_parts(109_214_000, 0) + // Minimum execution time: 108_372_000 picoseconds. + Weight::from_parts(112_890_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) @@ -148,16 +130,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_864_000 picoseconds. - Weight::from_parts(7_135_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_955_000 picoseconds. - Weight::from_parts(7_165_000, 0) + // Minimum execution time: 6_957_000 picoseconds. + Weight::from_parts(7_417_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) @@ -166,8 +140,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_827_000 picoseconds. - Weight::from_parts(7_211_000, 0) + // Minimum execution time: 7_053_000 picoseconds. + Weight::from_parts(7_462_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -175,8 +149,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_788_000 picoseconds. - Weight::from_parts(2_021_000, 0) + // Minimum execution time: 1_918_000 picoseconds. + Weight::from_parts(2_037_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -197,8 +171,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 30_627_000 picoseconds. - Weight::from_parts(31_350_000, 0) + // Minimum execution time: 30_417_000 picoseconds. + Weight::from_parts(31_191_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -219,8 +193,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `360` // Estimated: `3825` - // Minimum execution time: 36_688_000 picoseconds. - Weight::from_parts(37_345_000, 0) + // Minimum execution time: 36_666_000 picoseconds. 
+ Weight::from_parts(37_779_000, 0) .saturating_add(Weight::from_parts(0, 3825)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -231,8 +205,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_829_000 picoseconds. - Weight::from_parts(1_986_000, 0) + // Minimum execution time: 1_869_000 picoseconds. + Weight::from_parts(2_003_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -242,8 +216,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `22` // Estimated: `13387` - // Minimum execution time: 16_104_000 picoseconds. - Weight::from_parts(16_464_000, 0) + // Minimum execution time: 16_188_000 picoseconds. + Weight::from_parts(16_435_000, 0) .saturating_add(Weight::from_parts(0, 13387)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -254,8 +228,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `26` // Estimated: `13391` - // Minimum execution time: 16_267_000 picoseconds. - Weight::from_parts(16_675_000, 0) + // Minimum execution time: 16_431_000 picoseconds. + Weight::from_parts(16_935_000, 0) .saturating_add(Weight::from_parts(0, 13391)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -266,8 +240,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `40` // Estimated: `15880` - // Minimum execution time: 18_487_000 picoseconds. - Weight::from_parts(19_102_000, 0) + // Minimum execution time: 18_460_000 picoseconds. + Weight::from_parts(18_885_000, 0) .saturating_add(Weight::from_parts(0, 15880)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -285,8 +259,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `216` // Estimated: `6156` - // Minimum execution time: 29_603_000 picoseconds. - Weight::from_parts(31_002_000, 0) + // Minimum execution time: 29_623_000 picoseconds. + Weight::from_parts(30_661_000, 0) .saturating_add(Weight::from_parts(0, 6156)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) @@ -297,8 +271,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `69` // Estimated: `10959` - // Minimum execution time: 12_183_000 picoseconds. - Weight::from_parts(12_587_000, 0) + // Minimum execution time: 12_043_000 picoseconds. + Weight::from_parts(12_360_000, 0) .saturating_add(Weight::from_parts(0, 10959)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -308,8 +282,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `33` // Estimated: `13398` - // Minimum execution time: 16_372_000 picoseconds. - Weight::from_parts(16_967_000, 0) + // Minimum execution time: 16_511_000 picoseconds. + Weight::from_parts(17_011_000, 0) .saturating_add(Weight::from_parts(0, 13398)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -328,8 +302,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `216` // Estimated: `13581` - // Minimum execution time: 38_904_000 picoseconds. - Weight::from_parts(39_983_000, 0) + // Minimum execution time: 39_041_000 picoseconds. 
+		Weight::from_parts(39_883_000, 0)
 			.saturating_add(Weight::from_parts(0, 13581))
 			.saturating_add(T::DbWeight::get().reads(9))
 			.saturating_add(T::DbWeight::get().writes(4))
@@ -342,8 +316,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `1485`
-		// Minimum execution time: 2_067_000 picoseconds.
-		Weight::from_parts(2_195_000, 0)
+		// Minimum execution time: 2_030_000 picoseconds.
+		Weight::from_parts(2_150_000, 0)
 			.saturating_add(Weight::from_parts(0, 1485))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(2))
@@ -354,8 +328,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `7576`
 		// Estimated: `11041`
-		// Minimum execution time: 23_982_000 picoseconds.
-		Weight::from_parts(24_409_000, 0)
+		// Minimum execution time: 22_615_000 picoseconds.
+		Weight::from_parts(23_008_000, 0)
 			.saturating_add(Weight::from_parts(0, 11041))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -366,8 +340,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `23`
 		// Estimated: `3488`
-		// Minimum execution time: 33_430_000 picoseconds.
-		Weight::from_parts(34_433_000, 0)
+		// Minimum execution time: 34_438_000 picoseconds.
+		Weight::from_parts(35_514_000, 0)
 			.saturating_add(Weight::from_parts(0, 3488))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
diff --git a/polkadot/runtime/westend/src/impls.rs b/polkadot/runtime/westend/src/impls.rs
index d8741c939a5..71e6b696a20 100644
--- a/polkadot/runtime/westend/src/impls.rs
+++ b/polkadot/runtime/westend/src/impls.rs
@@ -167,16 +167,11 @@ where
 			},
 		]);
 
-		let encoded_versioned_xcm =
-			VersionedXcm::V4(program).encode().try_into().map_err(|error| {
-				log::error!(target: "runtime::on_reap_identity", "XCM too large, error: {:?}", error);
-				pallet_xcm::Error::<Runtime>::XcmTooLarge
-			})?;
 		// send
-		let _ = <pallet_xcm::Pallet<Runtime>>::send_blob(
+		let _ = <pallet_xcm::Pallet<Runtime>>::send(
 			RawOrigin::Root.into(),
 			Box::new(VersionedLocation::V4(destination)),
-			encoded_versioned_xcm,
+			Box::new(VersionedXcm::V4(program)),
 		)?;
 		Ok(())
 	}
diff --git a/polkadot/runtime/westend/src/weights/pallet_xcm.rs b/polkadot/runtime/westend/src/weights/pallet_xcm.rs
index 80bc551ba1e..10725cecf24 100644
--- a/polkadot/runtime/westend/src/weights/pallet_xcm.rs
+++ b/polkadot/runtime/westend/src/weights/pallet_xcm.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_xcm`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
@@ -60,26 +60,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `147`
 		// Estimated: `3612`
-		// Minimum execution time: 24_535_000 picoseconds.
- Weight::from_parts(25_618_000, 0) - .saturating_add(Weight::from_parts(0, 3612)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) - /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) - /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) - /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn send_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `147` - // Estimated: `3612` - // Minimum execution time: 25_376_000 picoseconds. - Weight::from_parts(26_180_000, 0) + // Minimum execution time: 25_725_000 picoseconds. + Weight::from_parts(26_174_000, 0) .saturating_add(Weight::from_parts(0, 3612)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -98,8 +80,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `250` // Estimated: `6196` - // Minimum execution time: 108_786_000 picoseconds. - Weight::from_parts(112_208_000, 0) + // Minimum execution time: 113_140_000 picoseconds. + Weight::from_parts(116_204_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -118,8 +100,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `302` // Estimated: `6196` - // Minimum execution time: 105_190_000 picoseconds. - Weight::from_parts(107_140_000, 0) + // Minimum execution time: 108_571_000 picoseconds. + Weight::from_parts(110_650_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -138,8 +120,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `250` // Estimated: `6196` - // Minimum execution time: 109_027_000 picoseconds. - Weight::from_parts(111_404_000, 0) + // Minimum execution time: 111_836_000 picoseconds. + Weight::from_parts(114_435_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -154,24 +136,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_668_000 picoseconds. - Weight::from_parts(7_013_000, 0) + // Minimum execution time: 7_160_000 picoseconds. 
+ Weight::from_parts(7_477_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -179,8 +151,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_740_000 picoseconds. - Weight::from_parts(1_884_000, 0) + // Minimum execution time: 1_934_000 picoseconds. + Weight::from_parts(2_053_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -201,8 +173,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `147` // Estimated: `3612` - // Minimum execution time: 30_200_000 picoseconds. - Weight::from_parts(30_768_000, 0) + // Minimum execution time: 31_123_000 picoseconds. + Weight::from_parts(31_798_000, 0) .saturating_add(Weight::from_parts(0, 3612)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -223,8 +195,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `327` // Estimated: `3792` - // Minimum execution time: 33_928_000 picoseconds. - Weight::from_parts(35_551_000, 0) + // Minimum execution time: 35_175_000 picoseconds. + Weight::from_parts(36_098_000, 0) .saturating_add(Weight::from_parts(0, 3792)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -235,8 +207,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_759_000 picoseconds. - Weight::from_parts(1_880_000, 0) + // Minimum execution time: 1_974_000 picoseconds. + Weight::from_parts(2_096_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -246,8 +218,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `22` // Estimated: `13387` - // Minimum execution time: 16_507_000 picoseconds. - Weight::from_parts(17_219_000, 0) + // Minimum execution time: 16_626_000 picoseconds. + Weight::from_parts(17_170_000, 0) .saturating_add(Weight::from_parts(0, 13387)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -258,8 +230,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `26` // Estimated: `13391` - // Minimum execution time: 16_633_000 picoseconds. - Weight::from_parts(16_889_000, 0) + // Minimum execution time: 16_937_000 picoseconds. + Weight::from_parts(17_447_000, 0) .saturating_add(Weight::from_parts(0, 13391)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -270,8 +242,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `40` // Estimated: `15880` - // Minimum execution time: 19_297_000 picoseconds. - Weight::from_parts(19_820_000, 0) + // Minimum execution time: 19_157_000 picoseconds. + Weight::from_parts(19_659_000, 0) .saturating_add(Weight::from_parts(0, 15880)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -289,8 +261,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `183` // Estimated: `6123` - // Minimum execution time: 30_364_000 picoseconds. - Weight::from_parts(31_122_000, 0) + // Minimum execution time: 30_699_000 picoseconds. 
+		Weight::from_parts(31_537_000, 0)
 			.saturating_add(Weight::from_parts(0, 6123))
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(3))
@@ -301,8 +273,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `69`
 		// Estimated: `10959`
-		// Minimum execution time: 11_997_000 picoseconds.
-		Weight::from_parts(12_392_000, 0)
+		// Minimum execution time: 12_303_000 picoseconds.
+		Weight::from_parts(12_670_000, 0)
 			.saturating_add(Weight::from_parts(0, 10959))
 			.saturating_add(T::DbWeight::get().reads(4))
 	}
@@ -312,8 +284,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `33`
 		// Estimated: `13398`
-		// Minimum execution time: 16_894_000 picoseconds.
-		Weight::from_parts(17_452_000, 0)
+		// Minimum execution time: 17_129_000 picoseconds.
+		Weight::from_parts(17_668_000, 0)
 			.saturating_add(Weight::from_parts(0, 13398))
 			.saturating_add(T::DbWeight::get().reads(5))
 			.saturating_add(T::DbWeight::get().writes(2))
@@ -332,8 +304,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `183`
 		// Estimated: `13548`
-		// Minimum execution time: 39_864_000 picoseconds.
-		Weight::from_parts(40_859_000, 0)
+		// Minimum execution time: 39_960_000 picoseconds.
+		Weight::from_parts(41_068_000, 0)
 			.saturating_add(Weight::from_parts(0, 13548))
 			.saturating_add(T::DbWeight::get().reads(9))
 			.saturating_add(T::DbWeight::get().writes(4))
@@ -346,8 +318,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `1485`
-		// Minimum execution time: 2_363_000 picoseconds.
-		Weight::from_parts(2_519_000, 0)
+		// Minimum execution time: 2_333_000 picoseconds.
+		Weight::from_parts(2_504_000, 0)
 			.saturating_add(Weight::from_parts(0, 1485))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(2))
@@ -358,8 +330,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `7576`
 		// Estimated: `11041`
-		// Minimum execution time: 22_409_000 picoseconds.
-		Weight::from_parts(22_776_000, 0)
+		// Minimum execution time: 22_932_000 picoseconds.
+		Weight::from_parts(23_307_000, 0)
 			.saturating_add(Weight::from_parts(0, 11041))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -370,8 +342,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `23`
 		// Estimated: `3488`
-		// Minimum execution time: 33_551_000 picoseconds.
-		Weight::from_parts(34_127_000, 0)
+		// Minimum execution time: 34_558_000 picoseconds.
+		Weight::from_parts(35_299_000, 0)
 			.saturating_add(Weight::from_parts(0, 3488))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs
index 5d2e0f7b96f..081a4235b77 100644
--- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs
+++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs
@@ -16,7 +16,6 @@
 
 use super::*;
 use bounded_collections::{ConstU32, WeakBoundedVec};
-use codec::Encode;
 use frame_benchmarking::{benchmarks, whitelisted_caller, BenchmarkError, BenchmarkResult};
 use frame_support::{assert_ok, weights::Weight};
 use frame_system::RawOrigin;
@@ -101,21 +100,6 @@ benchmarks! {
 		let versioned_msg = VersionedXcm::from(msg);
 	}: _<RuntimeOrigin<T>>(send_origin, Box::new(versioned_dest), Box::new(versioned_msg))
 
-	send_blob {
-		let send_origin =
-			T::SendXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
-		if T::SendXcmOrigin::try_origin(send_origin.clone()).is_err() {
-			return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))
-		}
-		let msg = Xcm::<()>(vec![ClearOrigin]);
-		let versioned_dest: VersionedLocation = T::reachable_dest().ok_or(
-			BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)),
-		)?
-		.into();
-		let versioned_msg = VersionedXcm::from(msg);
-		let encoded_versioned_msg = versioned_msg.encode().try_into().unwrap();
-	}: _<RuntimeOrigin<T>>(send_origin, Box::new(versioned_dest), encoded_versioned_msg)
-
 	teleport_assets {
 		let (asset, destination) = T::teleportable_asset_and_dest().ok_or(
 			BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)),
@@ -263,19 +247,6 @@ benchmarks! {
 		let versioned_msg = VersionedXcm::from(msg);
 	}: _<RuntimeOrigin<T>>(execute_origin, Box::new(versioned_msg), Weight::MAX)
 
-	execute_blob {
-		let execute_origin =
-			T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
-		let origin_location = T::ExecuteXcmOrigin::try_origin(execute_origin.clone())
-			.map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?;
-		let msg = Xcm(vec![ClearOrigin]);
-		if !T::XcmExecuteFilter::contains(&(origin_location, msg.clone())) {
-			return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))
-		}
-		let versioned_msg = VersionedXcm::from(msg);
-		let encoded_versioned_msg = versioned_msg.encode().try_into().unwrap();
-	}: _<RuntimeOrigin<T>>(execute_origin, encoded_versioned_msg, Weight::MAX)
-
 	force_xcm_version {
 		let loc = T::reachable_dest().ok_or(
 			BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)),
diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs
index f6c301d5b04..af3b66121ea 100644
--- a/polkadot/xcm/pallet-xcm/src/lib.rs
+++ b/polkadot/xcm/pallet-xcm/src/lib.rs
@@ -50,8 +50,8 @@ use sp_runtime::{
 use sp_std::{boxed::Box, marker::PhantomData, prelude::*, result::Result, vec};
 use xcm::{latest::QueryResponseInfo, prelude::*};
 use xcm_builder::{
-	ExecuteController, ExecuteControllerWeightInfo, MaxXcmEncodedSize, QueryController,
-	QueryControllerWeightInfo, SendController, SendControllerWeightInfo,
+	ExecuteController, ExecuteControllerWeightInfo, QueryController, QueryControllerWeightInfo,
+	SendController, SendControllerWeightInfo,
 };
 use xcm_executor::{
 	traits::{
@@ -87,8 +87,6 @@ pub trait WeightInfo {
 	fn new_query() -> Weight;
 	fn take_response() -> Weight;
 	fn claim_assets() -> Weight;
-	fn execute_blob() -> Weight;
-	fn send_blob() -> Weight;
 }
 
 /// fallback implementation
@@ -173,14 +171,6 @@ impl WeightInfo for TestWeightInfo {
 	fn claim_assets() -> Weight {
 		Weight::from_parts(100_000_000, 0)
 	}
-
-	fn execute_blob() -> Weight {
-		Weight::from_parts(100_000_000, 0)
-	}
-
-	fn send_blob() -> Weight {
-		Weight::from_parts(100_000_000, 0)
-	}
 }
 
 #[frame_support::pallet]
@@ -296,49 +286,76 @@ pub mod pallet {
 		}
 	}
 
 	impl<T: Config> ExecuteControllerWeightInfo for Pallet<T> {
-		fn execute_blob() -> Weight {
-			T::WeightInfo::execute_blob()
+		fn execute() -> Weight {
+			T::WeightInfo::execute()
 		}
 	}
 
 	impl<T: Config> ExecuteController<OriginFor<T>, <T as Config>::RuntimeCall> for Pallet<T> {
 		type WeightInfo = Self;
-		fn execute_blob(
+		fn execute(
 			origin: OriginFor<T>,
-			encoded_message: BoundedVec<u8, MaxXcmEncodedSize>,
+			message: Box<VersionedXcm<<T as Config>::RuntimeCall>>,
 			max_weight: Weight,
 		) -> Result<Weight, DispatchErrorWithPostInfo> {
-			let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?;
-			let message =
-				VersionedXcm::<<T as Config>::RuntimeCall>::decode(&mut &encoded_message[..])
-					.map_err(|error| {
-						log::error!(target: "xcm::execute_blob", "Unable to decode XCM, error: {:?}", error);
-						Error::<T>::UnableToDecode
-					})?;
-			Self::execute_base(origin_location, Box::new(message), max_weight)
+			log::trace!(target: "xcm::pallet_xcm::execute", "message {:?}, max_weight {:?}", message, max_weight);
+			let outcome = (|| {
+				let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?;
+				let mut hash = message.using_encoded(sp_io::hashing::blake2_256);
+				let message = (*message).try_into().map_err(|()| Error::<T>::BadVersion)?;
+				let value = (origin_location, message);
+				ensure!(T::XcmExecuteFilter::contains(&value), Error::<T>::Filtered);
+				let (origin_location, message) = value;
+				Ok(T::XcmExecutor::prepare_and_execute(
+					origin_location,
+					message,
+					&mut hash,
+					max_weight,
+					max_weight,
+				))
+			})()
+			.map_err(|e: DispatchError| {
+				e.with_weight(<Self::WeightInfo as ExecuteControllerWeightInfo>::execute())
+			})?;
+
+			Self::deposit_event(Event::Attempted { outcome: outcome.clone() });
+			let weight_used = outcome.weight_used();
+			outcome.ensure_complete().map_err(|error| {
+				log::error!(target: "xcm::pallet_xcm::execute", "XCM execution failed with error {:?}", error);
+				Error::<T>::LocalExecutionIncomplete.with_weight(
+					weight_used.saturating_add(
+						<Self::WeightInfo as ExecuteControllerWeightInfo>::execute(),
+					),
+				)
+			})?;
+			Ok(weight_used)
 		}
 	}
 
 	impl<T: Config> SendControllerWeightInfo for Pallet<T> {
-		fn send_blob() -> Weight {
-			T::WeightInfo::send_blob()
+		fn send() -> Weight {
+			T::WeightInfo::send()
 		}
 	}
 
 	impl<T: Config> SendController<OriginFor<T>> for Pallet<T> {
 		type WeightInfo = Self;
-		fn send_blob(
+		fn send(
 			origin: OriginFor<T>,
 			dest: Box<VersionedLocation>,
-			encoded_message: BoundedVec<u8, MaxXcmEncodedSize>,
+			message: Box<VersionedXcm<()>>,
 		) -> Result<XcmHash, DispatchError> {
 			let origin_location = T::SendXcmOrigin::ensure_origin(origin)?;
-			let message =
-				VersionedXcm::<()>::decode(&mut &encoded_message[..]).map_err(|error| {
-					log::error!(target: "xcm::send_blob", "Unable to decode XCM, error: {:?}", error);
-					Error::<T>::UnableToDecode
-				})?;
-			Self::send_base(origin_location, dest, Box::new(message))
+			let interior: Junctions =
+				origin_location.clone().try_into().map_err(|_| Error::<T>::InvalidOrigin)?;
+			let dest = Location::try_from(*dest).map_err(|()| Error::<T>::BadVersion)?;
+			let message: Xcm<()> = (*message).try_into().map_err(|()| Error::<T>::BadVersion)?;
+
+			let message_id = Self::send_xcm(interior, dest.clone(), message.clone())
+				.map_err(Error::<T>::from)?;
+			let e = Event::Sent { origin: origin_location, destination: dest, message, message_id };
+			Self::deposit_event(e);
+			Ok(message_id)
 		}
 	}
 
@@ -547,13 +564,6 @@ pub mod pallet {
 		/// Local XCM execution incomplete.
 		#[codec(index = 24)]
 		LocalExecutionIncomplete,
-		/// Could not decode XCM.
-		#[codec(index = 25)]
-		UnableToDecode,
-		/// XCM encoded length is too large.
-		/// Returned when an XCM encoded length is larger than `MaxXcmEncodedSize`.
-		#[codec(index = 26)]
-		XcmTooLarge,
 	}
 
 	impl<T: Config> From<SendError> for Error<T> {
@@ -890,72 +900,15 @@ pub mod pallet {
 		}
 	}
 
-	impl<T: Config> Pallet<T> {
-		/// Underlying logic for both [`execute_blob`] and [`execute`].
-		fn execute_base(
-			origin_location: Location,
-			message: Box<VersionedXcm<<T as Config>::RuntimeCall>>,
-			max_weight: Weight,
-		) -> Result<Weight, DispatchErrorWithPostInfo> {
-			log::trace!(target: "xcm::pallet_xcm::execute", "message {:?}, max_weight {:?}", message, max_weight);
-			let outcome = (|| {
-				let mut hash = message.using_encoded(sp_io::hashing::blake2_256);
-				let message = (*message).try_into().map_err(|()| Error::<T>::BadVersion)?;
-				let value = (origin_location, message);
-				ensure!(T::XcmExecuteFilter::contains(&value), Error::<T>::Filtered);
-				let (origin_location, message) = value;
-				Ok(T::XcmExecutor::prepare_and_execute(
-					origin_location,
-					message,
-					&mut hash,
-					max_weight,
-					max_weight,
-				))
-			})()
-			.map_err(|e: DispatchError| e.with_weight(T::WeightInfo::execute()))?;
-
-			Self::deposit_event(Event::Attempted { outcome: outcome.clone() });
-			let weight_used = outcome.weight_used();
-			outcome.ensure_complete().map_err(|error| {
-				log::error!(target: "xcm::pallet_xcm::execute", "XCM execution failed with error {:?}", error);
-				Error::<T>::LocalExecutionIncomplete
-					.with_weight(weight_used.saturating_add(T::WeightInfo::execute()))
-			})?;
-			Ok(weight_used)
-		}
-
-		/// Underlying logic for both [`send_blob`] and [`send`].
-		fn send_base(
-			origin_location: Location,
-			dest: Box<VersionedLocation>,
-			message: Box<VersionedXcm<()>>,
-		) -> Result<XcmHash, DispatchError> {
-			let interior: Junctions =
-				origin_location.clone().try_into().map_err(|_| Error::<T>::InvalidOrigin)?;
-			let dest = Location::try_from(*dest).map_err(|()| Error::<T>::BadVersion)?;
-			let message: Xcm<()> = (*message).try_into().map_err(|()| Error::<T>::BadVersion)?;
-
-			let message_id = Self::send_xcm(interior, dest.clone(), message.clone())
-				.map_err(Error::<T>::from)?;
-			let e = Event::Sent { origin: origin_location, destination: dest, message, message_id };
-			Self::deposit_event(e);
-			Ok(message_id)
-		}
-	}
-
 	#[pallet::call(weight(<T as Config>::WeightInfo))]
 	impl<T: Config> Pallet<T> {
-		/// WARNING: DEPRECATED. `send` will be removed after June 2024. Use `send_blob` instead.
-		#[allow(deprecated)]
-		#[deprecated(note = "`send` will be removed after June 2024. Use `send_blob` instead.")]
 		#[pallet::call_index(0)]
 		pub fn send(
 			origin: OriginFor<T>,
 			dest: Box<VersionedLocation>,
 			message: Box<VersionedXcm<()>>,
 		) -> DispatchResult {
-			let origin_location = T::SendXcmOrigin::ensure_origin(origin)?;
-			Self::send_base(origin_location, dest, message)?;
+			<Self as SendController<_>>::send(origin, dest, message)?;
 			Ok(())
 		}
 
@@ -1052,13 +1005,6 @@ pub mod pallet {
 		/// No more than `max_weight` will be used in its attempted execution. If this is less than
 		/// the maximum amount of weight that the message could take to be executed, then no
 		/// execution attempt will be made.
-		///
-		/// WARNING: DEPRECATED. `execute` will be removed after June 2024. Use `execute_blob`
-		/// instead.
-		#[allow(deprecated)]
-		#[deprecated(
-			note = "`execute` will be removed after June 2024. Use `execute_blob` instead."
-		)]
 		#[pallet::call_index(3)]
 		#[pallet::weight(max_weight.saturating_add(T::WeightInfo::execute()))]
 		pub fn execute(
 			origin: OriginFor<T>,
 			message: Box<VersionedXcm<<T as Config>::RuntimeCall>>,
 			max_weight: Weight,
 		) -> DispatchResultWithPostInfo {
-			let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?;
-			let weight_used = Self::execute_base(origin_location, message, max_weight)?;
+			let weight_used =
+				<Self as ExecuteController<_, _>>::execute(origin, message, max_weight)?;
 			Ok(Some(weight_used.saturating_add(T::WeightInfo::execute())).into())
 		}
 
@@ -1362,47 +1308,6 @@ pub mod pallet {
 			Ok(())
 		}
 
-		/// Execute an XCM from a local, signed, origin.
-		///
-		/// An event is deposited indicating whether the message could be executed completely
-		/// or only partially.
- /// - /// No more than `max_weight` will be used in its attempted execution. If this is less than - /// the maximum amount of weight that the message could take to be executed, then no - /// execution attempt will be made. - /// - /// The message is passed in encoded. It needs to be decodable as a [`VersionedXcm`]. - #[pallet::call_index(13)] - #[pallet::weight(max_weight.saturating_add(T::WeightInfo::execute_blob()))] - pub fn execute_blob( - origin: OriginFor, - encoded_message: BoundedVec, - max_weight: Weight, - ) -> DispatchResultWithPostInfo { - let weight_used = >::execute_blob( - origin, - encoded_message, - max_weight, - )?; - Ok(Some(weight_used.saturating_add(T::WeightInfo::execute_blob())).into()) - } - - /// Send an XCM from a local, signed, origin. - /// - /// The destination, `dest`, will receive this message with a `DescendOrigin` instruction - /// that makes the origin of the message be the origin on this system. - /// - /// The message is passed in encoded. It needs to be decodable as a [`VersionedXcm`]. - #[pallet::call_index(14)] - pub fn send_blob( - origin: OriginFor, - dest: Box, - encoded_message: BoundedVec, - ) -> DispatchResult { - >::send_blob(origin, dest, encoded_message)?; - Ok(()) - } - /// Transfer assets from the local chain to the destination chain using explicit transfer /// types for assets and fees. /// @@ -1451,7 +1356,7 @@ pub mod pallet { /// - `custom_xcm_on_dest`: The XCM to be executed on `dest` chain as the last step of the /// transfer, which also determines what happens to the assets on the destination chain. /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. - #[pallet::call_index(15)] + #[pallet::call_index(13)] #[pallet::weight(T::WeightInfo::transfer_assets())] pub fn transfer_assets_using_type_and_then( origin: OriginFor, diff --git a/polkadot/xcm/pallet-xcm/src/tests/mod.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs index 1147635081a..8faf16e0d2a 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/mod.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs @@ -20,10 +20,10 @@ pub(crate) mod assets_transfer; use crate::{ mock::*, pallet::SupportedVersion, AssetTraps, Config, CurrentMigration, Error, - LatestVersionedLocation, Pallet, Queries, QueryStatus, VersionDiscoveryQueue, - VersionMigrationStage, VersionNotifiers, VersionNotifyTargets, WeightInfo, + ExecuteControllerWeightInfo, LatestVersionedLocation, Pallet, Queries, QueryStatus, + VersionDiscoveryQueue, VersionMigrationStage, VersionNotifiers, VersionNotifyTargets, + WeightInfo, }; -use codec::Encode; use frame_support::{ assert_err_ignore_postinfo, assert_noop, assert_ok, traits::{Currency, Hooks}, @@ -305,12 +305,11 @@ fn send_works() { ]); let versioned_dest = Box::new(RelayLocation::get().into()); - let versioned_message = VersionedXcm::from(message.clone()); - let encoded_versioned_message = versioned_message.encode().try_into().unwrap(); - assert_ok!(XcmPallet::send_blob( + let versioned_message = Box::new(VersionedXcm::from(message.clone())); + assert_ok!(XcmPallet::send( RuntimeOrigin::signed(ALICE), versioned_dest, - encoded_versioned_message + versioned_message )); let sent_message = Xcm(Some(DescendOrigin(sender.clone().try_into().unwrap())) .into_iter() @@ -342,16 +341,16 @@ fn send_fails_when_xcm_router_blocks() { ]; new_test_ext_with_balances(balances).execute_with(|| { let sender: Location = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - let message = Xcm::<()>(vec![ + let message = Xcm(vec![ 
ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), buy_execution((Parent, SEND_AMOUNT)), DepositAsset { assets: AllCounted(1).into(), beneficiary: sender }, ]); assert_noop!( - XcmPallet::send_blob( + XcmPallet::send( RuntimeOrigin::signed(ALICE), Box::new(Location::ancestor(8).into()), - VersionedXcm::from(message.clone()).encode().try_into().unwrap(), + Box::new(VersionedXcm::from(message.clone())), ), crate::Error::::SendFailure ); @@ -372,16 +371,13 @@ fn execute_withdraw_to_deposit_works() { let weight = BaseXcmWeight::get() * 3; let dest: Location = Junction::AccountId32 { network: None, id: BOB.into() }.into(); assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - assert_ok!(XcmPallet::execute_blob( + assert_ok!(XcmPallet::execute( RuntimeOrigin::signed(ALICE), - VersionedXcm::from(Xcm::(vec![ + Box::new(VersionedXcm::from(Xcm(vec![ WithdrawAsset((Here, SEND_AMOUNT).into()), buy_execution((Here, SEND_AMOUNT)), DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ])) - .encode() - .try_into() - .unwrap(), + ]))), weight )); assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); @@ -403,21 +399,18 @@ fn trapped_assets_can_be_claimed() { let weight = BaseXcmWeight::get() * 6; let dest: Location = Junction::AccountId32 { network: None, id: BOB.into() }.into(); - assert_ok!(XcmPallet::execute_blob( + assert_ok!(XcmPallet::execute( RuntimeOrigin::signed(ALICE), - VersionedXcm::from(Xcm(vec![ + Box::new(VersionedXcm::from(Xcm(vec![ WithdrawAsset((Here, SEND_AMOUNT).into()), buy_execution((Here, SEND_AMOUNT)), // Don't propagated the error into the result. - SetErrorHandler(Xcm::(vec![ClearError])), + SetErrorHandler(Xcm(vec![ClearError])), // This will make an error. Trap(0), // This would succeed, but we never get to it. DepositAsset { assets: AllCounted(1).into(), beneficiary: dest.clone() }, - ])) - .encode() - .try_into() - .unwrap(), + ]))), weight )); let source: Location = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); @@ -444,16 +437,13 @@ fn trapped_assets_can_be_claimed() { assert_eq!(trapped, expected); let weight = BaseXcmWeight::get() * 3; - assert_ok!(XcmPallet::execute_blob( + assert_ok!(XcmPallet::execute( RuntimeOrigin::signed(ALICE), - VersionedXcm::from(Xcm::(vec![ + Box::new(VersionedXcm::from(Xcm(vec![ ClaimAsset { assets: (Here, SEND_AMOUNT).into(), ticket: Here.into() }, buy_execution((Here, SEND_AMOUNT)), DepositAsset { assets: AllCounted(1).into(), beneficiary: dest.clone() }, - ])) - .encode() - .try_into() - .unwrap(), + ]))), weight )); @@ -463,16 +453,13 @@ fn trapped_assets_can_be_claimed() { // Can't claim twice. assert_err_ignore_postinfo!( - XcmPallet::execute_blob( + XcmPallet::execute( RuntimeOrigin::signed(ALICE), - VersionedXcm::from(Xcm::(vec![ + Box::new(VersionedXcm::from(Xcm(vec![ ClaimAsset { assets: (Here, SEND_AMOUNT).into(), ticket: Here.into() }, buy_execution((Here, SEND_AMOUNT)), DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ])) - .encode() - .try_into() - .unwrap(), + ]))), weight ), Error::::LocalExecutionIncomplete @@ -489,9 +476,9 @@ fn claim_assets_works() { let trapping_program = Xcm::::builder_unsafe().withdraw_asset((Here, SEND_AMOUNT)).build(); // Even though assets are trapped, the extrinsic returns success. 
- assert_ok!(XcmPallet::execute_blob( + assert_ok!(XcmPallet::execute( RuntimeOrigin::signed(ALICE), - VersionedXcm::V4(trapping_program).encode().try_into().unwrap(), + Box::new(VersionedXcm::V4(trapping_program)), BaseXcmWeight::get() * 2, )); assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); @@ -544,9 +531,9 @@ fn incomplete_execute_reverts_side_effects() { assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); let amount_to_send = INITIAL_BALANCE - ExistentialDeposit::get(); let assets: Assets = (Here, amount_to_send).into(); - let result = XcmPallet::execute_blob( + let result = XcmPallet::execute( RuntimeOrigin::signed(ALICE), - VersionedXcm::from(Xcm::(vec![ + Box::new(VersionedXcm::from(Xcm(vec![ // Withdraw + BuyExec + Deposit should work WithdrawAsset(assets.clone()), buy_execution(assets.inner()[0].clone()), @@ -554,10 +541,7 @@ fn incomplete_execute_reverts_side_effects() { // Withdrawing once more will fail because of InsufficientBalance, and we expect to // revert the effects of the above instructions as well WithdrawAsset(assets), - ])) - .encode() - .try_into() - .unwrap(), + ]))), weight, ); // all effects are reverted and balances unchanged for either sender or receiver @@ -569,7 +553,7 @@ fn incomplete_execute_reverts_side_effects() { Err(sp_runtime::DispatchErrorWithPostInfo { post_info: frame_support::dispatch::PostDispatchInfo { actual_weight: Some( - <::WeightInfo>::execute_blob() + weight + as ExecuteControllerWeightInfo>::execute() + weight ), pays_fee: frame_support::dispatch::Pays::Yes, }, diff --git a/polkadot/xcm/src/lib.rs b/polkadot/xcm/src/lib.rs index 198020ea126..513dfe5501b 100644 --- a/polkadot/xcm/src/lib.rs +++ b/polkadot/xcm/src/lib.rs @@ -48,9 +48,6 @@ mod tests; /// Maximum nesting level for XCM decoding. pub const MAX_XCM_DECODE_DEPTH: u32 = 8; -/// Maximum encoded size. -/// See `decoding_respects_limit` test for more reasoning behind this value. -pub const MAX_XCM_ENCODED_SIZE: u32 = 12402; /// A version of XCM. 
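With the `*_blob` entry points and the `MAX_XCM_ENCODED_SIZE` bound gone, callers hand the pallet a typed, boxed `VersionedXcm` instead of a SCALE-encoded bounded blob. A minimal sketch of the new call shape, reusing the test-runtime names from the pallet tests above (`XcmPallet`, `RuntimeOrigin`, `ALICE`, `BaseXcmWeight`); the weight value is illustrative:

use frame_support::assert_ok;
use xcm::{prelude::*, VersionedXcm};

// Build the program as a typed value; the `.encode().try_into().unwrap()` dance
// of the blob era is gone, and with it the `UnableToDecode`/`XcmTooLarge` errors.
let program = Xcm::<RuntimeCall>(vec![ClearOrigin]);
assert_ok!(XcmPallet::execute(
    RuntimeOrigin::signed(ALICE),
    Box::new(VersionedXcm::from(program)),
    BaseXcmWeight::get() * 2, // `max_weight` cap, as used by the tests above
));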
pub type Version = u32; diff --git a/polkadot/xcm/src/v4/mod.rs b/polkadot/xcm/src/v4/mod.rs index 6635408282e..30ee485589a 100644 --- a/polkadot/xcm/src/v4/mod.rs +++ b/polkadot/xcm/src/v4/mod.rs @@ -1488,21 +1488,7 @@ mod tests { let encoded = big_xcm.encode(); assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err()); - let mut many_assets = Assets::new(); - for index in 0..MAX_ITEMS_IN_ASSETS { - many_assets.push((GeneralIndex(index as u128), 1u128).into()); - } - - let full_xcm_pass = - Xcm::<()>(vec![ - TransferAsset { assets: many_assets, beneficiary: Here.into() }; - MAX_INSTRUCTIONS_TO_DECODE as usize - ]); - let encoded = full_xcm_pass.encode(); - assert_eq!(encoded.len(), 12402); - assert!(Xcm::<()>::decode(&mut &encoded[..]).is_ok()); - - let nested_xcm_fail = Xcm::<()>(vec![ + let nested_xcm = Xcm::<()>(vec![ DepositReserveAsset { assets: All.into(), dest: Here.into(), @@ -1510,10 +1496,10 @@ mod tests { }; (MAX_INSTRUCTIONS_TO_DECODE / 2) as usize ]); - let encoded = nested_xcm_fail.encode(); + let encoded = nested_xcm.encode(); assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err()); - let even_more_nested_xcm = Xcm::<()>(vec![SetAppendix(nested_xcm_fail); 64]); + let even_more_nested_xcm = Xcm::<()>(vec![SetAppendix(nested_xcm); 64]); let encoded = even_more_nested_xcm.encode(); assert_eq!(encoded.len(), 342530); // This should not decode since the limit is 100 diff --git a/polkadot/xcm/xcm-builder/src/controller.rs b/polkadot/xcm/xcm-builder/src/controller.rs index 6bdde2a967d..04b19eaa587 100644 --- a/polkadot/xcm/xcm-builder/src/controller.rs +++ b/polkadot/xcm/xcm-builder/src/controller.rs @@ -21,7 +21,6 @@ use frame_support::{ dispatch::{DispatchErrorWithPostInfo, WithPostDispatchInfo}, pallet_prelude::DispatchError, - parameter_types, BoundedVec, }; use sp_std::boxed::Box; use xcm::prelude::*; @@ -42,12 +41,8 @@ impl Controller f /// Weight functions needed for [`ExecuteController`]. pub trait ExecuteControllerWeightInfo { - /// Weight for [`ExecuteController::execute_blob`] - fn execute_blob() -> Weight; -} - -parameter_types! { - pub const MaxXcmEncodedSize: u32 = xcm::MAX_XCM_ENCODED_SIZE; + /// Weight for [`ExecuteController::execute`] + fn execute() -> Weight; } /// Execute an XCM locally, for a given origin. @@ -66,19 +61,19 @@ pub trait ExecuteController { /// # Parameters /// /// - `origin`: the origin of the call. - /// - `msg`: the encoded XCM to be executed, should be decodable as a [`VersionedXcm`] + /// - `message`: the XCM program to be executed. /// - `max_weight`: the maximum weight that can be consumed by the execution. - fn execute_blob( + fn execute( origin: Origin, - message: BoundedVec, + message: Box>, max_weight: Weight, ) -> Result; } /// Weight functions needed for [`SendController`]. pub trait SendControllerWeightInfo { - /// Weight for [`SendController::send_blob`] - fn send_blob() -> Weight; + /// Weight for [`SendController::send`] + fn send() -> Weight; } /// Send an XCM from a given origin. @@ -98,11 +93,11 @@ pub trait SendController { /// /// - `origin`: the origin of the call. /// - `dest`: the destination of the message. - /// - `msg`: the encoded XCM to be sent, should be decodable as a [`VersionedXcm`] - fn send_blob( + /// - `msg`: the XCM to be sent. 
+ fn send( origin: Origin, dest: Box, - message: BoundedVec, + message: Box>, ) -> Result; } @@ -142,35 +137,35 @@ pub trait QueryController: QueryHandler { impl ExecuteController for () { type WeightInfo = (); - fn execute_blob( + fn execute( _origin: Origin, - _message: BoundedVec, + _message: Box>, _max_weight: Weight, ) -> Result { - Err(DispatchError::Other("ExecuteController::execute_blob not implemented") + Err(DispatchError::Other("ExecuteController::execute not implemented") .with_weight(Weight::zero())) } } impl ExecuteControllerWeightInfo for () { - fn execute_blob() -> Weight { + fn execute() -> Weight { Weight::zero() } } impl SendController for () { type WeightInfo = (); - fn send_blob( + fn send( _origin: Origin, _dest: Box, - _message: BoundedVec, + _message: Box>, ) -> Result { Ok(Default::default()) } } impl SendControllerWeightInfo for () { - fn send_blob() -> Weight { + fn send() -> Weight { Weight::zero() } } diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index 977da9a55de..1ba38d0db83 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -44,7 +44,7 @@ pub use barriers::{ mod controller; pub use controller::{ - Controller, ExecuteController, ExecuteControllerWeightInfo, MaxXcmEncodedSize, QueryController, + Controller, ExecuteController, ExecuteControllerWeightInfo, QueryController, QueryControllerWeightInfo, QueryHandler, SendController, SendControllerWeightInfo, }; diff --git a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs index bf3c00b3ff1..20fdd9a243d 100644 --- a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs +++ b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs @@ -14,9 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use super::{Balances, Runtime, RuntimeCall, RuntimeEvent}; -use crate::parachain::RuntimeHoldReason; -use frame_support::{derive_impl, parameter_types}; +use super::{Balances, Runtime, RuntimeCall, RuntimeEvent, RuntimeHoldReason}; +use frame_support::{derive_impl, parameter_types, traits::Contains}; parameter_types! { pub Schedule: pallet_contracts::Schedule = Default::default(); @@ -29,5 +28,14 @@ impl pallet_contracts::Config for Runtime { type Currency = Balances; type Schedule = Schedule; type Time = super::Timestamp; + type CallFilter = CallFilter; type Xcm = pallet_xcm::Pallet; } + +/// In this mock, we only allow other contract calls via XCM. +pub struct CallFilter; +impl Contains for CallFilter { + fn contains(call: &RuntimeCall) -> bool { + matches!(call, RuntimeCall::Contracts(pallet_contracts::Call::call { .. 
})) + } +} diff --git a/substrate/frame/contracts/mock-network/src/tests.rs b/substrate/frame/contracts/mock-network/src/tests.rs index 5632f75e787..e7d1f6279aa 100644 --- a/substrate/frame/contracts/mock-network/src/tests.rs +++ b/substrate/frame/contracts/mock-network/src/tests.rs @@ -22,7 +22,10 @@ use crate::{ relay_chain, MockNet, ParaA, ParachainBalances, Relay, ALICE, BOB, INITIAL_BALANCE, }; use codec::{Decode, Encode}; -use frame_support::traits::{fungibles::Mutate, Currency}; +use frame_support::{ + assert_err, + traits::{fungibles::Mutate, Currency}, +}; use pallet_contracts::{test_utils::builder::*, Code}; use pallet_contracts_fixtures::compile_module; use pallet_contracts_uapi::ReturnErrorCode; @@ -81,7 +84,7 @@ fn test_xcm_execute() { .build(); let result = bare_call(contract_addr.clone()) - .data(VersionedXcm::V4(message).encode().encode()) + .data(VersionedXcm::V4(message).encode()) .build(); assert_eq!(result.gas_consumed, result.gas_required); @@ -118,7 +121,7 @@ fn test_xcm_execute_incomplete() { .build(); let result = bare_call(contract_addr.clone()) - .data(VersionedXcm::V4(message).encode().encode()) + .data(VersionedXcm::V4(message).encode()) .build(); assert_eq!(result.gas_consumed, result.gas_required); @@ -129,6 +132,26 @@ fn test_xcm_execute_incomplete() { }); } +#[test] +fn test_xcm_execute_filtered_call() { + MockNet::reset(); + + let contract_addr = instantiate_test_contract("xcm_execute"); + + ParaA::execute_with(|| { + // `remark` should be rejected, as it is not allowed by our CallFilter. + let call = parachain::RuntimeCall::System(frame_system::Call::remark { remark: vec![] }); + let message: Xcm = Xcm::builder_unsafe() + .transact(OriginKind::Native, Weight::MAX, call.encode()) + .build(); + let result = bare_call(contract_addr.clone()) + .data(VersionedXcm::V4(message).encode()) + .build() + .result; + assert_err!(result, frame_system::Error::::CallFiltered); + }); +} + #[test] fn test_xcm_execute_reentrant_call() { MockNet::reset(); @@ -151,7 +174,7 @@ fn test_xcm_execute_reentrant_call() { .build(); let result = bare_call(contract_addr.clone()) - .data(VersionedXcm::V4(message).encode().encode()) + .data(VersionedXcm::V4(message).encode()) .build_and_unwrap_result(); assert_return_code!(&result, ReturnErrorCode::XcmExecutionFailed); @@ -182,7 +205,7 @@ fn test_xcm_send() { .build(); let result = bare_call(contract_addr.clone()) - .data((dest, VersionedXcm::V4(message).encode()).encode()) + .data((dest, VersionedXcm::V4(message)).encode()) .build_and_unwrap_result(); let mut data = &result.data[..]; diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index b381fd2dc4f..20cf7d1651c 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -307,9 +307,6 @@ pub mod pallet { /// Therefore please make sure to be restrictive about which dispatchables are allowed /// in order to not introduce a new DoS vector like memory allocation patterns that can /// be exploited to drive the runtime into a panic. - /// - /// This filter does not apply to XCM transact calls. To impose restrictions on XCM transact - /// calls, you must configure them separately within the XCM pallet itself. 
#[pallet::no_default_bounds] type CallFilter: Contains<::RuntimeCall>; diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 28a08ab0224..160dfa0d2f3 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -25,8 +25,12 @@ use crate::{ }; use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; use frame_support::{ - dispatch::DispatchInfo, ensure, pallet_prelude::DispatchResultWithPostInfo, parameter_types, - traits::Get, weights::Weight, + dispatch::DispatchInfo, + ensure, + pallet_prelude::{DispatchResult, DispatchResultWithPostInfo}, + parameter_types, + traits::Get, + weights::Weight, }; use pallet_contracts_proc_macro::define_env; use pallet_contracts_uapi::{CallFlags, ReturnFlags}; @@ -37,6 +41,9 @@ use sp_runtime::{ }; use sp_std::{fmt, prelude::*}; use wasmi::{core::HostError, errors::LinkerError, Linker, Memory, Store}; +use xcm::VersionedXcm; + +type CallOf = ::RuntimeCall; /// The maximum nesting depth a contract can use when encoding types. const MAX_DECODE_NESTING: u32 = 256; @@ -371,6 +378,29 @@ fn already_charged(_: u32) -> Option { None } +/// Ensure that the XCM program is executable, by checking that it does not contain any [`Transact`] +/// instruction with a call that is not allowed by the CallFilter. +fn ensure_executable(message: &VersionedXcm>) -> DispatchResult { + use frame_support::traits::Contains; + use xcm::prelude::{Transact, Xcm}; + + let mut message: Xcm> = + message.clone().try_into().map_err(|_| Error::::XCMDecodeFailed)?; + + message.iter_mut().try_for_each(|inst| -> DispatchResult { + let Transact { ref mut call, .. } = inst else { return Ok(()) }; + let call = call.ensure_decoded().map_err(|_| Error::::XCMDecodeFailed)?; + + if !::CallFilter::contains(call) { + return Err(frame_system::Error::::CallFiltered.into()) + } + + Ok(()) + })?; + + Ok(()) +} + /// Can only be used for one call. 
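In effect, `ensure_executable` front-runs the executor: the message is decoded once and every top-level `Transact` is vetted against the contracts `CallFilter`, so a filtered call fails before any XCM is executed. A hedged sketch of the check in isolation, assuming a contracts runtime `T` whose `CallFilter` only admits `Contracts::call` (as in the mock-network tests above):

use codec::Encode;
use xcm::{prelude::*, VersionedXcm};

// `System::remark` is not admitted by the filter, so a message that wraps it in
// `Transact` is rejected with `frame_system::Error::<T>::CallFiltered`.
let filtered_call: CallOf<T> = frame_system::Call::remark { remark: vec![] }.into();
let message = VersionedXcm::V4(
    Xcm::builder_unsafe()
        .transact(OriginKind::Native, Weight::MAX, filtered_call.encode())
        .build(),
);
assert!(ensure_executable::<T>(&message).is_err());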
pub struct Runtime<'a, E: Ext + 'a> { ext: &'a mut E, @@ -2082,13 +2112,16 @@ pub mod env { msg_len: u32, ) -> Result { use frame_support::dispatch::DispatchInfo; + use xcm::VersionedXcm; use xcm_builder::{ExecuteController, ExecuteControllerWeightInfo}; ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; - let message = ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; + let message: VersionedXcm> = + ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; + ensure_executable::(&message)?; let execute_weight = - <::Xcm as ExecuteController<_, _>>::WeightInfo::execute_blob(); + <::Xcm as ExecuteController<_, _>>::WeightInfo::execute(); let weight = ctx.ext.gas_meter().gas_left().max(execute_weight); let dispatch_info = DispatchInfo { weight, ..Default::default() }; @@ -2097,9 +2130,9 @@ pub mod env { RuntimeCosts::CallXcmExecute, |ctx| { let origin = crate::RawOrigin::Signed(ctx.ext.address().clone()).into(); - let weight_used = <::Xcm>::execute_blob( + let weight_used = <::Xcm>::execute( origin, - message, + Box::new(message), weight.saturating_sub(execute_weight), )?; @@ -2119,18 +2152,19 @@ pub mod env { msg_len: u32, output_ptr: u32, ) -> Result { - use xcm::VersionedLocation; + use xcm::{VersionedLocation, VersionedXcm}; use xcm_builder::{SendController, SendControllerWeightInfo}; ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; let dest: VersionedLocation = ctx.read_sandbox_memory_as(memory, dest_ptr)?; - let message = ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; - let weight = <::Xcm as SendController<_>>::WeightInfo::send_blob(); + let message: VersionedXcm<()> = + ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; + let weight = <::Xcm as SendController<_>>::WeightInfo::send(); ctx.charge_gas(RuntimeCosts::CallRuntime(weight))?; let origin = crate::RawOrigin::Signed(ctx.ext.address().clone()).into(); - match <::Xcm>::send_blob(origin, dest.into(), message) { + match <::Xcm>::send(origin, dest.into(), message.into()) { Ok(message_id) => { ctx.write_sandbox_memory(memory, output_ptr, &message_id.encode())?; Ok(ReturnErrorCode::Success) diff --git a/substrate/frame/contracts/uapi/src/host.rs b/substrate/frame/contracts/uapi/src/host.rs index 459cb59bead..92065eda5d6 100644 --- a/substrate/frame/contracts/uapi/src/host.rs +++ b/substrate/frame/contracts/uapi/src/host.rs @@ -790,7 +790,7 @@ pub trait HostFn { /// /// # Parameters /// - /// - `dest`: The XCM destination, should be decodable as [MultiLocation](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedLocation.html), + /// - `dest`: The XCM destination, should be decodable as [VersionedLocation](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedLocation.html), /// traps otherwise. /// - `msg`: The message, should be decodable as a [VersionedXcm](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedXcm.html), /// traps otherwise. -- GitLab From a633e954f3b88697aa797d9792e8a5b5cf310b7e Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 25 Apr 2024 08:26:16 +0300 Subject: [PATCH 068/107] Bridge: make some headers submissions free (#4102) supersedes https://github.com/paritytech/parity-bridges-common/pull/2873 Draft because of couple of TODOs: - [x] fix remaining TODOs; - [x] double check that all changes from https://github.com/paritytech/parity-bridges-common/pull/2873 are correctly ported; - [x] create a separate PR (on top of that one or a follow up?) 
for https://github.com/paritytech/polkadot-sdk/tree/sv-try-new-bridge-fees; - [x] fix compilation issues (haven't checked, but there should be many). --------- Co-authored-by: Adrian Catangiu --- .gitlab/pipeline/zombienet.yml | 4 +- Cargo.lock | 9 + bridges/bin/runtime-common/Cargo.toml | 2 + .../extensions/check_obsolete_extension.rs | 577 ++++++++++++++-- .../src/extensions/priority_calculator.rs | 433 ++++++++---- .../extensions/refund_relayer_extension.rs | 627 +++++++++++++----- bridges/bin/runtime-common/src/mock.rs | 4 +- .../chain-bridge-hub-cumulus/src/lib.rs | 3 + .../chains/chain-bridge-hub-kusama/src/lib.rs | 1 + .../chain-bridge-hub-polkadot/src/lib.rs | 1 + .../chains/chain-bridge-hub-rococo/src/lib.rs | 5 +- .../chain-bridge-hub-westend/src/lib.rs | 9 +- bridges/modules/grandpa/src/call_ext.rs | 358 +++++++++- bridges/modules/grandpa/src/lib.rs | 249 +++++-- bridges/modules/grandpa/src/mock.rs | 6 +- bridges/modules/grandpa/src/weights_ext.rs | 58 ++ bridges/modules/parachains/src/call_ext.rs | 299 +++++++-- bridges/modules/parachains/src/lib.rs | 344 ++++++++-- bridges/modules/parachains/src/mock.rs | 11 +- bridges/modules/parachains/src/weights_ext.rs | 27 +- bridges/primitives/parachains/src/lib.rs | 19 + bridges/primitives/runtime/src/chain.rs | 20 + .../relays/client-substrate/src/test_chain.rs | 1 + .../bridge_hub_rococo_local_network.toml | 8 +- .../bridge_hub_westend_local_network.toml | 8 +- .../rococo-westend/bridges_rococo_westend.sh | 98 +++ .../environments/rococo-westend/explorers.sh | 11 + .../environments/rococo-westend/helper.sh | 8 +- .../environments/rococo-westend/spawn.sh | 4 +- .../rococo-westend/start_relayer.sh | 26 +- .../js-helpers/native-asset-balance.js | 12 + .../roc-reaches-westend.zndsl | 6 +- .../roc-relayer-balance-does-not-change.zndsl | 11 + .../testing/tests/0001-asset-transfer/run.sh | 6 + .../wnd-reaches-rococo.zndsl | 6 +- .../wnd-relayer-balance-does-not-change.zndsl | 11 + .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 2 + .../src/bridge_common_config.rs | 6 +- .../src/bridge_to_bulletin_config.rs | 27 +- .../src/bridge_to_westend_config.rs | 34 +- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 47 +- .../bridge-hub-rococo/src/weights/mod.rs | 20 + .../bridge-hub-rococo/tests/tests.rs | 45 +- .../bridge-hubs/bridge-hub-westend/Cargo.toml | 2 + .../src/bridge_to_rococo_config.rs | 34 +- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 34 +- .../bridge-hub-westend/src/weights/mod.rs | 20 + .../bridge-hub-westend/tests/tests.rs | 33 +- .../src/test_cases/from_grandpa_chain.rs | 262 +++++++- .../src/test_cases/from_parachain.rs | 298 +++++++++ .../test-utils/src/test_cases/helpers.rs | 28 + .../src/test_data/from_grandpa_chain.rs | 69 +- .../src/test_data/from_parachain.rs | 59 +- prdoc/pr_4102.prdoc | 43 ++ 54 files changed, 3730 insertions(+), 615 deletions(-) create mode 100644 bridges/modules/grandpa/src/weights_ext.rs create mode 100755 bridges/testing/environments/rococo-westend/explorers.sh create mode 100644 bridges/testing/framework/js-helpers/native-asset-balance.js create mode 100644 bridges/testing/tests/0001-asset-transfer/roc-relayer-balance-does-not-change.zndsl create mode 100644 bridges/testing/tests/0001-asset-transfer/wnd-relayer-balance-does-not-change.zndsl create mode 100644 prdoc/pr_4102.prdoc diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml index 52948e1eb71..e306cb43c02 100644 --- a/.gitlab/pipeline/zombienet.yml +++ b/.gitlab/pipeline/zombienet.yml @@ -12,4 +12,6 @@ include: # polkadot 
tests - .gitlab/pipeline/zombienet/bridges.yml + # TODO: https://github.com/paritytech/parity-bridges-common/pull/2884 + # commenting until we have a new relayer, compatible with updated fees scheme + # - .gitlab/pipeline/zombienet/bridges.yml diff --git a/Cargo.lock b/Cargo.lock index ad7729d4b30..d64800fb085 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2153,6 +2153,7 @@ dependencies = [ "static_assertions", "substrate-wasm-builder", "testnet-parachains-constants", + "tuplex", ] [[package]] @@ -2311,6 +2312,7 @@ dependencies = [ "static_assertions", "substrate-wasm-builder", "testnet-parachains-constants", + "tuplex", "westend-runtime-constants", ] @@ -2349,6 +2351,7 @@ dependencies = [ "staging-xcm", "staging-xcm-builder", "static_assertions", + "tuplex", ] [[package]] name = "tuplex" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "676ac81d5454c4dcf37955d34fa8626ede3490f744b86ca14a7b90168d2a08aa" [[package]] name = "twox-hash" version = "1.6.3" diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml index 67b91a16a30..74049031afe 100644 --- a/bridges/bin/runtime-common/Cargo.toml +++ b/bridges/bin/runtime-common/Cargo.toml @@ -16,6 +16,7 @@ hash-db = { version = "0.16.0", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } static_assertions = { version = "1.1", optional = true } +tuplex = { version = "0.1", default-features = false } # Bridge dependencies @@ -82,6 +83,7 @@ std = [ "sp-runtime/std", "sp-std/std", "sp-trie/std", + "tuplex/std", "xcm-builder/std", "xcm/std", ] diff --git a/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs b/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs index 4b0c052df80..2c152aef682 100644 --- a/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs +++ b/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs @@ -18,55 +18,229 @@ //! obsolete (duplicated) data or do not pass some additional pallet-specific //! checks. -use crate::messages_call_ext::MessagesCallSubType; -use pallet_bridge_grandpa::CallSubType as GrandpaCallSubType; -use pallet_bridge_parachains::CallSubType as ParachainsCallSubtype; -use sp_runtime::transaction_validity::TransactionValidity; +use crate::{ + extensions::refund_relayer_extension::RefundableParachainId, + messages_call_ext::MessagesCallSubType, +}; +use bp_relayers::ExplicitOrAccountParams; +use bp_runtime::Parachain; +use pallet_bridge_grandpa::{ + BridgedBlockNumber, CallSubType as GrandpaCallSubType, SubmitFinalityProofHelper, +}; +use pallet_bridge_parachains::{ + CallSubType as ParachainsCallSubtype, SubmitParachainHeadsHelper, SubmitParachainHeadsInfo, +}; +use pallet_bridge_relayers::Pallet as RelayersPallet; +use sp_runtime::{ + traits::{Get, PhantomData, UniqueSaturatedInto}, + transaction_validity::{TransactionPriority, TransactionValidity, ValidTransactionBuilder}, +}; /// A duplication of the `FilterCall` trait. /// /// We need this trait in order to be able to implement it for the messages pallet, /// since the implementation is done outside of the pallet crate. -pub trait BridgeRuntimeFilterCall { - /// Checks if a runtime call is valid.
- fn validate(call: &Call) -> TransactionValidity; +pub trait BridgeRuntimeFilterCall { + /// Data that may be passed from the validate to `post_dispatch`. + type ToPostDispatch; + /// Called during validation. Needs to check whether a runtime call, submitted + /// by the `who`, is valid. `who` may be `None` if the transaction is not signed + /// by a regular account. + fn validate(who: &AccountId, call: &Call) -> (Self::ToPostDispatch, TransactionValidity); + /// Called after transaction is dispatched. + fn post_dispatch(_who: &AccountId, _has_failed: bool, _to_post_dispatch: Self::ToPostDispatch) { + } +} + +/// Wrapper for the bridge GRANDPA pallet that checks calls for obsolete submissions +/// and also boosts transaction priority if it was submitted by a registered relayer. +/// The boost is computed as +/// `(BundledHeaderNumber - 1 - BestFinalizedHeaderNumber) * Priority::get()`. +/// The boost is only applied if the submitter has an active registration in the relayers +/// pallet. +pub struct CheckAndBoostBridgeGrandpaTransactions( + PhantomData<(T, I, Priority, SlashAccount)>, +); + +impl, SlashAccount: Get> + BridgeRuntimeFilterCall + for CheckAndBoostBridgeGrandpaTransactions +where + T: pallet_bridge_relayers::Config + pallet_bridge_grandpa::Config, + T::RuntimeCall: GrandpaCallSubType, +{ + // bridged header number, bundled in transaction + type ToPostDispatch = Option>; + + fn validate( + who: &T::AccountId, + call: &T::RuntimeCall, + ) -> (Self::ToPostDispatch, TransactionValidity) { + match GrandpaCallSubType::::check_obsolete_submit_finality_proof(call) { + Ok(Some(our_tx)) => { + let to_post_dispatch = Some(our_tx.base.block_number); + let total_priority_boost = + compute_priority_boost::(who, our_tx.improved_by); + ( + to_post_dispatch, + ValidTransactionBuilder::default().priority(total_priority_boost).build(), + ) + }, + Ok(None) => (None, ValidTransactionBuilder::default().build()), + Err(e) => (None, Err(e)), + } + } + + fn post_dispatch( + relayer: &T::AccountId, + has_failed: bool, + bundled_block_number: Self::ToPostDispatch, + ) { + // we are only interested in associated pallet submissions + let Some(bundled_block_number) = bundled_block_number else { return }; + // we are only interested in failed or unneeded transactions + let has_failed = + has_failed || !SubmitFinalityProofHelper::::was_successful(bundled_block_number); + + if !has_failed { + return + } + + // let's slash registered relayer + RelayersPallet::::slash_and_deregister( + relayer, + ExplicitOrAccountParams::Explicit(SlashAccount::get()), + ); + } +} + +/// Wrapper for the bridge parachains pallet that checks calls for obsolete submissions +/// and also boosts transaction priority if it was submitted by a registered relayer. +/// The boost is computed as +/// `(BundledHeaderNumber - 1 - BestKnownHeaderNumber) * Priority::get()`. +/// The boost is only applied if the submitter has an active registration in the relayers +/// pallet.
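Concretely, with the numbers used by the tests below: the best finalized bridged header is #100, the submitted transaction bundles header #200, and `Priority = 1_000`. The submission improves the best header by 100, so a registered relayer gets `(100 - 1) * 1_000 = 99_000` of extra priority, while an unregistered relayer gets `boost_per_header = 0` and hence no boost. A worked example (not additional runtime logic):

// Mirrors `grandpa_wrapper_boosts_extensions_for_registered_relayer` below.
let improved_by: u64 = 200 - 100;
let boost_per_header: u64 = 1_000; // `Priority::get()` for a registered relayer
assert_eq!((improved_by - 1) * boost_per_header, 99_000);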
+pub struct CheckAndBoostBridgeParachainsTransactions( + PhantomData<(T, RefPara, Priority, SlashAccount)>, +); + +impl, SlashAccount: Get> + BridgeRuntimeFilterCall + for CheckAndBoostBridgeParachainsTransactions +where + T: pallet_bridge_relayers::Config + pallet_bridge_parachains::Config, + RefPara: RefundableParachainId, + T::RuntimeCall: ParachainsCallSubtype, +{ + // bridged header number, bundled in transaction + type ToPostDispatch = Option; + + fn validate( + who: &T::AccountId, + call: &T::RuntimeCall, + ) -> (Self::ToPostDispatch, TransactionValidity) { + match ParachainsCallSubtype::::check_obsolete_submit_parachain_heads( + call, + ) { + Ok(Some(our_tx)) if our_tx.base.para_id.0 == RefPara::BridgedChain::PARACHAIN_ID => { + let to_post_dispatch = Some(our_tx.base); + let total_priority_boost = + compute_priority_boost::(&who, our_tx.improved_by); + ( + to_post_dispatch, + ValidTransactionBuilder::default().priority(total_priority_boost).build(), + ) + }, + Ok(_) => (None, ValidTransactionBuilder::default().build()), + Err(e) => (None, Err(e)), + } + } + + fn post_dispatch(relayer: &T::AccountId, has_failed: bool, maybe_update: Self::ToPostDispatch) { + // we are only interested in associated pallet submissions + let Some(update) = maybe_update else { return }; + // we are only interested in failed or unneeded transactions + let has_failed = has_failed || + !SubmitParachainHeadsHelper::::was_successful(&update); + + if !has_failed { + return + } + + // let's slash registered relayer + RelayersPallet::::slash_and_deregister( + relayer, + ExplicitOrAccountParams::Explicit(SlashAccount::get()), + ); + } } -impl BridgeRuntimeFilterCall for pallet_bridge_grandpa::Pallet +impl BridgeRuntimeFilterCall + for pallet_bridge_grandpa::Pallet where T: pallet_bridge_grandpa::Config, T::RuntimeCall: GrandpaCallSubType, { - fn validate(call: &T::RuntimeCall) -> TransactionValidity { - GrandpaCallSubType::::check_obsolete_submit_finality_proof(call) + type ToPostDispatch = (); + fn validate(_who: &T::AccountId, call: &T::RuntimeCall) -> ((), TransactionValidity) { + ( + (), + GrandpaCallSubType::::check_obsolete_submit_finality_proof(call) + .and_then(|_| ValidTransactionBuilder::default().build()), + ) } } -impl BridgeRuntimeFilterCall +impl BridgeRuntimeFilterCall for pallet_bridge_parachains::Pallet where T: pallet_bridge_parachains::Config, T::RuntimeCall: ParachainsCallSubtype, { - fn validate(call: &T::RuntimeCall) -> TransactionValidity { - ParachainsCallSubtype::::check_obsolete_submit_parachain_heads(call) + type ToPostDispatch = (); + fn validate(_who: &T::AccountId, call: &T::RuntimeCall) -> ((), TransactionValidity) { + ( + (), + ParachainsCallSubtype::::check_obsolete_submit_parachain_heads(call) + .and_then(|_| ValidTransactionBuilder::default().build()), + ) } } -impl, I: 'static> BridgeRuntimeFilterCall - for pallet_bridge_messages::Pallet +impl, I: 'static> + BridgeRuntimeFilterCall for pallet_bridge_messages::Pallet where T::RuntimeCall: MessagesCallSubType, { + type ToPostDispatch = (); /// Validate messages in order to avoid "mining" messages delivery and delivery confirmation /// transactions, that are delivering outdated messages/confirmations. Without this validation, /// even honest relayers may lose their funds if there are multiple relays running and /// submitting the same messages/confirmations. 
- fn validate(call: &T::RuntimeCall) -> TransactionValidity { - call.check_obsolete_call() + fn validate(_who: &T::AccountId, call: &T::RuntimeCall) -> ((), TransactionValidity) { + ((), call.check_obsolete_call()) } } +/// Computes priority boost that improved known header by `improved_by` +fn compute_priority_boost( + relayer: &T::AccountId, + improved_by: N, +) -> TransactionPriority +where + T: pallet_bridge_relayers::Config, + N: UniqueSaturatedInto, + Priority: Get, +{ + // we only boost priority if relayer has staked required balance + let is_relayer_registration_active = RelayersPallet::::is_registration_active(relayer); + // if tx improves by just one, there's no need to bump its priority + let improved_by: TransactionPriority = improved_by.unique_saturated_into().saturating_sub(1); + // if relayer is registered, for every skipped header we improve by `Priority` + let boost_per_header = if is_relayer_registration_active { Priority::get() } else { 0 }; + improved_by.saturating_mul(boost_per_header) +} + /// Declares a runtime-specific `BridgeRejectObsoleteHeadersAndMessages` signed extension. /// /// ## Example @@ -92,7 +266,15 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { type AccountId = $account_id; type Call = $call; type AdditionalSigned = (); - type Pre = (); + type Pre = ( + $account_id, + ( $( + <$filter_call as $crate::extensions::check_obsolete_extension::BridgeRuntimeFilterCall< + $account_id, + $call, + >>::ToPostDispatch, + )* ), + ); fn additional_signed(&self) -> sp_std::result::Result< (), @@ -101,29 +283,72 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { Ok(()) } + #[allow(unused_variables)] fn validate( &self, - _who: &Self::AccountId, + who: &Self::AccountId, call: &Self::Call, _info: &sp_runtime::traits::DispatchInfoOf, _len: usize, ) -> sp_runtime::transaction_validity::TransactionValidity { - let valid = sp_runtime::transaction_validity::ValidTransaction::default(); + let tx_validity = sp_runtime::transaction_validity::ValidTransaction::default(); + let to_prepare = (); $( - let valid = valid - .combine_with(<$filter_call as $crate::extensions::check_obsolete_extension::BridgeRuntimeFilterCall<$call>>::validate(call)?); + let (from_validate, call_filter_validity) = < + $filter_call as + $crate::extensions::check_obsolete_extension::BridgeRuntimeFilterCall< + Self::AccountId, + $call, + >>::validate(&who, call); + let tx_validity = tx_validity.combine_with(call_filter_validity?); )* - Ok(valid) + Ok(tx_validity) } + #[allow(unused_variables)] fn pre_dispatch( self, - who: &Self::AccountId, + relayer: &Self::AccountId, call: &Self::Call, info: &sp_runtime::traits::DispatchInfoOf, len: usize, ) -> Result { - self.validate(who, call, info, len).map(drop) + use tuplex::PushBack; + let to_post_dispatch = (); + $( + let (from_validate, call_filter_validity) = < + $filter_call as + $crate::extensions::check_obsolete_extension::BridgeRuntimeFilterCall< + $account_id, + $call, + >>::validate(&relayer, call); + let _ = call_filter_validity?; + let to_post_dispatch = to_post_dispatch.push_back(from_validate); + )* + Ok((relayer.clone(), to_post_dispatch)) + } + + #[allow(unused_variables)] + fn post_dispatch( + to_post_dispatch: Option, + info: &sp_runtime::traits::DispatchInfoOf, + post_info: &sp_runtime::traits::PostDispatchInfoOf, + len: usize, + result: &sp_runtime::DispatchResult, + ) -> Result<(), sp_runtime::transaction_validity::TransactionValidityError> { + use tuplex::PopFront; + let Some((relayer, 
to_post_dispatch)) = to_post_dispatch else { return Ok(()) }; + let has_failed = result.is_err(); + $( + let (item, to_post_dispatch) = to_post_dispatch.pop_front(); + < + $filter_call as + $crate::extensions::check_obsolete_extension::BridgeRuntimeFilterCall< + $account_id, + $call, + >>::post_dispatch(&relayer, has_failed, item); + )* + Ok(()) } } }; @@ -132,10 +357,23 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { #[cfg(test)] mod tests { use super::*; + use crate::{ + extensions::refund_relayer_extension::{ + tests::{ + initialize_environment, relayer_account_at_this_chain, + submit_parachain_head_call_ex, submit_relay_header_call_ex, + }, + RefundableParachain, + }, + mock::*, + }; + use bp_polkadot_core::parachains::ParaId; + use bp_runtime::HeaderId; use frame_support::{assert_err, assert_ok}; use sp_runtime::{ - traits::SignedExtension, + traits::{ConstU64, SignedExtension}, transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, + DispatchError, }; pub struct MockCall { @@ -143,7 +381,7 @@ mod tests { } impl sp_runtime::traits::Dispatchable for MockCall { - type RuntimeOrigin = (); + type RuntimeOrigin = u64; type Config = (); type Info = (); type PostInfo = (); @@ -156,50 +394,287 @@ mod tests { } } - struct FirstFilterCall; - impl BridgeRuntimeFilterCall for FirstFilterCall { - fn validate(call: &MockCall) -> TransactionValidity { + pub struct FirstFilterCall; + impl FirstFilterCall { + fn post_dispatch_called_with(success: bool) { + frame_support::storage::unhashed::put(&[1], &success); + } + + fn verify_post_dispatch_called_with(success: bool) { + assert_eq!(frame_support::storage::unhashed::get::(&[1]), Some(success)); + } + } + + impl BridgeRuntimeFilterCall for FirstFilterCall { + type ToPostDispatch = u64; + fn validate(_who: &u64, call: &MockCall) -> (u64, TransactionValidity) { if call.data <= 1 { - return InvalidTransaction::Custom(1).into() + return (1, InvalidTransaction::Custom(1).into()) } - Ok(ValidTransaction { priority: 1, ..Default::default() }) + (1, Ok(ValidTransaction { priority: 1, ..Default::default() })) + } + + fn post_dispatch(_who: &u64, has_failed: bool, to_post_dispatch: Self::ToPostDispatch) { + Self::post_dispatch_called_with(!has_failed); + assert_eq!(to_post_dispatch, 1); + } + } + + pub struct SecondFilterCall; + + impl SecondFilterCall { + fn post_dispatch_called_with(success: bool) { + frame_support::storage::unhashed::put(&[2], &success); + } + + fn verify_post_dispatch_called_with(success: bool) { + assert_eq!(frame_support::storage::unhashed::get::(&[2]), Some(success)); } } - struct SecondFilterCall; - impl BridgeRuntimeFilterCall for SecondFilterCall { - fn validate(call: &MockCall) -> TransactionValidity { + impl BridgeRuntimeFilterCall for SecondFilterCall { + type ToPostDispatch = u64; + fn validate(_who: &u64, call: &MockCall) -> (u64, TransactionValidity) { if call.data <= 2 { - return InvalidTransaction::Custom(2).into() + return (2, InvalidTransaction::Custom(2).into()) } - Ok(ValidTransaction { priority: 2, ..Default::default() }) + (2, Ok(ValidTransaction { priority: 2, ..Default::default() })) + } + + fn post_dispatch(_who: &u64, has_failed: bool, to_post_dispatch: Self::ToPostDispatch) { + Self::post_dispatch_called_with(!has_failed); + assert_eq!(to_post_dispatch, 2); } } #[test] - fn test() { + fn test_generated_obsolete_extension() { generate_bridge_reject_obsolete_headers_and_messages!( MockCall, - (), + u64, FirstFilterCall, SecondFilterCall ); - assert_err!( - 
BridgeRejectObsoleteHeadersAndMessages.validate(&(), &MockCall { data: 1 }, &(), 0), - InvalidTransaction::Custom(1) - ); + run_test(|| { + assert_err!( + BridgeRejectObsoleteHeadersAndMessages.validate(&42, &MockCall { data: 1 }, &(), 0), + InvalidTransaction::Custom(1) + ); + assert_err!( + BridgeRejectObsoleteHeadersAndMessages.pre_dispatch( + &42, + &MockCall { data: 1 }, + &(), + 0 + ), + InvalidTransaction::Custom(1) + ); - assert_err!( - BridgeRejectObsoleteHeadersAndMessages.validate(&(), &MockCall { data: 2 }, &(), 0), - InvalidTransaction::Custom(2) - ); + assert_err!( + BridgeRejectObsoleteHeadersAndMessages.validate(&42, &MockCall { data: 2 }, &(), 0), + InvalidTransaction::Custom(2) + ); + assert_err!( + BridgeRejectObsoleteHeadersAndMessages.pre_dispatch( + &42, + &MockCall { data: 2 }, + &(), + 0 + ), + InvalidTransaction::Custom(2) + ); - assert_ok!( - BridgeRejectObsoleteHeadersAndMessages.validate(&(), &MockCall { data: 3 }, &(), 0), - ValidTransaction { priority: 3, ..Default::default() } - ) + assert_eq!( + BridgeRejectObsoleteHeadersAndMessages + .validate(&42, &MockCall { data: 3 }, &(), 0) + .unwrap(), + ValidTransaction { priority: 3, ..Default::default() }, + ); + assert_eq!( + BridgeRejectObsoleteHeadersAndMessages + .pre_dispatch(&42, &MockCall { data: 3 }, &(), 0) + .unwrap(), + (42, (1, 2)), + ); + + // when post_dispatch is called with `Ok(())`, it is propagated to all "nested" + // extensions + assert_ok!(BridgeRejectObsoleteHeadersAndMessages::post_dispatch( + Some((0, (1, 2))), + &(), + &(), + 0, + &Ok(()) + )); + FirstFilterCall::verify_post_dispatch_called_with(true); + SecondFilterCall::verify_post_dispatch_called_with(true); + + // when post_dispatch is called with `Err(())`, it is propagated to all "nested" + // extensions + assert_ok!(BridgeRejectObsoleteHeadersAndMessages::post_dispatch( + Some((0, (1, 2))), + &(), + &(), + 0, + &Err(DispatchError::BadOrigin) + )); + FirstFilterCall::verify_post_dispatch_called_with(false); + SecondFilterCall::verify_post_dispatch_called_with(false); + }); + } + + frame_support::parameter_types! 
{ + pub SlashDestination: ThisChainAccountId = 42; + } + + type BridgeGrandpaWrapper = + CheckAndBoostBridgeGrandpaTransactions, SlashDestination>; + + #[test] + fn grandpa_wrapper_does_not_boost_extensions_for_unregistered_relayer() { + run_test(|| { + initialize_environment(100, 100, 100); + + let priority_boost = BridgeGrandpaWrapper::validate( + &relayer_account_at_this_chain(), + &submit_relay_header_call_ex(200), + ) + .1 + .unwrap() + .priority; + assert_eq!(priority_boost, 0); + }) + } + + #[test] + fn grandpa_wrapper_boosts_extensions_for_registered_relayer() { + run_test(|| { + initialize_environment(100, 100, 100); + BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) + .unwrap(); + + let priority_boost = BridgeGrandpaWrapper::validate( + &relayer_account_at_this_chain(), + &submit_relay_header_call_ex(200), + ) + .1 + .unwrap() + .priority; + assert_eq!(priority_boost, 99_000); + }) + } + + #[test] + fn grandpa_wrapper_slashes_registered_relayer_if_transaction_fails() { + run_test(|| { + initialize_environment(100, 100, 100); + BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) + .unwrap(); + + assert!(BridgeRelayers::is_registration_active(&relayer_account_at_this_chain())); + BridgeGrandpaWrapper::post_dispatch(&relayer_account_at_this_chain(), true, Some(150)); + assert!(!BridgeRelayers::is_registration_active(&relayer_account_at_this_chain())); + }) + } + + #[test] + fn grandpa_wrapper_does_not_slash_registered_relayer_if_transaction_succeeds() { + run_test(|| { + initialize_environment(100, 100, 100); + BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) + .unwrap(); + + assert!(BridgeRelayers::is_registration_active(&relayer_account_at_this_chain())); + BridgeGrandpaWrapper::post_dispatch(&relayer_account_at_this_chain(), false, Some(100)); + assert!(BridgeRelayers::is_registration_active(&relayer_account_at_this_chain())); + }) + } + + type BridgeParachainsWrapper = CheckAndBoostBridgeParachainsTransactions< + TestRuntime, + RefundableParachain<(), BridgedUnderlyingParachain>, + ConstU64<1_000>, + SlashDestination, + >; + + #[test] + fn parachains_wrapper_does_not_boost_extensions_for_unregistered_relayer() { + run_test(|| { + initialize_environment(100, 100, 100); + + let priority_boost = BridgeParachainsWrapper::validate( + &relayer_account_at_this_chain(), + &submit_parachain_head_call_ex(200), + ) + .1 + .unwrap() + .priority; + assert_eq!(priority_boost, 0); + }) + } + + #[test] + fn parachains_wrapper_boosts_extensions_for_registered_relayer() { + run_test(|| { + initialize_environment(100, 100, 100); + BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) + .unwrap(); + + let priority_boost = BridgeParachainsWrapper::validate( + &relayer_account_at_this_chain(), + &submit_parachain_head_call_ex(200), + ) + .1 + .unwrap() + .priority; + assert_eq!(priority_boost, 99_000); + }) + } + + #[test] + fn parachains_wrapper_slashes_registered_relayer_if_transaction_fails() { + run_test(|| { + initialize_environment(100, 100, 100); + BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) + .unwrap(); + + assert!(BridgeRelayers::is_registration_active(&relayer_account_at_this_chain())); + BridgeParachainsWrapper::post_dispatch( + &relayer_account_at_this_chain(), + true, + Some(SubmitParachainHeadsInfo { + at_relay_block: HeaderId(150, Default::default()), + para_id: 
ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), + para_head_hash: [150u8; 32].into(), + is_free_execution_expected: false, + }), + ); + assert!(!BridgeRelayers::is_registration_active(&relayer_account_at_this_chain())); + }) + } + + #[test] + fn parachains_wrapper_does_not_slash_registered_relayer_if_transaction_succeeds() { + run_test(|| { + initialize_environment(100, 100, 100); + BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) + .unwrap(); + + assert!(BridgeRelayers::is_registration_active(&relayer_account_at_this_chain())); + BridgeParachainsWrapper::post_dispatch( + &relayer_account_at_this_chain(), + false, + Some(SubmitParachainHeadsInfo { + at_relay_block: HeaderId(100, Default::default()), + para_id: ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), + para_head_hash: [100u8; 32].into(), + is_free_execution_expected: false, + }), + ); + assert!(BridgeRelayers::is_registration_active(&relayer_account_at_this_chain())); + }) } } diff --git a/bridges/bin/runtime-common/src/extensions/priority_calculator.rs b/bridges/bin/runtime-common/src/extensions/priority_calculator.rs index 5035553f508..92810290f95 100644 --- a/bridges/bin/runtime-common/src/extensions/priority_calculator.rs +++ b/bridges/bin/runtime-common/src/extensions/priority_calculator.rs @@ -22,7 +22,6 @@ //! single message with nonce `N`, then the transaction with nonces `N..=N+100` will //! be rejected. This can lower bridge throughput down to one message per block. -use bp_messages::MessageNonce; use frame_support::traits::Get; use sp_runtime::transaction_validity::TransactionPriority; @@ -30,16 +29,19 @@ use sp_runtime::transaction_validity::TransactionPriority; #[allow(unused_imports)] pub use integrity_tests::*; -/// Compute priority boost for message delivery transaction that delivers -/// given number of messages. -pub fn compute_priority_boost( - messages: MessageNonce, -) -> TransactionPriority +/// We'll deal with different bridge items here - messages, headers, ... +/// To avoid being too verbose with generic code, let's just define a separate alias. +pub type ItemCount = u64; + +/// Compute priority boost for transaction that brings given number of bridge +/// items (messages, headers, ...), when every additional item adds `PriorityBoostPerItem` +/// to transaction priority. 
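A quick worked example of the generalised helper declared next: the first item is free and every additional item adds `PriorityBoostPerItem`. Here `ConstU64` stands in for whatever `Get<TransactionPriority>` type a runtime actually configures:

use sp_core::ConstU64;

// With `PriorityBoostPerItem = 1_000`: a single item earns no boost,
// five items earn (5 - 1) * 1_000.
assert_eq!(compute_priority_boost::<ConstU64<1_000>>(1), 0);
assert_eq!(compute_priority_boost::<ConstU64<1_000>>(5), 4_000);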
+pub fn compute_priority_boost<PriorityBoostPerItem>(n_items: ItemCount) -> TransactionPriority
 where
-	PriorityBoostPerMessage: Get<TransactionPriority>,
+	PriorityBoostPerItem: Get<TransactionPriority>,
 {
-	// we don't want any boost for transaction with single message => minus one
-	PriorityBoostPerMessage::get().saturating_mul(messages.saturating_sub(1))
+	// we don't want any boost for transaction with single (additional) item => minus one
+	PriorityBoostPerItem::get().saturating_mul(n_items.saturating_sub(1))
 }
 
 #[cfg(not(feature = "integrity-test"))]
@@ -47,7 +49,8 @@ mod integrity_tests {}
 
 #[cfg(feature = "integrity-test")]
 mod integrity_tests {
-	use super::compute_priority_boost;
+	use super::{compute_priority_boost, ItemCount};
+	use crate::extensions::refund_relayer_extension::RefundableParachainId;
 
 	use bp_messages::MessageNonce;
 	use bp_runtime::PreComputedSize;
@@ -55,7 +58,6 @@ mod integrity_tests {
 		dispatch::{DispatchClass, DispatchInfo, Pays, PostDispatchInfo},
 		traits::Get,
 	};
-	use pallet_bridge_messages::WeightInfoExt;
 	use pallet_transaction_payment::OnChargeTransaction;
 	use sp_runtime::{
 		traits::{Dispatchable, UniqueSaturatedInto, Zero},
@@ -68,37 +70,33 @@ mod integrity_tests {
 		T,
 	>>::Balance;
 
-	/// Ensures that the value of `PriorityBoostPerMessage` matches the value of
-	/// `tip_boost_per_message`.
+	/// Ensures that the value of `PriorityBoostPerItem` matches the value of
+	/// `tip_boost_per_item`.
 	///
-	/// We want two transactions, `TX1` with `N` messages and `TX2` with `N+1` messages, have almost
-	/// the same priority if we'll add `tip_boost_per_message` tip to the `TX1`. We want to be sure
-	/// that if we add plain `PriorityBoostPerMessage` priority to `TX1`, the priority will be close
+	/// We want two transactions, `TX1` with `N` items and `TX2` with `N+1` items, have almost
+	/// the same priority if we'll add `tip_boost_per_item` tip to the `TX1`. We want to be sure
+	/// that if we add plain `PriorityBoostPerItem` priority to `TX1`, the priority will be close
 	/// to `TX2` as well.
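The generalized helper above is linear in the number of extra items. A minimal sketch of its behaviour, assuming an illustrative `DemoBoostPerItem` constant (not a name from this patch):

    use frame_support::parameter_types;
    use sp_runtime::transaction_validity::TransactionPriority;

    parameter_types! {
        // illustrative value; real runtimes pick it via the integrity tests below
        pub const DemoBoostPerItem: TransactionPriority = 1_000;
    }

    fn demo_boost() {
        // a single item gets no boost (the "minus one" above)...
        assert_eq!(compute_priority_boost::<DemoBoostPerItem>(1), 0);
        // ...and every additional item adds `DemoBoostPerItem`
        assert_eq!(compute_priority_boost::<DemoBoostPerItem>(3), 2_000);
    }
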
- pub fn ensure_priority_boost_is_sane( - tip_boost_per_message: BalanceOf, + fn ensure_priority_boost_is_sane( + param_name: &str, + max_items: ItemCount, + tip_boost_per_item: Balance, + estimate_priority: impl Fn(ItemCount, Balance) -> TransactionPriority, ) where - Runtime: - pallet_transaction_payment::Config + pallet_bridge_messages::Config, - MessagesInstance: 'static, - PriorityBoostPerMessage: Get, - Runtime::RuntimeCall: Dispatchable, - BalanceOf: Send + Sync + FixedPointOperand, + PriorityBoostPerItem: Get, + ItemCount: UniqueSaturatedInto, + Balance: FixedPointOperand + Zero, { - let priority_boost_per_message = PriorityBoostPerMessage::get(); - let maximal_messages_in_delivery_transaction = - Runtime::MaxUnconfirmedMessagesAtInboundLane::get(); - for messages in 1..=maximal_messages_in_delivery_transaction { - let base_priority = estimate_message_delivery_transaction_priority::< - Runtime, - MessagesInstance, - >(messages, Zero::zero()); - let priority_boost = compute_priority_boost::(messages); - let priority_with_boost = base_priority + priority_boost; - - let tip = tip_boost_per_message.saturating_mul((messages - 1).unique_saturated_into()); - let priority_with_tip = - estimate_message_delivery_transaction_priority::(1, tip); + let priority_boost_per_item = PriorityBoostPerItem::get(); + for n_items in 1..=max_items { + let base_priority = estimate_priority(n_items, Zero::zero()); + let priority_boost = compute_priority_boost::(n_items); + let priority_with_boost = base_priority + .checked_add(priority_boost) + .expect("priority overflow: try lowering `max_items` or `tip_boost_per_item`?"); + + let tip = tip_boost_per_item.saturating_mul((n_items - 1).unique_saturated_into()); + let priority_with_tip = estimate_priority(1, tip); const ERROR_MARGIN: TransactionPriority = 5; // 5% if priority_with_boost.abs_diff(priority_with_tip).saturating_mul(100) / @@ -106,97 +104,304 @@ mod integrity_tests { ERROR_MARGIN { panic!( - "The PriorityBoostPerMessage value ({}) must be fixed to: {}", - priority_boost_per_message, - compute_priority_boost_per_message::( - tip_boost_per_message + "The {param_name} value ({}) must be fixed to: {}", + priority_boost_per_item, + compute_priority_boost_per_item( + max_items, + tip_boost_per_item, + estimate_priority ), ); } } } - /// Compute priority boost that we give to message delivery transaction for additional message. + /// Compute priority boost that we give to bridge transaction for every + /// additional bridge item. 
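What the check above enforces, in miniature, under a deliberately toy fee model (a linear `estimate_priority` closure; the real one is derived from transaction weight and size further below):

    fn toy_sanity_relation() {
        // toy model: priority equals the tip, items contribute nothing by themselves
        let estimate_priority = |_n_items: u64, tip: u64| tip;
        let (tip_boost_per_item, n_items) = (1_000u64, 5u64);

        // an n-item transaction boosted by a per-item priority constant...
        let with_boost = estimate_priority(n_items, 0) + tip_boost_per_item * (n_items - 1);
        // ...should rank like a 1-item transaction tipped by the same amount
        let with_tip = estimate_priority(1, tip_boost_per_item * (n_items - 1));
        assert_eq!(with_boost, with_tip); // the pallet tolerates ~5% drift
    }
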
#[cfg(feature = "integrity-test")] - fn compute_priority_boost_per_message( - tip_boost_per_message: BalanceOf, + fn compute_priority_boost_per_item( + max_items: ItemCount, + tip_boost_per_item: Balance, + estimate_priority: impl Fn(ItemCount, Balance) -> TransactionPriority, ) -> TransactionPriority where - Runtime: - pallet_transaction_payment::Config + pallet_bridge_messages::Config, - MessagesInstance: 'static, - Runtime::RuntimeCall: Dispatchable, - BalanceOf: Send + Sync + FixedPointOperand, + ItemCount: UniqueSaturatedInto, + Balance: FixedPointOperand + Zero, { - // estimate priority of transaction that delivers one message and has large tip - let maximal_messages_in_delivery_transaction = - Runtime::MaxUnconfirmedMessagesAtInboundLane::get(); + // estimate priority of transaction that delivers one item and has large tip let small_with_tip_priority = - estimate_message_delivery_transaction_priority::( - 1, - tip_boost_per_message - .saturating_mul(maximal_messages_in_delivery_transaction.saturated_into()), - ); - // estimate priority of transaction that delivers maximal number of messages, but has no tip - let large_without_tip_priority = estimate_message_delivery_transaction_priority::< - Runtime, - MessagesInstance, - >(maximal_messages_in_delivery_transaction, Zero::zero()); + estimate_priority(1, tip_boost_per_item.saturating_mul(max_items.saturated_into())); + // estimate priority of transaction that delivers maximal number of items, but has no tip + let large_without_tip_priority = estimate_priority(max_items, Zero::zero()); small_with_tip_priority .saturating_sub(large_without_tip_priority) - .saturating_div(maximal_messages_in_delivery_transaction - 1) + .saturating_div(max_items - 1) } - /// Estimate message delivery transaction priority. - #[cfg(feature = "integrity-test")] - fn estimate_message_delivery_transaction_priority( - messages: MessageNonce, - tip: BalanceOf, - ) -> TransactionPriority - where - Runtime: - pallet_transaction_payment::Config + pallet_bridge_messages::Config, - MessagesInstance: 'static, - Runtime::RuntimeCall: Dispatchable, - BalanceOf: Send + Sync + FixedPointOperand, - { - // just an estimation of extra transaction bytes that are added to every transaction - // (including signature, signed extensions extra and etc + in our case it includes - // all call arguments except the proof itself) - let base_tx_size = 512; - // let's say we are relaying similar small messages and for every message we add more trie - // nodes to the proof (x0.5 because we expect some nodes to be reused) - let estimated_message_size = 512; - // let's say all our messages have the same dispatch weight - let estimated_message_dispatch_weight = - Runtime::WeightInfo::message_dispatch_weight(estimated_message_size); - // messages proof argument size is (for every message) messages size + some additional - // trie nodes. 
Some of them are reused by different messages, so let's take 2/3 of default - // "overhead" constant - let messages_proof_size = Runtime::WeightInfo::expected_extra_storage_proof_size() - .saturating_mul(2) - .saturating_div(3) - .saturating_add(estimated_message_size) - .saturating_mul(messages as _); - - // finally we are able to estimate transaction size and weight - let transaction_size = base_tx_size.saturating_add(messages_proof_size); - let transaction_weight = Runtime::WeightInfo::receive_messages_proof_weight( - &PreComputedSize(transaction_size as _), - messages as _, - estimated_message_dispatch_weight.saturating_mul(messages), - ); - - pallet_transaction_payment::ChargeTransactionPayment::::get_priority( - &DispatchInfo { - weight: transaction_weight, - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - }, - transaction_size as _, - tip, - Zero::zero(), - ) + /// Computations, specific to bridge relay chains transactions. + pub mod per_relay_header { + use super::*; + + use bp_header_chain::{ + max_expected_submit_finality_proof_arguments_size, ChainWithGrandpa, + }; + use pallet_bridge_grandpa::WeightInfoExt; + + /// Ensures that the value of `PriorityBoostPerHeader` matches the value of + /// `tip_boost_per_header`. + /// + /// We want two transactions, `TX1` with `N` headers and `TX2` with `N+1` headers, have + /// almost the same priority if we'll add `tip_boost_per_header` tip to the `TX1`. We want + /// to be sure that if we add plain `PriorityBoostPerHeader` priority to `TX1`, the priority + /// will be close to `TX2` as well. + pub fn ensure_priority_boost_is_sane( + tip_boost_per_header: BalanceOf, + ) where + Runtime: + pallet_transaction_payment::Config + pallet_bridge_grandpa::Config, + GrandpaInstance: 'static, + PriorityBoostPerHeader: Get, + Runtime::RuntimeCall: Dispatchable, + BalanceOf: Send + Sync + FixedPointOperand, + { + // the meaning of `max_items` here is different when comparing with message + // transactions - with messages we have a strict limit on maximal number of + // messages we can fit into a single transaction. With headers, current best + // header may be improved by any "number of items". But this number is only + // used to verify priority boost, so it should be fine to select this arbitrary + // value - it SHALL NOT affect any value, it just adds more tests for the value. + let maximal_improved_by = 4_096; + super::ensure_priority_boost_is_sane::>( + "PriorityBoostPerRelayHeader", + maximal_improved_by, + tip_boost_per_header, + |_n_headers, tip| { + estimate_relay_header_submit_transaction_priority::( + tip, + ) + }, + ); + } + + /// Estimate relay header delivery transaction priority. 
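A hedged sketch of how a runtime could invoke this check from its own integrity tests; `Runtime`, `WithBridgedChainGrandpaInstance`, `PriorityBoostPerRelayHeader` and the tip value are illustrative assumptions, not names from this patch:

    #[cfg(feature = "integrity-test")]
    #[test]
    fn priority_boost_per_relay_header_is_sane() {
        // treat one (illustrative) token unit as the per-header tip equivalent
        let tip_boost_per_header = 1_000_000_000_000;
        per_relay_header::ensure_priority_boost_is_sane::<
            Runtime,
            WithBridgedChainGrandpaInstance,
            PriorityBoostPerRelayHeader,
        >(tip_boost_per_header);
    }
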
+ #[cfg(feature = "integrity-test")] + fn estimate_relay_header_submit_transaction_priority( + tip: BalanceOf, + ) -> TransactionPriority + where + Runtime: + pallet_transaction_payment::Config + pallet_bridge_grandpa::Config, + GrandpaInstance: 'static, + Runtime::RuntimeCall: Dispatchable, + BalanceOf: Send + Sync + FixedPointOperand, + { + // just an estimation of extra transaction bytes that are added to every transaction + // (including signature, signed extensions extra and etc + in our case it includes + // all call arguments except the proof itself) + let base_tx_size = 512; + // let's say we are relaying largest relay chain headers + let tx_call_size = max_expected_submit_finality_proof_arguments_size::< + Runtime::BridgedChain, + >(true, Runtime::BridgedChain::MAX_AUTHORITIES_COUNT * 2 / 3 + 1); + + // finally we are able to estimate transaction size and weight + let transaction_size = base_tx_size.saturating_add(tx_call_size); + let transaction_weight = Runtime::WeightInfo::submit_finality_proof_weight( + Runtime::BridgedChain::MAX_AUTHORITIES_COUNT * 2 / 3 + 1, + Runtime::BridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY, + ); + + pallet_transaction_payment::ChargeTransactionPayment::::get_priority( + &DispatchInfo { + weight: transaction_weight, + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }, + transaction_size as _, + tip, + Zero::zero(), + ) + } + } + + /// Computations, specific to bridge parachains transactions. + pub mod per_parachain_header { + use super::*; + + use bp_runtime::Parachain; + use pallet_bridge_parachains::WeightInfoExt; + + /// Ensures that the value of `PriorityBoostPerHeader` matches the value of + /// `tip_boost_per_header`. + /// + /// We want two transactions, `TX1` with `N` headers and `TX2` with `N+1` headers, have + /// almost the same priority if we'll add `tip_boost_per_header` tip to the `TX1`. We want + /// to be sure that if we add plain `PriorityBoostPerHeader` priority to `TX1`, the priority + /// will be close to `TX2` as well. + pub fn ensure_priority_boost_is_sane( + tip_boost_per_header: BalanceOf, + ) where + Runtime: pallet_transaction_payment::Config + + pallet_bridge_parachains::Config, + RefundableParachain: RefundableParachainId, + PriorityBoostPerHeader: Get, + Runtime::RuntimeCall: Dispatchable, + BalanceOf: Send + Sync + FixedPointOperand, + { + // the meaning of `max_items` here is different when comparing with message + // transactions - with messages we have a strict limit on maximal number of + // messages we can fit into a single transaction. With headers, current best + // header may be improved by any "number of items". But this number is only + // used to verify priority boost, so it should be fine to select this arbitrary + // value - it SHALL NOT affect any value, it just adds more tests for the value. + let maximal_improved_by = 4_096; + super::ensure_priority_boost_is_sane::>( + "PriorityBoostPerParachainHeader", + maximal_improved_by, + tip_boost_per_header, + |_n_headers, tip| { + estimate_parachain_header_submit_transaction_priority::< + Runtime, + RefundableParachain, + >(tip) + }, + ); + } + + /// Estimate parachain header delivery transaction priority. 
+ #[cfg(feature = "integrity-test")] + fn estimate_parachain_header_submit_transaction_priority( + tip: BalanceOf, + ) -> TransactionPriority + where + Runtime: pallet_transaction_payment::Config + + pallet_bridge_parachains::Config, + RefundableParachain: RefundableParachainId, + Runtime::RuntimeCall: Dispatchable, + BalanceOf: Send + Sync + FixedPointOperand, + { + // just an estimation of extra transaction bytes that are added to every transaction + // (including signature, signed extensions extra and etc + in our case it includes + // all call arguments except the proof itself) + let base_tx_size = 512; + // let's say we are relaying largest parachain headers and proof takes some more bytes + let tx_call_size = >::WeightInfo::expected_extra_storage_proof_size() + .saturating_add(RefundableParachain::BridgedChain::MAX_HEADER_SIZE); + + // finally we are able to estimate transaction size and weight + let transaction_size = base_tx_size.saturating_add(tx_call_size); + let transaction_weight = >::WeightInfo::submit_parachain_heads_weight( + Runtime::DbWeight::get(), + &PreComputedSize(transaction_size as _), + // just one parachain - all other submissions won't receive any boost + 1, + ); + + pallet_transaction_payment::ChargeTransactionPayment::::get_priority( + &DispatchInfo { + weight: transaction_weight, + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }, + transaction_size as _, + tip, + Zero::zero(), + ) + } + } + + /// Computations, specific to bridge messages transactions. + pub mod per_message { + use super::*; + + use pallet_bridge_messages::WeightInfoExt; + + /// Ensures that the value of `PriorityBoostPerMessage` matches the value of + /// `tip_boost_per_message`. + /// + /// We want two transactions, `TX1` with `N` messages and `TX2` with `N+1` messages, have + /// almost the same priority if we'll add `tip_boost_per_message` tip to the `TX1`. We want + /// to be sure that if we add plain `PriorityBoostPerMessage` priority to `TX1`, the + /// priority will be close to `TX2` as well. + pub fn ensure_priority_boost_is_sane( + tip_boost_per_message: BalanceOf, + ) where + Runtime: pallet_transaction_payment::Config + + pallet_bridge_messages::Config, + MessagesInstance: 'static, + PriorityBoostPerMessage: Get, + Runtime::RuntimeCall: Dispatchable, + BalanceOf: Send + Sync + FixedPointOperand, + { + let maximal_messages_in_delivery_transaction = + Runtime::MaxUnconfirmedMessagesAtInboundLane::get(); + super::ensure_priority_boost_is_sane::>( + "PriorityBoostPerMessage", + maximal_messages_in_delivery_transaction, + tip_boost_per_message, + |n_messages, tip| { + estimate_message_delivery_transaction_priority::( + n_messages, tip, + ) + }, + ); + } + + /// Estimate message delivery transaction priority. 
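The message variant is wired the same way; note that `max_items` comes from `MaxUnconfirmedMessagesAtInboundLane`, so no extra bound needs to be passed. As before, all names except the module path are illustrative assumptions:

    #[cfg(feature = "integrity-test")]
    #[test]
    fn priority_boost_per_message_is_sane() {
        let tip_boost_per_message = 1_000_000_000_000; // illustrative
        per_message::ensure_priority_boost_is_sane::<
            Runtime,
            WithBridgedChainMessagesInstance,
            PriorityBoostPerMessage,
        >(tip_boost_per_message);
    }
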
+ #[cfg(feature = "integrity-test")] + fn estimate_message_delivery_transaction_priority( + messages: MessageNonce, + tip: BalanceOf, + ) -> TransactionPriority + where + Runtime: pallet_transaction_payment::Config + + pallet_bridge_messages::Config, + MessagesInstance: 'static, + Runtime::RuntimeCall: Dispatchable, + BalanceOf: Send + Sync + FixedPointOperand, + { + // just an estimation of extra transaction bytes that are added to every transaction + // (including signature, signed extensions extra and etc + in our case it includes + // all call arguments except the proof itself) + let base_tx_size = 512; + // let's say we are relaying similar small messages and for every message we add more + // trie nodes to the proof (x0.5 because we expect some nodes to be reused) + let estimated_message_size = 512; + // let's say all our messages have the same dispatch weight + let estimated_message_dispatch_weight = + Runtime::WeightInfo::message_dispatch_weight(estimated_message_size); + // messages proof argument size is (for every message) messages size + some additional + // trie nodes. Some of them are reused by different messages, so let's take 2/3 of + // default "overhead" constant + let messages_proof_size = Runtime::WeightInfo::expected_extra_storage_proof_size() + .saturating_mul(2) + .saturating_div(3) + .saturating_add(estimated_message_size) + .saturating_mul(messages as _); + + // finally we are able to estimate transaction size and weight + let transaction_size = base_tx_size.saturating_add(messages_proof_size); + let transaction_weight = Runtime::WeightInfo::receive_messages_proof_weight( + &PreComputedSize(transaction_size as _), + messages as _, + estimated_message_dispatch_weight.saturating_mul(messages), + ); + + pallet_transaction_payment::ChargeTransactionPayment::::get_priority( + &DispatchInfo { + weight: transaction_weight, + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }, + transaction_size as _, + tip, + Zero::zero(), + ) + } } } diff --git a/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs b/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs index 64ae1d0b669..5aa7f1c095d 100644 --- a/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs +++ b/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs @@ -24,7 +24,7 @@ use crate::messages_call_ext::{ }; use bp_messages::{LaneId, MessageNonce}; use bp_relayers::{ExplicitOrAccountParams, RewardsAccountOwner, RewardsAccountParams}; -use bp_runtime::{Chain, Parachain, ParachainIdOf, RangeInclusiveExt, StaticStrProvider}; +use bp_runtime::{Parachain, RangeInclusiveExt, StaticStrProvider}; use codec::{Codec, Decode, Encode}; use frame_support::{ dispatch::{CallableCallFor, DispatchInfo, PostDispatchInfo}, @@ -33,8 +33,7 @@ use frame_support::{ CloneNoBound, DefaultNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; use pallet_bridge_grandpa::{ - CallSubType as GrandpaCallSubType, Config as GrandpaConfig, SubmitFinalityProofHelper, - SubmitFinalityProofInfo, + CallSubType as GrandpaCallSubType, SubmitFinalityProofHelper, SubmitFinalityProofInfo, }; use pallet_bridge_messages::Config as MessagesConfig; use pallet_bridge_parachains::{ @@ -66,20 +65,9 @@ type CallOf = ::RuntimeCall; /// coming from this parachain. pub trait RefundableParachainId { /// The instance of the bridge parachains pallet. - type Instance; + type Instance: 'static; /// The parachain Id. - type Id: Get; -} - -/// Default implementation of `RefundableParachainId`. 
-pub struct DefaultRefundableParachainId(PhantomData<(Instance, Id)>); - -impl RefundableParachainId for DefaultRefundableParachainId -where - Id: Get, -{ - type Instance = Instance; - type Id = Id; + type BridgedChain: Parachain; } /// Implementation of `RefundableParachainId` for `trait Parachain`. @@ -87,10 +75,11 @@ pub struct RefundableParachain(PhantomData<(Instance, Para)>); impl RefundableParachainId for RefundableParachain where + Instance: 'static, Para: Parachain, { type Instance = Instance; - type Id = ParachainIdOf; + type BridgedChain = Para; } /// Trait identifying a bridged messages lane. A relayer might be refunded for delivering messages @@ -242,17 +231,10 @@ pub enum RelayerAccountAction { /// Everything common among our refund signed extensions. pub trait RefundSignedExtension: 'static + Clone + Codec + sp_std::fmt::Debug + Default + Eq + PartialEq + Send + Sync + TypeInfo -where - >::BridgedChain: - Chain, { /// This chain runtime. - type Runtime: UtilityConfig> - + GrandpaConfig - + MessagesConfig<::Instance> + type Runtime: MessagesConfig<::Instance> + RelayersConfig; - /// Grandpa pallet reference. - type GrandpaInstance: 'static; /// Messages pallet and lane reference. type Msgs: RefundableMessagesLaneId; /// Refund amount calculator. @@ -276,11 +258,13 @@ where call: &CallOf, ) -> Result<&CallOf, TransactionValidityError>; - /// Called from post-dispatch and shall perform additional checks (apart from relay - /// chain finality and messages transaction finality) of given call result. + /// Called from post-dispatch and shall perform additional checks (apart from messages + /// transaction success) of given call result. fn additional_call_result_check( relayer: &AccountIdOf, call_info: &CallInfo, + extra_weight: &mut Weight, + extra_size: &mut u32, ) -> bool; /// Given post-dispatch information, analyze the outcome of relayer call and return @@ -348,35 +332,6 @@ where return slash_relayer_if_delivery_result } - // check if relay chain state has been updated - if let Some(finality_proof_info) = call_info.submit_finality_proof_info() { - if !SubmitFinalityProofHelper::::was_successful( - finality_proof_info.block_number, - ) { - // we only refund relayer if all calls have updated chain state - log::trace!( - target: "runtime::bridge", - "{} via {:?}: relayer {:?} has submitted invalid relay chain finality proof", - Self::Id::STR, - ::Id::get(), - relayer, - ); - return slash_relayer_if_delivery_result - } - - // there's a conflict between how bridge GRANDPA pallet works and a `utility.batchAll` - // transaction. If relay chain header is mandatory, the GRANDPA pallet returns - // `Pays::No`, because such transaction is mandatory for operating the bridge. But - // `utility.batchAll` transaction always requires payment. But in both cases we'll - // refund relayer - either explicitly here, or using `Pays::No` if he's choosing - // to submit dedicated transaction. - - // submitter has means to include extra weight/bytes in the `submit_finality_proof` - // call, so let's subtract extra weight/size to avoid refunding for this extra stuff - extra_weight = finality_proof_info.extra_weight; - extra_size = finality_proof_info.extra_size; - } - // Check if the `ReceiveMessagesProof` call delivered at least some of the messages that // it contained. If this happens, we consider the transaction "helpful" and refund it. 
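Returning to the `RefundableParachainId` reshaping earlier in this hunk: downstream code that identified the bridged parachain by a bare `Get<u32>` id must migrate to the chain-type form. A before/after sketch, where `BridgedParachain` is an assumed `bp_runtime::Parachain` implementer:

    use bp_runtime::Parachain;

    // before: type Refundable = DefaultRefundableParachainId<ParachainsInstance, BridgedParaId>;
    // after: the id now comes from the chain type itself
    type Refundable = RefundableParachain<ParachainsInstance, BridgedParachain>;

    fn bridged_para_id() -> u32 {
        <Refundable as RefundableParachainId>::BridgedChain::PARACHAIN_ID
    }
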
let msgs_call_info = call_info.messages_call_info(); @@ -391,8 +346,13 @@ where return slash_relayer_if_delivery_result } - // do additional check - if !Self::additional_call_result_check(&relayer, &call_info) { + // do additional checks + if !Self::additional_call_result_check( + &relayer, + &call_info, + &mut extra_weight, + &mut extra_size, + ) { return slash_relayer_if_delivery_result } @@ -468,18 +428,11 @@ where RuntimeDebugNoBound, TypeInfo, )] -pub struct RefundSignedExtensionAdapter(T) -where - >::BridgedChain: - Chain; +pub struct RefundSignedExtensionAdapter(T); impl SignedExtension for RefundSignedExtensionAdapter where - >::BridgedChain: - Chain, CallOf: Dispatchable - + IsSubType, T::Runtime>> - + GrandpaCallSubType + MessagesCallSubType::Instance>, { const IDENTIFIER: &'static str = T::Id::STR; @@ -644,6 +597,14 @@ impl RefundSignedExtension for RefundBridgedParachainMessages where Self: 'static + Send + Sync, + RefundBridgedGrandpaMessages< + Runtime, + Runtime::BridgesGrandpaPalletInstance, + Msgs, + Refund, + Priority, + Id, + >: 'static + Send + Sync, Runtime: UtilityConfig> + BoundedBridgeGrandpaConfig + ParachainsConfig @@ -661,7 +622,6 @@ where + MessagesCallSubType, { type Runtime = Runtime; - type GrandpaInstance = Runtime::BridgesGrandpaPalletInstance; type Msgs = Msgs; type Refund = Refund; type Priority = Priority; @@ -687,7 +647,7 @@ where let para_finality_call = calls .next() .transpose()? - .and_then(|c| c.submit_parachain_heads_info_for(Para::Id::get())); + .and_then(|c| c.submit_parachain_heads_info_for(Para::BridgedChain::PARACHAIN_ID)); let relay_finality_call = calls.next().transpose()?.and_then(|c| c.submit_finality_proof_info()); @@ -711,7 +671,26 @@ where Ok(call) } - fn additional_call_result_check(relayer: &Runtime::AccountId, call_info: &CallInfo) -> bool { + fn additional_call_result_check( + relayer: &Runtime::AccountId, + call_info: &CallInfo, + extra_weight: &mut Weight, + extra_size: &mut u32, + ) -> bool { + // check if relay chain state has been updated + let is_grandpa_call_successful = + RefundBridgedGrandpaMessages::< + Runtime, + Runtime::BridgesGrandpaPalletInstance, + Msgs, + Refund, + Priority, + Id, + >::additional_call_result_check(relayer, call_info, extra_weight, extra_size); + if !is_grandpa_call_successful { + return false + } + // check if parachain state has been updated if let Some(para_proof_info) = call_info.submit_parachain_heads_info() { if !SubmitParachainHeadsHelper::::was_successful( @@ -722,7 +701,7 @@ where target: "runtime::bridge", "{} from parachain {} via {:?}: relayer {:?} has submitted invalid parachain finality proof", Id::STR, - Para::Id::get(), + Para::BridgedChain::PARACHAIN_ID, Msgs::Id::get(), relayer, ); @@ -794,7 +773,6 @@ where + MessagesCallSubType, { type Runtime = Runtime; - type GrandpaInstance = GrandpaInstance; type Msgs = Msgs; type Refund = Refund; type Priority = Priority; @@ -836,13 +814,125 @@ where Ok(call) } - fn additional_call_result_check(_relayer: &Runtime::AccountId, _call_info: &CallInfo) -> bool { + fn additional_call_result_check( + relayer: &Runtime::AccountId, + call_info: &CallInfo, + extra_weight: &mut Weight, + extra_size: &mut u32, + ) -> bool { + // check if relay chain state has been updated + if let Some(finality_proof_info) = call_info.submit_finality_proof_info() { + if !SubmitFinalityProofHelper::::was_successful( + finality_proof_info.block_number, + ) { + // we only refund relayer if all calls have updated chain state + log::trace!( + target: "runtime::bridge", + "{} 
via {:?}: relayer {:?} has submitted invalid relay chain finality proof", + Self::Id::STR, + ::Id::get(), + relayer, + ); + return false + } + + // there's a conflict between how bridge GRANDPA pallet works and a `utility.batchAll` + // transaction. If relay chain header is mandatory, the GRANDPA pallet returns + // `Pays::No`, because such transaction is mandatory for operating the bridge. But + // `utility.batchAll` transaction always requires payment. But in both cases we'll + // refund relayer - either explicitly here, or using `Pays::No` if he's choosing + // to submit dedicated transaction. + + // submitter has means to include extra weight/bytes in the `submit_finality_proof` + // call, so let's subtract extra weight/size to avoid refunding for this extra stuff + *extra_weight = (*extra_weight).saturating_add(finality_proof_info.extra_weight); + *extra_size = (*extra_size).saturating_add(finality_proof_info.extra_size); + } + + true + } +} + +/// Transaction extension that refunds a relayer for standalone messages delivery and confirmation +/// transactions. Finality transactions are not refunded. +#[derive( + DefaultNoBound, + CloneNoBound, + Decode, + Encode, + EqNoBound, + PartialEqNoBound, + RuntimeDebugNoBound, + TypeInfo, +)] +#[scale_info(skip_type_params(Runtime, GrandpaInstance, Msgs, Refund, Priority, Id))] +pub struct RefundBridgedMessages( + PhantomData<( + // runtime with `pallet-bridge-messages` and `pallet-bridge-relayers` pallets deployed + Runtime, + // implementation of `RefundableMessagesLaneId` trait, which specifies the instance of + // the used `pallet-bridge-messages` pallet and the lane within this pallet + Msgs, + // implementation of the `RefundCalculator` trait, that is used to compute refund that + // we give to relayer for his transaction + Refund, + // getter for per-message `TransactionPriority` boost that we give to message + // delivery transactions + Priority, + // the runtime-unique identifier of this signed extension + Id, + )>, +); + +impl RefundSignedExtension + for RefundBridgedMessages +where + Self: 'static + Send + Sync, + Runtime: MessagesConfig + RelayersConfig, + Msgs: RefundableMessagesLaneId, + Refund: RefundCalculator, + Priority: Get, + Id: StaticStrProvider, + CallOf: Dispatchable + + MessagesCallSubType, +{ + type Runtime = Runtime; + type Msgs = Msgs; + type Refund = Refund; + type Priority = Priority; + type Id = Id; + + fn expand_call(call: &CallOf) -> Vec<&CallOf> { + vec![call] + } + + fn parse_and_check_for_obsolete_call( + call: &CallOf, + ) -> Result, TransactionValidityError> { + let call = Self::check_obsolete_parsed_call(call)?; + Ok(call.call_info_for(Msgs::Id::get()).map(CallInfo::Msgs)) + } + + fn check_obsolete_parsed_call( + call: &CallOf, + ) -> Result<&CallOf, TransactionValidityError> { + call.check_obsolete_call()?; + Ok(call) + } + + fn additional_call_result_check( + _relayer: &Runtime::AccountId, + _call_info: &CallInfo, + _extra_weight: &mut Weight, + _extra_size: &mut u32, + ) -> bool { + // everything is checked by the `RefundTransactionExtension` true } } #[cfg(test)] -mod tests { +pub(crate) mod tests { use super::*; use crate::{ messages::{ @@ -854,6 +944,7 @@ mod tests { }, mock::*, }; + use bp_header_chain::StoredHeaderDataBuilder; use bp_messages::{ DeliveredMessages, InboundLaneData, MessageNonce, MessagesOperatingMode, OutboundLaneData, UnrewardedRelayer, UnrewardedRelayersState, @@ -879,7 +970,6 @@ mod tests { }; parameter_types! 
{ - TestParachain: u32 = 1000; pub TestLaneId: LaneId = TEST_LANE_ID; pub MsgProofsRewardsAccount: RewardsAccountParams = RewardsAccountParams::new( TEST_LANE_ID, @@ -895,6 +985,14 @@ mod tests { bp_runtime::generate_static_str_provider!(TestExtension); + type TestMessagesExtensionProvider = RefundBridgedMessages< + TestRuntime, + RefundableMessagesLane<(), TestLaneId>, + ActualFeeRefund, + ConstU64<1>, + StrTestExtension, + >; + type TestMessagesExtension = RefundSignedExtensionAdapter; type TestGrandpaExtensionProvider = RefundBridgedGrandpaMessages< TestRuntime, (), @@ -906,7 +1004,7 @@ mod tests { type TestGrandpaExtension = RefundSignedExtensionAdapter; type TestExtensionProvider = RefundBridgedParachainMessages< TestRuntime, - DefaultRefundableParachainId<(), TestParachain>, + RefundableParachain<(), BridgedUnderlyingParachain>, RefundableMessagesLane<(), TestLaneId>, ActualFeeRefund, ConstU64<1>, @@ -930,7 +1028,7 @@ mod tests { TestPaymentProcedure::rewards_account(MsgDeliveryProofsRewardsAccount::get()) } - fn relayer_account_at_this_chain() -> ThisChainAccountId { + pub fn relayer_account_at_this_chain() -> ThisChainAccountId { 0 } @@ -938,7 +1036,7 @@ mod tests { 0 } - fn initialize_environment( + pub fn initialize_environment( best_relay_header_number: RelayBlockNumber, parachain_head_at_relay_header_number: RelayBlockNumber, best_message: MessageNonce, @@ -949,8 +1047,12 @@ mod tests { StoredAuthoritySet::try_new(authorities, TEST_GRANDPA_SET_ID).unwrap(), ); pallet_bridge_grandpa::BestFinalized::::put(best_relay_header); + pallet_bridge_grandpa::ImportedHeaders::::insert( + best_relay_header.hash(), + bp_test_utils::test_header::(0).build(), + ); - let para_id = ParaId(TestParachain::get()); + let para_id = ParaId(BridgedUnderlyingParachain::PARACHAIN_ID); let para_info = ParaInfo { best_head_hash: BestParaHeadHash { at_relay_block_number: parachain_head_at_relay_header_number, @@ -994,7 +1096,7 @@ mod tests { }) } - fn submit_relay_header_call_ex(relay_header_number: RelayBlockNumber) -> RuntimeCall { + pub fn submit_relay_header_call_ex(relay_header_number: RelayBlockNumber) -> RuntimeCall { let relay_header = BridgedChainHeader::new( relay_header_number, Default::default(), @@ -1008,6 +1110,7 @@ mod tests { finality_target: Box::new(relay_header), justification: relay_justification, current_set_id: TEST_GRANDPA_SET_ID, + is_free_execution_expected: false, }) } @@ -1017,10 +1120,24 @@ mod tests { RuntimeCall::BridgeParachains(ParachainsCall::submit_parachain_heads { at_relay_block: (parachain_head_at_relay_header_number, RelayBlockHash::default()), parachains: vec![( - ParaId(TestParachain::get()), + ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), + [parachain_head_at_relay_header_number as u8; 32].into(), + )], + parachain_heads_proof: ParaHeadsProof { storage_proof: vec![] }, + }) + } + + pub fn submit_parachain_head_call_ex( + parachain_head_at_relay_header_number: RelayBlockNumber, + ) -> RuntimeCall { + RuntimeCall::BridgeParachains(ParachainsCall::submit_parachain_heads_ex { + at_relay_block: (parachain_head_at_relay_header_number, RelayBlockHash::default()), + parachains: vec![( + ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), [parachain_head_at_relay_header_number as u8; 32].into(), )], parachain_heads_proof: ParaHeadsProof { storage_proof: vec![] }, + is_free_execution_expected: false, }) } @@ -1151,7 +1268,7 @@ mod tests { RuntimeCall::Utility(UtilityCall::batch_all { calls: vec![ submit_relay_header_call_ex(relay_header_number), - 
submit_parachain_head_call(parachain_head_at_relay_header_number), + submit_parachain_head_call_ex(parachain_head_at_relay_header_number), message_delivery_call(best_message), ], }) @@ -1179,7 +1296,7 @@ mod tests { RuntimeCall::Utility(UtilityCall::batch_all { calls: vec![ submit_relay_header_call_ex(relay_header_number), - submit_parachain_head_call(parachain_head_at_relay_header_number), + submit_parachain_head_call_ex(parachain_head_at_relay_header_number), message_confirmation_call(best_message), ], }) @@ -1194,11 +1311,14 @@ mod tests { current_set_id: None, extra_weight: Weight::zero(), extra_size: 0, + is_mandatory: false, + is_free_execution_expected: false, }, SubmitParachainHeadsInfo { - at_relay_block_number: 200, - para_id: ParaId(TestParachain::get()), + at_relay_block: HeaderId(200, [0u8; 32].into()), + para_id: ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), para_head_hash: [200u8; 32].into(), + is_free_execution_expected: false, }, MessagesCallInfo::ReceiveMessagesProof(ReceiveMessagesProofInfo { base: BaseMessagesProofInfo { @@ -1231,11 +1351,14 @@ mod tests { current_set_id: None, extra_weight: Weight::zero(), extra_size: 0, + is_mandatory: false, + is_free_execution_expected: false, }, SubmitParachainHeadsInfo { - at_relay_block_number: 200, - para_id: ParaId(TestParachain::get()), + at_relay_block: HeaderId(200, [0u8; 32].into()), + para_id: ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), para_head_hash: [200u8; 32].into(), + is_free_execution_expected: false, }, MessagesCallInfo::ReceiveMessagesDeliveryProof(ReceiveMessagesDeliveryProofInfo( BaseMessagesProofInfo { @@ -1264,6 +1387,8 @@ mod tests { current_set_id: None, extra_weight: Weight::zero(), extra_size: 0, + is_mandatory: false, + is_free_execution_expected: false, }, MessagesCallInfo::ReceiveMessagesProof(ReceiveMessagesProofInfo { base: BaseMessagesProofInfo { @@ -1296,6 +1421,8 @@ mod tests { current_set_id: None, extra_weight: Weight::zero(), extra_size: 0, + is_mandatory: false, + is_free_execution_expected: false, }, MessagesCallInfo::ReceiveMessagesDeliveryProof(ReceiveMessagesDeliveryProofInfo( BaseMessagesProofInfo { @@ -1320,9 +1447,10 @@ mod tests { relayer: relayer_account_at_this_chain(), call_info: CallInfo::ParachainFinalityAndMsgs( SubmitParachainHeadsInfo { - at_relay_block_number: 200, - para_id: ParaId(TestParachain::get()), + at_relay_block: HeaderId(200, [0u8; 32].into()), + para_id: ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), para_head_hash: [200u8; 32].into(), + is_free_execution_expected: false, }, MessagesCallInfo::ReceiveMessagesProof(ReceiveMessagesProofInfo { base: BaseMessagesProofInfo { @@ -1344,9 +1472,10 @@ mod tests { relayer: relayer_account_at_this_chain(), call_info: CallInfo::ParachainFinalityAndMsgs( SubmitParachainHeadsInfo { - at_relay_block_number: 200, - para_id: ParaId(TestParachain::get()), + at_relay_block: HeaderId(200, [0u8; 32].into()), + para_id: ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), para_head_hash: [200u8; 32].into(), + is_free_execution_expected: false, }, MessagesCallInfo::ReceiveMessagesDeliveryProof(ReceiveMessagesDeliveryProofInfo( BaseMessagesProofInfo { @@ -1421,8 +1550,14 @@ mod tests { extension.validate(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) } - fn run_validate_ignore_priority(call: RuntimeCall) -> TransactionValidity { - run_validate(call).map(|mut tx| { + fn run_messages_validate(call: RuntimeCall) -> TransactionValidity { + let extension: TestMessagesExtension = + 
RefundSignedExtensionAdapter(RefundBridgedMessages(PhantomData)); + extension.validate(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + } + + fn ignore_priority(tx: TransactionValidity) -> TransactionValidity { + tx.map(|mut tx| { tx.priority = 0; tx }) @@ -1444,6 +1579,14 @@ mod tests { extension.pre_dispatch(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) } + fn run_messages_pre_dispatch( + call: RuntimeCall, + ) -> Result>, TransactionValidityError> { + let extension: TestMessagesExtension = + RefundSignedExtensionAdapter(RefundBridgedMessages(PhantomData)); + extension.pre_dispatch(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + } + fn dispatch_info() -> DispatchInfo { DispatchInfo { weight: Weight::from_parts( @@ -1502,40 +1645,48 @@ mod tests { Balances::set_balance(&relayer_account_at_this_chain(), ExistentialDeposit::get()); // message delivery is failing - assert_eq!(run_validate(message_delivery_call(200)), Ok(Default::default()),); - assert_eq!( - run_validate(parachain_finality_and_delivery_batch_call(200, 200)), - Ok(Default::default()), - ); - assert_eq!( - run_validate(all_finality_and_delivery_batch_call(200, 200, 200)), - Ok(Default::default()), - ); + let fns = [run_validate, run_grandpa_validate, run_messages_validate]; + for f in fns { + assert_eq!(f(message_delivery_call(200)), Ok(Default::default()),); + assert_eq!( + f(parachain_finality_and_delivery_batch_call(200, 200)), + Ok(Default::default()), + ); + assert_eq!( + f(all_finality_and_delivery_batch_call(200, 200, 200)), + Ok(Default::default()), + ); + assert_eq!( + f(all_finality_and_delivery_batch_call_ex(200, 200, 200)), + Ok(Default::default()), + ); + } + + // message confirmation validation is passing assert_eq!( - run_validate(all_finality_and_delivery_batch_call_ex(200, 200, 200)), + ignore_priority(run_validate(message_confirmation_call(200))), Ok(Default::default()), ); - // message confirmation validation is passing assert_eq!( - run_validate_ignore_priority(message_confirmation_call(200)), + ignore_priority(run_messages_validate(message_confirmation_call(200))), Ok(Default::default()), ); assert_eq!( - run_validate_ignore_priority(parachain_finality_and_confirmation_batch_call( + ignore_priority(run_validate(parachain_finality_and_confirmation_batch_call( 200, 200 - )), + ))), Ok(Default::default()), ); assert_eq!( - run_validate_ignore_priority(all_finality_and_confirmation_batch_call( + ignore_priority(run_validate(all_finality_and_confirmation_batch_call( 200, 200, 200 - )), + ))), Ok(Default::default()), ); assert_eq!( - run_validate_ignore_priority(all_finality_and_confirmation_batch_call_ex( + ignore_priority(run_validate(all_finality_and_confirmation_batch_call_ex( 200, 200, 200 - )), + ))), Ok(Default::default()), ); }); @@ -1549,25 +1700,28 @@ mod tests { BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) .unwrap(); - let priority_of_100_messages_delivery = - run_validate(message_delivery_call(200)).unwrap().priority; - let priority_of_200_messages_delivery = - run_validate(message_delivery_call(300)).unwrap().priority; - assert!( - priority_of_200_messages_delivery > priority_of_100_messages_delivery, - "Invalid priorities: {} for 200 messages vs {} for 100 messages", - priority_of_200_messages_delivery, - priority_of_100_messages_delivery, - ); + let fns = [run_validate, run_grandpa_validate, run_messages_validate]; + for f in fns { + let priority_of_100_messages_delivery = + 
f(message_delivery_call(200)).unwrap().priority; + let priority_of_200_messages_delivery = + f(message_delivery_call(300)).unwrap().priority; + assert!( + priority_of_200_messages_delivery > priority_of_100_messages_delivery, + "Invalid priorities: {} for 200 messages vs {} for 100 messages", + priority_of_200_messages_delivery, + priority_of_100_messages_delivery, + ); - let priority_of_100_messages_confirmation = - run_validate(message_confirmation_call(200)).unwrap().priority; - let priority_of_200_messages_confirmation = - run_validate(message_confirmation_call(300)).unwrap().priority; - assert_eq!( - priority_of_100_messages_confirmation, - priority_of_200_messages_confirmation - ); + let priority_of_100_messages_confirmation = + f(message_confirmation_call(200)).unwrap().priority; + let priority_of_200_messages_confirmation = + f(message_confirmation_call(300)).unwrap().priority; + assert_eq!( + priority_of_100_messages_confirmation, + priority_of_200_messages_confirmation + ); + } }); } @@ -1579,23 +1733,24 @@ mod tests { BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) .unwrap(); - let priority_of_max_messages_delivery = run_validate(message_delivery_call( - 100 + MaxUnconfirmedMessagesAtInboundLane::get(), - )) - .unwrap() - .priority; - let priority_of_more_than_max_messages_delivery = run_validate(message_delivery_call( - 100 + MaxUnconfirmedMessagesAtInboundLane::get() + 1, - )) - .unwrap() - .priority; - - assert!( - priority_of_max_messages_delivery > priority_of_more_than_max_messages_delivery, - "Invalid priorities: {} for MAX messages vs {} for MAX+1 messages", - priority_of_max_messages_delivery, - priority_of_more_than_max_messages_delivery, - ); + let fns = [run_validate, run_grandpa_validate, run_messages_validate]; + for f in fns { + let priority_of_max_messages_delivery = + f(message_delivery_call(100 + MaxUnconfirmedMessagesAtInboundLane::get())) + .unwrap() + .priority; + let priority_of_more_than_max_messages_delivery = + f(message_delivery_call(100 + MaxUnconfirmedMessagesAtInboundLane::get() + 1)) + .unwrap() + .priority; + + assert!( + priority_of_max_messages_delivery > priority_of_more_than_max_messages_delivery, + "Invalid priorities: {} for MAX messages vs {} for MAX+1 messages", + priority_of_max_messages_delivery, + priority_of_more_than_max_messages_delivery, + ); + } }); } @@ -1605,45 +1760,54 @@ mod tests { initialize_environment(100, 100, 100); assert_eq!( - run_validate_ignore_priority(message_delivery_call(200)), + ignore_priority(run_validate(message_delivery_call(200))), + Ok(ValidTransaction::default()), + ); + assert_eq!( + ignore_priority(run_validate(message_confirmation_call(200))), + Ok(ValidTransaction::default()), + ); + + assert_eq!( + ignore_priority(run_messages_validate(message_delivery_call(200))), Ok(ValidTransaction::default()), ); assert_eq!( - run_validate_ignore_priority(message_confirmation_call(200)), + ignore_priority(run_messages_validate(message_confirmation_call(200))), Ok(ValidTransaction::default()), ); assert_eq!( - run_validate_ignore_priority(parachain_finality_and_delivery_batch_call(200, 200)), + ignore_priority(run_validate(parachain_finality_and_delivery_batch_call(200, 200))), Ok(ValidTransaction::default()), ); assert_eq!( - run_validate_ignore_priority(parachain_finality_and_confirmation_batch_call( + ignore_priority(run_validate(parachain_finality_and_confirmation_batch_call( 200, 200 - )), + ))), Ok(ValidTransaction::default()), ); assert_eq!( - 
run_validate_ignore_priority(all_finality_and_delivery_batch_call(200, 200, 200)), + ignore_priority(run_validate(all_finality_and_delivery_batch_call(200, 200, 200))), Ok(ValidTransaction::default()), ); assert_eq!( - run_validate_ignore_priority(all_finality_and_delivery_batch_call_ex( + ignore_priority(run_validate(all_finality_and_delivery_batch_call_ex( 200, 200, 200 - )), + ))), Ok(ValidTransaction::default()), ); assert_eq!( - run_validate_ignore_priority(all_finality_and_confirmation_batch_call( + ignore_priority(run_validate(all_finality_and_confirmation_batch_call( 200, 200, 200 - )), + ))), Ok(ValidTransaction::default()), ); assert_eq!( - run_validate_ignore_priority(all_finality_and_confirmation_batch_call_ex( + ignore_priority(run_validate(all_finality_and_confirmation_batch_call_ex( 200, 200, 200 - )), + ))), Ok(ValidTransaction::default()), ); }); @@ -1933,8 +2097,11 @@ mod tests { RuntimeCall::BridgeParachains(ParachainsCall::submit_parachain_heads { at_relay_block: (100, RelayBlockHash::default()), parachains: vec![ - (ParaId(TestParachain::get()), [1u8; 32].into()), - (ParaId(TestParachain::get() + 1), [1u8; 32].into()), + (ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), [1u8; 32].into()), + ( + ParaId(BridgedUnderlyingParachain::PARACHAIN_ID + 1), + [1u8; 32].into(), + ), ], parachain_heads_proof: ParaHeadsProof { storage_proof: vec![] }, }), @@ -2318,6 +2485,148 @@ mod tests { }); } + #[test] + fn messages_ext_only_parses_standalone_transactions() { + run_test(|| { + initialize_environment(100, 100, 100); + + // relay + parachain + message delivery calls batch is ignored + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + &all_finality_and_delivery_batch_call(200, 200, 200) + ), + Ok(None), + ); + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + &all_finality_and_delivery_batch_call_ex(200, 200, 200) + ), + Ok(None), + ); + + // relay + parachain + message confirmation calls batch is ignored + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + &all_finality_and_confirmation_batch_call(200, 200, 200) + ), + Ok(None), + ); + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + &all_finality_and_confirmation_batch_call_ex(200, 200, 200) + ), + Ok(None), + ); + + // parachain + message delivery call batch is ignored + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + ¶chain_finality_and_delivery_batch_call(200, 200) + ), + Ok(None), + ); + + // parachain + message confirmation call batch is ignored + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + ¶chain_finality_and_confirmation_batch_call(200, 200) + ), + Ok(None), + ); + + // relay + message delivery call batch is ignored + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + &relay_finality_and_delivery_batch_call(200, 200) + ), + Ok(None), + ); + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + &relay_finality_and_delivery_batch_call_ex(200, 200) + ), + Ok(None), + ); + + // relay + message confirmation call batch is ignored + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + &relay_finality_and_confirmation_batch_call(200, 200) + ), + Ok(None), + ); + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + &relay_finality_and_confirmation_batch_call_ex(200, 200) + ), + Ok(None), + ); + + // message delivery 
call batch is accepted + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + &message_delivery_call(200) + ), + Ok(Some(delivery_pre_dispatch_data().call_info)), + ); + + // message confirmation call batch is accepted + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + &message_confirmation_call(200) + ), + Ok(Some(confirmation_pre_dispatch_data().call_info)), + ); + }); + } + + #[test] + fn messages_ext_rejects_calls_with_obsolete_messages() { + run_test(|| { + initialize_environment(100, 100, 100); + + assert_eq!( + run_messages_pre_dispatch(message_delivery_call(100)), + Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), + ); + assert_eq!( + run_messages_pre_dispatch(message_confirmation_call(100)), + Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), + ); + + assert_eq!( + run_messages_validate(message_delivery_call(100)), + Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), + ); + assert_eq!( + run_messages_validate(message_confirmation_call(100)), + Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), + ); + }); + } + + #[test] + fn messages_ext_accepts_calls_with_new_messages() { + run_test(|| { + initialize_environment(100, 100, 100); + + assert_eq!( + run_messages_pre_dispatch(message_delivery_call(200)), + Ok(Some(delivery_pre_dispatch_data())), + ); + assert_eq!( + run_messages_pre_dispatch(message_confirmation_call(200)), + Ok(Some(confirmation_pre_dispatch_data())), + ); + + assert_eq!(run_messages_validate(message_delivery_call(200)), Ok(Default::default()),); + assert_eq!( + run_messages_validate(message_confirmation_call(200)), + Ok(Default::default()), + ); + }); + } + #[test] fn grandpa_ext_only_parses_valid_batches() { run_test(|| { diff --git a/bridges/bin/runtime-common/src/mock.rs b/bridges/bin/runtime-common/src/mock.rs index ad71cd0d456..e323f1edfc7 100644 --- a/bridges/bin/runtime-common/src/mock.rs +++ b/bridges/bin/runtime-common/src/mock.rs @@ -183,7 +183,8 @@ impl pallet_transaction_payment::Config for TestRuntime { impl pallet_bridge_grandpa::Config for TestRuntime { type RuntimeEvent = RuntimeEvent; type BridgedChain = BridgedUnderlyingChain; - type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>; + type MaxFreeHeadersPerBlock = ConstU32<4>; + type FreeHeadersInterval = ConstU32<1_024>; type HeadersToKeep = ConstU32<8>; type WeightInfo = pallet_bridge_grandpa::weights::BridgeWeight; } @@ -406,6 +407,7 @@ impl Chain for BridgedUnderlyingParachain { impl Parachain for BridgedUnderlyingParachain { const PARACHAIN_ID: u32 = 42; + const MAX_HEADER_SIZE: u32 = 1_024; } /// The other, bridged chain, used in tests. diff --git a/bridges/chains/chain-bridge-hub-cumulus/src/lib.rs b/bridges/chains/chain-bridge-hub-cumulus/src/lib.rs index c49aa4b8563..a5c90ceba11 100644 --- a/bridges/chains/chain-bridge-hub-cumulus/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-cumulus/src/lib.rs @@ -39,6 +39,9 @@ use frame_support::{ use frame_system::limits; use sp_std::time::Duration; +/// Maximal bridge hub header size. +pub const MAX_BRIDGE_HUB_HEADER_SIZE: u32 = 4_096; + /// Average block interval in Cumulus-based parachains. /// /// Corresponds to the `MILLISECS_PER_BLOCK` from `parachains_common` crate. 
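Every `Parachain` implementation now has to provide the new `MAX_HEADER_SIZE` bound, which is used above when sizing `submit_parachain_heads` transactions. A minimal sketch, where `MyBridgedParachain` (assumed to already implement `Chain`) is illustrative:

    use bp_bridge_hub_cumulus::MAX_BRIDGE_HUB_HEADER_SIZE;
    use bp_runtime::Parachain;

    impl Parachain for MyBridgedParachain {
        const PARACHAIN_ID: u32 = 2_000; // illustrative id
        // the bridge hubs in this patch all reuse the shared 4_096-byte bound
        const MAX_HEADER_SIZE: u32 = MAX_BRIDGE_HUB_HEADER_SIZE;
    }
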
diff --git a/bridges/chains/chain-bridge-hub-kusama/src/lib.rs b/bridges/chains/chain-bridge-hub-kusama/src/lib.rs index 576e3dbee80..ef3ef4ab7b7 100644 --- a/bridges/chains/chain-bridge-hub-kusama/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-kusama/src/lib.rs @@ -62,6 +62,7 @@ impl Chain for BridgeHubKusama { impl Parachain for BridgeHubKusama { const PARACHAIN_ID: u32 = BRIDGE_HUB_KUSAMA_PARACHAIN_ID; + const MAX_HEADER_SIZE: u32 = MAX_BRIDGE_HUB_HEADER_SIZE; } impl ChainWithMessages for BridgeHubKusama { diff --git a/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs b/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs index 6db389c9299..9db71af928e 100644 --- a/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs @@ -59,6 +59,7 @@ impl Chain for BridgeHubPolkadot { impl Parachain for BridgeHubPolkadot { const PARACHAIN_ID: u32 = BRIDGE_HUB_POLKADOT_PARACHAIN_ID; + const MAX_HEADER_SIZE: u32 = MAX_BRIDGE_HUB_HEADER_SIZE; } impl ChainWithMessages for BridgeHubPolkadot { diff --git a/bridges/chains/chain-bridge-hub-rococo/src/lib.rs b/bridges/chains/chain-bridge-hub-rococo/src/lib.rs index abce872d7ba..d7097f01c53 100644 --- a/bridges/chains/chain-bridge-hub-rococo/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-rococo/src/lib.rs @@ -59,6 +59,7 @@ impl Chain for BridgeHubRococo { impl Parachain for BridgeHubRococo { const PARACHAIN_ID: u32 = BRIDGE_HUB_ROCOCO_PARACHAIN_ID; + const MAX_HEADER_SIZE: u32 = MAX_BRIDGE_HUB_HEADER_SIZE; } impl ChainWithMessages for BridgeHubRococo { @@ -103,9 +104,9 @@ frame_support::parameter_types! { /// Transaction fee that is paid at the Rococo BridgeHub for delivering single inbound message. /// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_complex_message_delivery_transaction` + `33%`) - pub const BridgeHubRococoBaseDeliveryFeeInRocs: u128 = 5_651_581_649; + pub const BridgeHubRococoBaseDeliveryFeeInRocs: u128 = 314_037_860; /// Transaction fee that is paid at the Rococo BridgeHub for delivering single outbound message confirmation. /// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_complex_message_confirmation_transaction` + `33%`) - pub const BridgeHubRococoBaseConfirmationFeeInRocs: u128 = 5_380_901_781; + pub const BridgeHubRococoBaseConfirmationFeeInRocs: u128 = 57_414_813; } diff --git a/bridges/chains/chain-bridge-hub-westend/src/lib.rs b/bridges/chains/chain-bridge-hub-westend/src/lib.rs index 4af895cc6d3..800f290d7bf 100644 --- a/bridges/chains/chain-bridge-hub-westend/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-westend/src/lib.rs @@ -58,6 +58,7 @@ impl Chain for BridgeHubWestend { impl Parachain for BridgeHubWestend { const PARACHAIN_ID: u32 = BRIDGE_HUB_WESTEND_PARACHAIN_ID; + const MAX_HEADER_SIZE: u32 = MAX_BRIDGE_HUB_HEADER_SIZE; } impl ChainWithMessages for BridgeHubWestend { @@ -93,10 +94,10 @@ frame_support::parameter_types! { pub const BridgeHubWestendBaseXcmFeeInWnds: u128 = 17_756_830_000; /// Transaction fee that is paid at the Westend BridgeHub for delivering single inbound message. 
- /// (initially was calculated by test `BridgeHubWestend::can_calculate_fee_for_complex_message_delivery_transaction` + `33%`) - pub const BridgeHubWestendBaseDeliveryFeeInWnds: u128 = 1_695_489_961_344; + /// (initially was calculated by test `BridgeHubWestend::can_calculate_fee_for_standalone_message_delivery_transaction` + `33%`) + pub const BridgeHubWestendBaseDeliveryFeeInWnds: u128 = 94_211_536_452; /// Transaction fee that is paid at the Westend BridgeHub for delivering single outbound message confirmation. - /// (initially was calculated by test `BridgeHubWestend::can_calculate_fee_for_complex_message_confirmation_transaction` + `33%`) - pub const BridgeHubWestendBaseConfirmationFeeInWnds: u128 = 1_618_309_961_344; + /// (initially was calculated by test `BridgeHubWestend::can_calculate_fee_for_standalone_message_confirmation_transaction` + `33%`) + pub const BridgeHubWestendBaseConfirmationFeeInWnds: u128 = 17_224_486_452; } diff --git a/bridges/modules/grandpa/src/call_ext.rs b/bridges/modules/grandpa/src/call_ext.rs index 4a7ebb3cc8d..6fa62ec0cff 100644 --- a/bridges/modules/grandpa/src/call_ext.rs +++ b/bridges/modules/grandpa/src/call_ext.rs @@ -15,20 +15,24 @@ // along with Parity Bridges Common. If not, see . use crate::{ - weights::WeightInfo, BridgedBlockNumber, BridgedHeader, Config, CurrentAuthoritySet, Error, - Pallet, + weights::WeightInfo, BestFinalized, BridgedBlockNumber, BridgedHeader, Config, + CurrentAuthoritySet, Error, FreeHeadersRemaining, Pallet, }; use bp_header_chain::{ justification::GrandpaJustification, max_expected_submit_finality_proof_arguments_size, ChainWithGrandpa, GrandpaConsensusLogReader, }; -use bp_runtime::{BlockNumberOf, OwnedBridgeModule}; +use bp_runtime::{BlockNumberOf, Chain, OwnedBridgeModule}; use codec::Encode; -use frame_support::{dispatch::CallableCallFor, traits::IsSubType, weights::Weight}; +use frame_support::{ + dispatch::CallableCallFor, + traits::{Get, IsSubType}, + weights::Weight, +}; use sp_consensus_grandpa::SetId; use sp_runtime::{ - traits::{Header, Zero}, - transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, + traits::{CheckedSub, Header, Zero}, + transaction_validity::{InvalidTransaction, TransactionValidityError}, RuntimeDebug, SaturatedConversion, }; @@ -40,6 +44,11 @@ pub struct SubmitFinalityProofInfo { /// An identifier of the validators set that has signed the submitted justification. /// It might be `None` if deprecated version of the `submit_finality_proof` is used. pub current_set_id: Option, + /// If `true`, then the call proves new **mandatory** header. + pub is_mandatory: bool, + /// If `true`, then the call must be free (assuming that everything else is valid) to + /// be treated as valid. + pub is_free_execution_expected: bool, /// Extra weight that we assume is included in the call. /// /// We have some assumptions about headers and justifications of the bridged chain. @@ -54,6 +63,16 @@ pub struct SubmitFinalityProofInfo { pub extra_size: u32, } +/// Verified `SubmitFinalityProofInfo`. +#[derive(Copy, Clone, PartialEq, RuntimeDebug)] +pub struct VerifiedSubmitFinalityProofInfo { + /// Base call information. + pub base: SubmitFinalityProofInfo, + /// A difference between bundled bridged header and best bridged header known to us + /// before the call. + pub improved_by: N, +} + impl SubmitFinalityProofInfo { /// Returns `true` if call size/weight is below our estimations for regular calls. 
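The `is_mandatory` and `is_free_execution_expected` fields shown above are populated from the extended call. A relayer opting into free execution would construct it roughly as below; header, justification and set id are placeholders, and per the checks that follow, the call is treated as invalid rather than silently charged if free execution turns out to be impossible:

    let call = pallet_bridge_grandpa::Call::<Runtime, GrandpaInstance>::submit_finality_proof_ex {
        finality_target: Box::new(finality_target),
        justification,
        current_set_id,
        // ask for free execution, subject to the free-slot and interval checks
        is_free_execution_expected: true,
    };
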
pub fn fits_limits(&self) -> bool { @@ -67,14 +86,86 @@ pub struct SubmitFinalityProofHelper, I: 'static> { } impl, I: 'static> SubmitFinalityProofHelper { + /// Returns `true` if we may fit more free headers into the current block. If `false` is + /// returned, the call will be paid even if `is_free_execution_expected` has been set + /// to `true`. + pub fn has_free_header_slots() -> bool { + // `unwrap_or(u32::MAX)` means that if `FreeHeadersRemaining` is `None`, we may accept + // this header for free. That is a small cheat - it is `None` if executed outside of + // transaction (e.g. during block initialization). Normal relayer would never submit + // such calls, but if he did, that is not our problem. During normal transactions, + // the `FreeHeadersRemaining` is always `Some(_)`. + let free_headers_remaining = FreeHeadersRemaining::::get().unwrap_or(u32::MAX); + free_headers_remaining > 0 + } + + /// Check that the: (1) GRANDPA head provided by the `SubmitFinalityProof` is better than the + /// best one we know (2) if `current_set_id` matches the current authority set id, if specified + /// and (3) whether transaction MAY be free for the submitter if `is_free_execution_expected` + /// is `true`. + /// + /// Returns number of headers between the current best finalized header, known to the pallet + /// and the bundled header. + pub fn check_obsolete_from_extension( + call_info: &SubmitFinalityProofInfo>, + ) -> Result, Error> { + // do basic checks first + let improved_by = Self::check_obsolete(call_info.block_number, call_info.current_set_id)?; + + // if submitter has NOT specified that it wants free execution, then we are done + if !call_info.is_free_execution_expected { + return Ok(improved_by); + } + + // else - if we can not accept more free headers, "reject" the transaction + if !Self::has_free_header_slots() { + log::trace!( + target: crate::LOG_TARGET, + "Cannot accept free {:?} header {:?}. No more free slots remaining", + T::BridgedChain::ID, + call_info.block_number, + ); + + return Err(Error::::FreeHeadersLimitExceded); + } + + // ensure that the `improved_by` is larger than the configured free interval + if !call_info.is_mandatory { + if let Some(free_headers_interval) = T::FreeHeadersInterval::get() { + if improved_by < free_headers_interval.into() { + log::trace!( + target: crate::LOG_TARGET, + "Cannot accept free {:?} header {:?}. Too small difference \ + between submitted headers: {:?} vs {}", + T::BridgedChain::ID, + call_info.block_number, + improved_by, + free_headers_interval, + ); + + return Err(Error::::BelowFreeHeaderInterval); + } + } + } + + // we do not check whether the header matches free submission criteria here - it is the + // relayer responsibility to check that + + Ok(improved_by) + } + /// Check that the GRANDPA head provided by the `SubmitFinalityProof` is better than the best /// one we know. Additionally, checks if `current_set_id` matches the current authority set - /// id, if specified. + /// id, if specified. This method is called by the call code and the transaction extension, + /// so it does not check the free execution. + /// + /// Returns number of headers between the current best finalized header, known to the pallet + /// and the bundled header. 
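+	///
+	/// Illustrative example (the numbers are hypothetical, not part of this patch): if the
+	/// best finalized bridged header known to the pallet is `#100`, a bundled header `#105`
+	/// makes this check return `Ok(5)`, while a bundled header `#100` or below is rejected
+	/// with `Error::OldHeader`.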
 	pub fn check_obsolete(
 		finality_target: BlockNumberOf<T::BridgedChain>,
 		current_set_id: Option<SetId>,
-	) -> Result<(), Error<T, I>> {
-		let best_finalized = crate::BestFinalized::<T, I>::get().ok_or_else(|| {
+	) -> Result<BlockNumberOf<T::BridgedChain>, Error<T, I>> {
+		let best_finalized = BestFinalized::<T, I>::get().ok_or_else(|| {
 			log::trace!(
 				target: crate::LOG_TARGET,
 				"Cannot finalize header {:?} because pallet is not yet initialized",
@@ -83,16 +174,19 @@ impl<T: Config<I>, I: 'static> SubmitFinalityProofHelper<T, I> {
 			<Error<T, I>>::NotInitialized
 		})?;
 
-		if best_finalized.number() >= finality_target {
-			log::trace!(
-				target: crate::LOG_TARGET,
-				"Cannot finalize obsolete header: bundled {:?}, best {:?}",
-				finality_target,
-				best_finalized,
-			);
+		let improved_by = match finality_target.checked_sub(&best_finalized.number()) {
+			Some(improved_by) if improved_by > Zero::zero() => improved_by,
+			_ => {
+				log::trace!(
+					target: crate::LOG_TARGET,
+					"Cannot finalize obsolete header: bundled {:?}, best {:?}",
+					finality_target,
+					best_finalized,
+				);
 
-			return Err(Error::<T, I>::OldHeader)
-		}
+				return Err(Error::<T, I>::OldHeader)
+			},
+		};
 
 		if let Some(current_set_id) = current_set_id {
 			let actual_set_id = <CurrentAuthoritySet<T, I>>::get().set_id;
@@ -108,12 +202,12 @@ impl<T: Config<I>, I: 'static> SubmitFinalityProofHelper<T, I> {
 			}
 		}
 
-		Ok(())
+		Ok(improved_by)
 	}
 
 	/// Check if the `SubmitFinalityProof` was successfully executed.
 	pub fn was_successful(finality_target: BlockNumberOf<T::BridgedChain>) -> bool {
-		match crate::BestFinalized::<T, I>::get() {
+		match BestFinalized::<T, I>::get() {
 			Some(best_finalized) => best_finalized.number() == finality_target,
 			None => false,
 		}
@@ -135,17 +229,20 @@ pub trait CallSubType<T: Config<I>, I: 'static>:
 				finality_target,
 				justification,
 				None,
+				false,
 			))
 		} else if let Some(crate::Call::<T, I>::submit_finality_proof_ex {
 			finality_target,
 			justification,
 			current_set_id,
+			is_free_execution_expected,
 		}) = self.is_sub_type()
 		{
 			return Some(submit_finality_proof_info_from_args::<T, I>(
 				finality_target,
 				justification,
 				Some(*current_set_id),
+				*is_free_execution_expected,
 			))
 		}
 
@@ -155,26 +252,36 @@ pub trait CallSubType<T: Config<I>, I: 'static>:
 	/// Validate Grandpa headers in order to avoid "mining" transactions that provide outdated
 	/// bridged chain headers. Without this validation, even honest relayers may lose their funds
 	/// if there are multiple relays running and submitting the same information.
-	fn check_obsolete_submit_finality_proof(&self) -> TransactionValidity
+	///
+	/// Returns `Ok(None)` if the call is not the `submit_finality_proof` call of our pallet.
+	/// Returns `Ok(Some(_))` if the call is the `submit_finality_proof` call of our pallet and
+	/// we believe the call brings a header that improves the pallet state.
+	/// Returns `Err(_)` if the call is the `submit_finality_proof` call of our pallet and we
+	/// believe that the call will fail.
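+	///
+	/// A minimal usage sketch (it assumes the runtime registers a transaction extension;
+	/// `call` is a hypothetical `RuntimeCall` value, not part of this patch):
+	///
+	/// ```ignore
+	/// if let Some(info) = call.check_obsolete_submit_finality_proof()? {
+	/// 	// `info.improved_by` may e.g. be used to scale the transaction priority
+	/// }
+	/// ```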
+	fn check_obsolete_submit_finality_proof(
+		&self,
+	) -> Result<
+		Option<VerifiedSubmitFinalityProofInfo<BridgedBlockNumber<T, I>>>,
+		TransactionValidityError,
+	>
 	where
 		Self: Sized,
 	{
-		let finality_target = match self.submit_finality_proof_info() {
+		let call_info = match self.submit_finality_proof_info() {
 			Some(finality_proof) => finality_proof,
-			_ => return Ok(ValidTransaction::default()),
+			_ => return Ok(None),
 		};
 
 		if Pallet::<T, I>::ensure_not_halted().is_err() {
-			return InvalidTransaction::Call.into()
+			return Err(InvalidTransaction::Call.into())
 		}
 
-		match SubmitFinalityProofHelper::<T, I>::check_obsolete(
-			finality_target.block_number,
-			finality_target.current_set_id,
-		) {
-			Ok(_) => Ok(ValidTransaction::default()),
-			Err(Error::<T, I>::OldHeader) => InvalidTransaction::Stale.into(),
-			Err(_) => InvalidTransaction::Call.into(),
+		let result = SubmitFinalityProofHelper::<T, I>::check_obsolete_from_extension(&call_info);
+		match result {
+			Ok(improved_by) =>
+				Ok(Some(VerifiedSubmitFinalityProofInfo { base: call_info, improved_by })),
+			Err(Error::<T, I>::OldHeader) => Err(InvalidTransaction::Stale.into()),
+			Err(_) => Err(InvalidTransaction::Call.into()),
 		}
 	}
 }
@@ -189,6 +296,7 @@ pub(crate) fn submit_finality_proof_info_from_args<T: Config<I>, I: 'static>(
 	finality_target: &BridgedHeader<T, I>,
 	justification: &GrandpaJustification<BridgedHeader<T, I>>,
 	current_set_id: Option<SetId>,
+	is_free_execution_expected: bool,
 ) -> SubmitFinalityProofInfo<BridgedBlockNumber<T, I>> {
 	let block_number = *finality_target.number();
 
@@ -230,16 +338,26 @@ pub(crate) fn submit_finality_proof_info_from_args<T: Config<I>, I: 'static>(
 	);
 	let extra_size = actual_call_size.saturating_sub(max_expected_call_size);
 
-	SubmitFinalityProofInfo { block_number, current_set_id, extra_weight, extra_size }
+	SubmitFinalityProofInfo {
+		block_number,
+		current_set_id,
+		is_mandatory: is_mandatory_finality_target,
+		is_free_execution_expected,
+		extra_weight,
+		extra_size,
+	}
 }
 
 #[cfg(test)]
 mod tests {
 	use crate::{
 		call_ext::CallSubType,
-		mock::{run_test, test_header, RuntimeCall, TestBridgedChain, TestNumber, TestRuntime},
-		BestFinalized, Config, CurrentAuthoritySet, PalletOperatingMode, StoredAuthoritySet,
-		SubmitFinalityProofInfo, WeightInfo,
+		mock::{
+			run_test, test_header, FreeHeadersInterval, RuntimeCall, TestBridgedChain, TestNumber,
+			TestRuntime,
+		},
+		BestFinalized, Config, CurrentAuthoritySet, FreeHeadersRemaining, PalletOperatingMode,
+		StoredAuthoritySet, SubmitFinalityProofInfo, WeightInfo,
 	};
 	use bp_header_chain::ChainWithGrandpa;
 	use bp_runtime::{BasicOperatingMode, HeaderId};
@@ -247,6 +365,7 @@ mod tests {
 		make_default_justification, make_justification_for_header, JustificationGeneratorParams,
 		TEST_GRANDPA_SET_ID,
 	};
+	use codec::Encode;
 	use frame_support::weights::Weight;
 	use sp_runtime::{testing::DigestItem, traits::Header as _, SaturatedConversion};
 
@@ -256,6 +375,7 @@ mod tests {
 			justification: make_default_justification(&test_header(num)),
 			// not initialized => zero
 			current_set_id: 0,
+			is_free_execution_expected: false,
 		};
 		RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa(
 			bridge_grandpa_call,
@@ -311,6 +431,121 @@ mod tests {
 		});
 	}
 
+	#[test]
+	fn extension_rejects_new_header_if_free_execution_is_requested_and_free_submissions_are_not_accepted(
+	) {
+		run_test(|| {
+			let bridge_grandpa_call = crate::Call::<TestRuntime, ()>::submit_finality_proof_ex {
+				finality_target: Box::new(test_header(10 + FreeHeadersInterval::get() as u64)),
+				justification: make_default_justification(&test_header(
+					10 + FreeHeadersInterval::get() as u64,
+				)),
+				current_set_id: 0,
+				is_free_execution_expected: true,
+			};
+			sync_to_header_10();
+
+			// when we can accept free headers => Ok
+			FreeHeadersRemaining::<TestRuntime, ()>::put(2);
+			assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa(
+				bridge_grandpa_call.clone(),
+			),)
+			.is_ok());
+
+			// when we can NOT accept free headers => Err
+			FreeHeadersRemaining::<TestRuntime, ()>::put(0);
+			assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa(
+				bridge_grandpa_call.clone(),
+			),)
+			.is_err());
+
+			// when called outside of transaction => Ok
+			FreeHeadersRemaining::<TestRuntime, ()>::kill();
+			assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa(
+				bridge_grandpa_call,
+			),)
+			.is_ok());
+		})
+	}
+
+	#[test]
+	fn extension_rejects_new_header_if_free_execution_is_requested_and_improved_by_is_below_expected(
+	) {
+		run_test(|| {
+			let bridge_grandpa_call = crate::Call::<TestRuntime, ()>::submit_finality_proof_ex {
+				finality_target: Box::new(test_header(100)),
+				justification: make_default_justification(&test_header(100)),
+				current_set_id: 0,
+				is_free_execution_expected: true,
+			};
+			sync_to_header_10();
+
+			// when `improved_by` is less than the free interval
+			BestFinalized::<TestRuntime, ()>::put(HeaderId(
+				100 - FreeHeadersInterval::get() as u64 + 1,
+				sp_core::H256::default(),
+			));
+			assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa(
+				bridge_grandpa_call.clone(),
+			),)
+			.is_err());
+
+			// when `improved_by` is equal to the free interval
+			BestFinalized::<TestRuntime, ()>::put(HeaderId(
+				100 - FreeHeadersInterval::get() as u64,
+				sp_core::H256::default(),
+			));
+			assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa(
+				bridge_grandpa_call.clone(),
+			),)
+			.is_ok());
+
+			// when `improved_by` is larger than the free interval
+			BestFinalized::<TestRuntime, ()>::put(HeaderId(
+				100 - FreeHeadersInterval::get() as u64 - 1,
+				sp_core::H256::default(),
+			));
+			assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa(
+				bridge_grandpa_call.clone(),
+			),)
+			.is_ok());
+
+			// when `improved_by` is less than the free interval BUT it is a mandatory header
+			let mut mandatory_header = test_header(100);
+			let consensus_log = sp_consensus_grandpa::ConsensusLog::<TestNumber>::ScheduledChange(
+				sp_consensus_grandpa::ScheduledChange {
+					next_authorities: bp_test_utils::authority_list(),
+					delay: 0,
+				},
+			);
+			mandatory_header.digest = sp_runtime::Digest {
+				logs: vec![DigestItem::Consensus(
+					sp_consensus_grandpa::GRANDPA_ENGINE_ID,
+					consensus_log.encode(),
+				)],
+			};
+			let justification = make_justification_for_header(JustificationGeneratorParams {
+				header: mandatory_header.clone(),
+				set_id: 1,
+				..Default::default()
+			});
+			let bridge_grandpa_call = crate::Call::<TestRuntime, ()>::submit_finality_proof_ex {
+				finality_target: Box::new(mandatory_header),
+				justification,
+				current_set_id: 0,
+				is_free_execution_expected: true,
+			};
+			BestFinalized::<TestRuntime, ()>::put(HeaderId(
+				100 - FreeHeadersInterval::get() as u64 + 1,
+				sp_core::H256::default(),
+			));
+			assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa(
+				bridge_grandpa_call.clone(),
+			),)
+			.is_ok());
+		})
+	}
+
 	#[test]
 	fn extension_accepts_new_header() {
 		run_test(|| {
@@ -336,6 +571,8 @@ mod tests {
 				current_set_id: None,
 				extra_weight: Weight::zero(),
 				extra_size: 0,
+				is_mandatory: false,
+				is_free_execution_expected: false,
 			})
 		);
 
@@ -345,6 +582,7 @@ mod tests {
 			finality_target: Box::new(test_header(42)),
 			justification: make_default_justification(&test_header(42)),
 			current_set_id: 777,
+			is_free_execution_expected: false,
 		});
 		assert_eq!(
 			deprecated_call.submit_finality_proof_info(),
@@ -353,6 +591,8 @@ mod tests {
 				current_set_id: Some(777),
 				extra_weight: Weight::zero(),
 				extra_size: 0,
+				is_mandatory: false,
+				is_free_execution_expected: false,
 			})
 		);
 	}
@@ -370,6 +610,7 @@ mod tests {
 			finality_target: Box::new(small_finality_target),
 			justification: small_justification,
 			current_set_id: TEST_GRANDPA_SET_ID,
+			is_free_execution_expected: false,
 		});
 		assert_eq!(small_call.submit_finality_proof_info().unwrap().extra_size, 0);
 
@@ -387,6 +628,7 @@ mod tests {
 			finality_target: Box::new(large_finality_target),
 			justification: large_justification,
 			current_set_id: TEST_GRANDPA_SET_ID,
+			is_free_execution_expected: false,
 		});
 		assert_ne!(large_call.submit_finality_proof_info().unwrap().extra_size, 0);
 	}
@@ -406,6 +648,7 @@ mod tests {
 			finality_target: Box::new(finality_target.clone()),
 			justification,
 			current_set_id: TEST_GRANDPA_SET_ID,
+			is_free_execution_expected: false,
 		});
 		assert_eq!(call.submit_finality_proof_info().unwrap().extra_weight, Weight::zero());
 
@@ -420,7 +663,52 @@ mod tests {
 			finality_target: Box::new(finality_target),
 			justification,
 			current_set_id: TEST_GRANDPA_SET_ID,
+			is_free_execution_expected: false,
 		});
 		assert_eq!(call.submit_finality_proof_info().unwrap().extra_weight, call_weight);
 	}
+
+	#[test]
+	fn check_obsolete_submit_finality_proof_returns_correct_improved_by() {
+		run_test(|| {
+			fn make_call(number: u64) -> RuntimeCall {
+				RuntimeCall::Grandpa(crate::Call::<TestRuntime, ()>::submit_finality_proof_ex {
+					finality_target: Box::new(test_header(number)),
+					justification: make_default_justification(&test_header(number)),
+					current_set_id: 0,
+					is_free_execution_expected: false,
+				})
+			}
+
+			sync_to_header_10();
+
+			// when the difference between headers is 1
+			assert_eq!(
+				RuntimeCall::check_obsolete_submit_finality_proof(&make_call(11))
+					.unwrap()
+					.unwrap()
+					.improved_by,
+				1,
+			);
+
+			// when the difference between headers is 2
+			assert_eq!(
+				RuntimeCall::check_obsolete_submit_finality_proof(&make_call(12))
+					.unwrap()
+					.unwrap()
+					.improved_by,
+				2,
+			);
+		})
+	}
+
+	#[test]
+	fn check_obsolete_submit_finality_proof_ignores_other_calls() {
+		run_test(|| {
+			let call =
+				RuntimeCall::System(frame_system::Call::<TestRuntime>::remark { remark: vec![42] });
+
+			assert_eq!(RuntimeCall::check_obsolete_submit_finality_proof(&call), Ok(None));
+		})
+	}
 }
diff --git a/bridges/modules/grandpa/src/lib.rs b/bridges/modules/grandpa/src/lib.rs
index 9e095651ef8..cb536eb07ff 100644
--- a/bridges/modules/grandpa/src/lib.rs
+++ b/bridges/modules/grandpa/src/lib.rs
@@ -44,6 +44,7 @@ use bp_header_chain::{
 };
 use bp_runtime::{BlockNumberOf, HashOf, HasherOf, HeaderId, HeaderOf, OwnedBridgeModule};
 use frame_support::{dispatch::PostDispatchInfo, ensure, DefaultNoBound};
+use sp_consensus_grandpa::SetId;
 use sp_runtime::{
 	traits::{Header as HeaderT, Zero},
 	SaturatedConversion,
@@ -57,6 +58,7 @@ mod storage_types;
 
 /// Module, containing weights for this pallet.
 pub mod weights;
+pub mod weights_ext;
 
 #[cfg(feature = "runtime-benchmarks")]
 pub mod benchmarking;
@@ -65,6 +67,7 @@ pub mod benchmarking;
 pub use call_ext::*;
 pub use pallet::*;
 pub use weights::WeightInfo;
+pub use weights_ext::WeightInfoExt;
 
 /// The target that will be used when publishing logs related to this pallet.
 pub const LOG_TARGET: &str = "runtime::bridge-grandpa";
@@ -101,17 +104,31 @@ pub mod pallet {
 		/// The chain we are bridging to here.
 		type BridgedChain: ChainWithGrandpa;
 
-		/// Maximal number of "free" mandatory header transactions per block.
+		/// Maximal number of "free" header transactions per block.
 		///
 		/// To be able to track the bridged chain, the pallet requires all headers that are
 		/// changing GRANDPA authorities set at the bridged chain (we call them mandatory).
-		/// So it is a common good deed to submit mandatory headers to the pallet. However, if the
-		/// bridged chain gets compromised, its validators may generate as many mandatory headers
-		/// as they want. And they may fill the whole block (at this chain) for free. This constants
-		/// limits number of calls that we may refund in a single block. All calls above this
-		/// limit are accepted, but are not refunded.
+		/// So it is a common good deed to submit mandatory headers to the pallet.
+		///
+		/// The pallet may be configured (see [`Self::FreeHeadersInterval`]) to import some
+		/// non-mandatory headers for free as well. It may also be treated as a common good
+		/// deed, because it may help to reduce bridge fees - this cost may be deducted from
+		/// bridge fees, paid by message senders.
+		///
+		/// However, if the bridged chain gets compromised, its validators may generate as many
+		/// "free" headers as they want. And they may fill the whole block (at this chain) for
+		/// free. This constant limits the number of calls that we may refund in a single block.
+		/// All calls above this limit are accepted, but are not refunded.
+		#[pallet::constant]
+		type MaxFreeHeadersPerBlock: Get<u32>;
+
+		/// The distance between bridged chain headers that may be submitted for free. The
+		/// first free header is header number zero, the next one is header number
+		/// `FreeHeadersInterval::get()` or any of its descendants if that header has not
+		/// been submitted. In other words, the interval between free headers should be at
+		/// least `FreeHeadersInterval`.
 		#[pallet::constant]
-		type MaxFreeMandatoryHeadersPerBlock: Get<u32>;
+		type FreeHeadersInterval: Get<Option<u32>>;
 
 		/// Maximal number of finalized headers to keep in the storage.
 		///
@@ -124,7 +141,7 @@ pub mod pallet {
 		type HeadersToKeep: Get<u32>;
 
 		/// Weights gathered through benchmarking.
-		type WeightInfo: WeightInfo;
+		type WeightInfo: WeightInfoExt;
 	}
 
 	#[pallet::pallet]
@@ -133,12 +150,12 @@ pub mod pallet {
 	#[pallet::hooks]
 	impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> {
 		fn on_initialize(_n: BlockNumberFor<T>) -> Weight {
-			FreeMandatoryHeadersRemaining::<T, I>::put(T::MaxFreeMandatoryHeadersPerBlock::get());
+			FreeHeadersRemaining::<T, I>::put(T::MaxFreeHeadersPerBlock::get());
 			Weight::zero()
 		}
 
 		fn on_finalize(_n: BlockNumberFor<T>) {
-			FreeMandatoryHeadersRemaining::<T, I>::kill();
+			FreeHeadersRemaining::<T, I>::kill();
 		}
 	}
 
@@ -155,7 +172,7 @@ pub mod pallet {
 		/// `submit_finality_proof_ex` instead. Semantically, this call is an equivalent of the
 		/// `submit_finality_proof_ex` call without current authority set id check.
 		#[pallet::call_index(0)]
-		#[pallet::weight(<T::WeightInfo as WeightInfo>::submit_finality_proof(
+		#[pallet::weight(T::WeightInfo::submit_finality_proof_weight(
 			justification.commit.precommits.len().saturated_into(),
 			justification.votes_ancestries.len().saturated_into(),
 		))]
@@ -175,6 +192,8 @@ pub mod pallet {
 				// the `submit_finality_proof_ex` also reads this value, but it is done from the
 				// cache, so we don't treat it as an additional db access
 				<CurrentAuthoritySet<T, I>>::get().set_id,
+				// cannot enforce free execution using this call
+				false,
 			)
 		}
 
@@ -250,8 +269,14 @@ pub mod pallet {
 		/// - verification is not optimized or invalid;
 		///
 		/// - header contains forced authorities set change or change with non-zero delay.
+		///
+		/// The `is_free_execution_expected` parameter is not really used inside the call. It is
+		/// used by the transaction extension, which should be registered at the runtime level. If
+		/// this parameter is `true`, the transaction will be treated as invalid, if the call won't
+		/// be executed for free. If the transaction extension is not used by the runtime, this
+		/// parameter is not used at all.
 		#[pallet::call_index(4)]
-		#[pallet::weight(<T::WeightInfo as WeightInfo>::submit_finality_proof(
+		#[pallet::weight(T::WeightInfo::submit_finality_proof_weight(
 			justification.commit.precommits.len().saturated_into(),
 			justification.votes_ancestries.len().saturated_into(),
 		))]
 		pub fn submit_finality_proof_ex(
 			origin: OriginFor<T>,
 			finality_target: Box<BridgedHeader<T, I>>,
 			justification: GrandpaJustification<BridgedHeader<T, I>>,
 			current_set_id: sp_consensus_grandpa::SetId,
+			_is_free_execution_expected: bool,
 		) -> DispatchResultWithPostInfo {
 			Self::ensure_not_halted().map_err(Error::<T, I>::BridgeModule)?;
 			ensure_signed(origin)?;
@@ -273,7 +299,8 @@ pub mod pallet {
 
 			// it checks whether the `number` is better than the current best block number
 			// and whether the `current_set_id` matches the best known set id
-			SubmitFinalityProofHelper::<T, I>::check_obsolete(number, Some(current_set_id))?;
+			let improved_by =
+				SubmitFinalityProofHelper::<T, I>::check_obsolete(number, Some(current_set_id))?;
 
 			let authority_set = <CurrentAuthoritySet<T, I>>::get();
 			let unused_proof_size = authority_set.unused_proof_size();
@@ -283,23 +310,16 @@ pub mod pallet {
 			let maybe_new_authority_set =
 				try_enact_authority_change::<T, I>(&finality_target, set_id)?;
-			let may_refund_call_fee = maybe_new_authority_set.is_some() &&
-				// if we have seen too many mandatory headers in this block, we don't want to refund
-				Self::free_mandatory_headers_remaining() > 0 &&
-				// if arguments out of expected bounds, we don't want to refund
-				submit_finality_proof_info_from_args::<T, I>(&finality_target, &justification, Some(current_set_id))
-					.fits_limits();
+			let may_refund_call_fee = may_refund_call_fee::<T, I>(
+				&finality_target,
+				&justification,
+				current_set_id,
+				improved_by,
+			);
 
 			if may_refund_call_fee {
-				FreeMandatoryHeadersRemaining::<T, I>::mutate(|count| {
-					*count = count.saturating_sub(1)
-				});
+				on_free_header_imported::<T, I>();
 			}
 			insert_header::<T, I>(*finality_target, hash);
-			log::info!(
-				target: LOG_TARGET,
-				"Successfully imported finalized header with hash {:?}!",
-				hash
-			);
 
 			// mandatory header is a header that changes authorities set. The pallet can't go
 			// further without importing this header. So every bridge MUST import mandatory headers.
@@ -311,6 +331,13 @@ pub mod pallet {
 			// to pay for the transaction.
 			let pays_fee = if may_refund_call_fee { Pays::No } else { Pays::Yes };
 
+			log::info!(
+				target: LOG_TARGET,
+				"Successfully imported finalized header with hash {:?}! Free: {}",
+				hash,
+				if may_refund_call_fee { "Yes" } else { "No" },
+			);
+
 			// the proof size component of the call weight assumes that there are
 			// `MaxBridgedAuthorities` in the `CurrentAuthoritySet` (we use `MaxEncodedLen`
 			// estimation). But if their number is lower, then we may "refund" some `proof_size`,
@@ -335,20 +362,18 @@ pub mod pallet {
 		}
 	}
 
-	/// Number mandatory headers that we may accept in the current block for free (returning
-	/// `Pays::No`).
+	/// Number of free header submissions that we may yet accept in the current block.
 	///
-	/// If the `FreeMandatoryHeadersRemaining` hits zero, all following mandatory headers in the
+	/// If the `FreeHeadersRemaining` hits zero, all following headers in the
 	/// current block are accepted with fee (`Pays::Yes` is returned).
 	///
-	/// The `FreeMandatoryHeadersRemaining` is an ephemeral value that is set to
-	/// `MaxFreeMandatoryHeadersPerBlock` at each block initialization and is killed on block
+	/// The `FreeHeadersRemaining` is an ephemeral value that is set to
+	/// `MaxFreeHeadersPerBlock` at each block initialization and is killed on block
 	/// finalization. So it never ends up in the storage trie.
 	#[pallet::storage]
 	#[pallet::whitelist_storage]
-	#[pallet::getter(fn free_mandatory_headers_remaining)]
-	pub(super) type FreeMandatoryHeadersRemaining<T: Config<I>, I: 'static = ()> =
-		StorageValue<_, u32, ValueQuery>;
+	pub type FreeHeadersRemaining<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, u32, OptionQuery>;
 
 	/// Hash of the header used to bootstrap the pallet.
 	#[pallet::storage]
@@ -473,6 +498,68 @@ pub mod pallet {
 		/// The `current_set_id` argument of the `submit_finality_proof_ex` doesn't match
 		/// the id of the current set, known to the pallet.
 		InvalidAuthoritySetId,
+		/// The submitter wanted free execution, but we can't fit more free transactions
+		/// into the block.
+		FreeHeadersLimitExceded,
+		/// The submitter wanted free execution, but the difference between best known and
+		/// bundled header numbers is below the `FreeHeadersInterval`.
+		BelowFreeHeaderInterval,
+	}
+
+	/// Called when a new free header is imported.
+	pub fn on_free_header_imported<T: Config<I>, I: 'static>() {
+		FreeHeadersRemaining::<T, I>::mutate(|count| {
+			*count = match *count {
+				None => None,
+				// the signed extension expects that `None` means outside of block
+				// execution - i.e. when transaction is validated from the transaction pool,
+				// so use `saturating_sub` and don't go from `Some(0)`->`None`
+				Some(count) => Some(count.saturating_sub(1)),
+			}
+		});
+	}
+
+	/// Returns true if we may refund the transaction cost to the submitter. In other words,
+	/// the transaction is considered a common good deed w.r.t. the pallet configuration.
+	fn may_refund_call_fee<T: Config<I>, I: 'static>(
+		finality_target: &BridgedHeader<T, I>,
+		justification: &GrandpaJustification<BridgedHeader<T, I>>,
+		current_set_id: SetId,
+		improved_by: BridgedBlockNumber<T, I>,
+	) -> bool {
+		// if we have refunded too much in this block => not refunding
+		if FreeHeadersRemaining::<T, I>::get().unwrap_or(0) == 0 {
+			return false;
+		}
+
+		// if size/weight of call is larger than expected => not refunding
+		let call_info = submit_finality_proof_info_from_args::<T, I>(
+			&finality_target,
+			&justification,
+			Some(current_set_id),
+			// this function is called from the transaction body and we do not want
+			// to do MAY-be-free-executed checks here - they have to be done in the
+			// transaction extension beforehand
+			false,
+		);
+		if !call_info.fits_limits() {
+			return false;
+		}
+
+		// if that's a mandatory header => refund
+		if call_info.is_mandatory {
+			return true;
+		}
+
+		// if configuration allows free non-mandatory headers and the header
+		// matches criteria => refund
+		if let Some(free_headers_interval) = T::FreeHeadersInterval::get() {
+			if improved_by >= free_headers_interval.into() {
+				return true;
+			}
+		}
+
+		false
 	}
 
 	/// Check the given header for a GRANDPA scheduled authority set change. If a change
@@ -692,8 +779,8 @@ pub fn initialize_for_benchmarks<T: Config<I>, I: 'static>(header: BridgedHeader<T, I>) {
 mod tests {
 	use super::*;
 	use crate::mock::{
-		run_test, test_header, RuntimeEvent as TestEvent, RuntimeOrigin, System, TestBridgedChain,
-		TestHeader, TestNumber, TestRuntime, MAX_BRIDGED_AUTHORITIES,
+		run_test, test_header, FreeHeadersInterval, RuntimeEvent as TestEvent, RuntimeOrigin,
+		System, TestBridgedChain, TestHeader, TestNumber, TestRuntime, MAX_BRIDGED_AUTHORITIES,
 	};
 	use bp_header_chain::BridgeGrandpaCall;
 	use bp_runtime::BasicOperatingMode;
@@ -747,6 +834,7 @@ mod tests {
 			Box::new(header),
 			justification,
 			TEST_GRANDPA_SET_ID,
+			false,
 		)
 	}
 
@@ -766,6 +854,7 @@ mod tests {
 			Box::new(header),
 			justification,
 			set_id,
+			false,
 		)
 	}
 
@@ -794,6 +883,7 @@ mod tests {
 			Box::new(header),
 			justification,
 			set_id,
+			false,
 		)
 	}
 
@@ -1009,6 +1099,7 @@ mod tests {
 					Box::new(header.clone()),
 					justification.clone(),
 					TEST_GRANDPA_SET_ID,
+					false,
 				),
 				<Error<TestRuntime>>::InvalidJustification
 			);
@@ -1018,6 +1109,7 @@ mod tests {
 					Box::new(header),
 					justification,
 					next_set_id,
+					false,
 				),
 				<Error<TestRuntime>>::InvalidAuthoritySetId
 			);
@@ -1039,6 +1131,7 @@ mod tests {
 					Box::new(header),
 					justification,
 					TEST_GRANDPA_SET_ID,
+					false,
 				),
 				<Error<TestRuntime>>::InvalidJustification
 			);
@@ -1069,6 +1162,7 @@ mod tests {
 					Box::new(header),
 					justification,
 					TEST_GRANDPA_SET_ID,
+					false,
 				),
 				<Error<TestRuntime>>::InvalidAuthoritySet
 			);
@@ -1108,6 +1202,7 @@ mod tests {
 				Box::new(header.clone()),
 				justification.clone(),
 				TEST_GRANDPA_SET_ID,
+				false,
 			);
 			assert_ok!(result);
 			assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::No);
@@ -1171,6 +1266,7 @@ mod tests {
 				Box::new(header.clone()),
 				justification,
 				TEST_GRANDPA_SET_ID,
+				false,
 			);
 			assert_ok!(result);
 			assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::Yes);
@@ -1203,6 +1299,7 @@ mod tests {
 				Box::new(header.clone()),
 				justification,
 				TEST_GRANDPA_SET_ID,
+				false,
 			);
 			assert_ok!(result);
 			assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::Yes);
@@ -1233,6 +1330,7 @@ mod tests {
 					Box::new(header),
 					justification,
 					TEST_GRANDPA_SET_ID,
+					false,
 				),
 				<Error<TestRuntime>>::UnsupportedScheduledChange
 			);
@@ -1259,6 +1357,7 @@ mod tests {
 					Box::new(header),
 					justification,
 					TEST_GRANDPA_SET_ID,
+					false,
 				),
 				<Error<TestRuntime>>::UnsupportedScheduledChange
 			);
@@ -1285,6 +1384,7 @@ mod tests {
 					Box::new(header),
 					justification,
 					TEST_GRANDPA_SET_ID,
+					false,
 				),
 				<Error<TestRuntime>>::TooManyAuthoritiesInSet
 			);
@@ -1350,12 +1450,13 @@ mod tests {
 				Box::new(header),
 				invalid_justification,
 				TEST_GRANDPA_SET_ID,
+				false,
 			)
 		};
 
 		initialize_substrate_bridge();
 
-		for _ in 0..<TestRuntime as Config>::MaxFreeMandatoryHeadersPerBlock::get() + 1 {
+		for _ in 0..<TestRuntime as Config>::MaxFreeHeadersPerBlock::get() + 1 {
 			assert_err!(submit_invalid_request(), <Error<TestRuntime>>::InvalidJustification);
 		}
 
@@ -1423,6 +1524,64 @@ mod tests {
 		})
 	}
 
+	#[test]
+	fn may_import_non_mandatory_header_for_free() {
+		run_test(|| {
+			initialize_substrate_bridge();
+
+			// set best finalized to `12`
+			const BEST: u8 = 12;
+			fn reset_best() {
+				BestFinalized::<TestRuntime, ()>::set(Some(HeaderId(
+					BEST as _,
+					Default::default(),
+				)));
+			}
+
+			// non-mandatory header is imported with fee
+			reset_best();
+			let non_free_header_number = BEST + FreeHeadersInterval::get() as u8 - 1;
+			let result = submit_finality_proof(non_free_header_number);
+			assert_eq!(result.unwrap().pays_fee, Pays::Yes);
+
+			// non-mandatory free header is imported without fee
+			reset_best();
+			let free_header_number = BEST + FreeHeadersInterval::get() as u8;
+			let result = submit_finality_proof(free_header_number);
+			assert_eq!(result.unwrap().pays_fee, Pays::No);
+
+			// another non-mandatory free header is imported without fee
+			let free_header_number = BEST + FreeHeadersInterval::get() as u8 * 2;
+			let result = submit_finality_proof(free_header_number);
+			assert_eq!(result.unwrap().pays_fee, Pays::No);
+
+			// now the rate limiter starts charging fees even for free headers
+			let free_header_number = BEST + FreeHeadersInterval::get() as u8 * 3;
+			let result = submit_finality_proof(free_header_number);
+			assert_eq!(result.unwrap().pays_fee, Pays::Yes);
+
+			// check that we can import for free if `improved_by` is larger
+			// than the free interval
+			next_block();
+			reset_best();
+			let free_header_number = FreeHeadersInterval::get() as u8 + 42;
+			let result = submit_finality_proof(free_header_number);
+			assert_eq!(result.unwrap().pays_fee, Pays::No);
+
+			// check that the rate limiter shares the counter between mandatory
+			// and free non-mandatory headers
+			next_block();
+			reset_best();
+			let free_header_number = BEST + FreeHeadersInterval::get() as u8 * 4;
+			let result = submit_finality_proof(free_header_number);
+			assert_eq!(result.unwrap().pays_fee, Pays::No);
+			let result = submit_mandatory_finality_proof(free_header_number + 1, 1);
+			assert_eq!(result.expect("call failed").pays_fee, Pays::No);
+			let result = submit_mandatory_finality_proof(free_header_number + 2, 2);
+			assert_eq!(result.expect("call failed").pays_fee, Pays::Yes);
+		});
+	}
+
 	#[test]
 	fn should_prune_headers_over_headers_to_keep_parameter() {
 		run_test(|| {
@@ -1519,9 +1678,23 @@ mod tests {
 					Box::new(header),
 					justification,
 					TEST_GRANDPA_SET_ID,
+					false,
 				),
 				DispatchError::BadOrigin,
 			);
 		})
 	}
+
+	#[test]
+	fn on_free_header_imported_never_sets_to_none() {
+		run_test(|| {
+			FreeHeadersRemaining::<TestRuntime, ()>::set(Some(2));
+			on_free_header_imported::<TestRuntime, ()>();
+			assert_eq!(FreeHeadersRemaining::<TestRuntime, ()>::get(), Some(1));
+			on_free_header_imported::<TestRuntime, ()>();
+			assert_eq!(FreeHeadersRemaining::<TestRuntime, ()>::get(), Some(0));
+			on_free_header_imported::<TestRuntime, ()>();
+			assert_eq!(FreeHeadersRemaining::<TestRuntime, ()>::get(), Some(0));
+		})
+	}
 }
diff --git a/bridges/modules/grandpa/src/mock.rs b/bridges/modules/grandpa/src/mock.rs
index e689e520c92..27df9d9c78f 100644
--- a/bridges/modules/grandpa/src/mock.rs
+++ b/bridges/modules/grandpa/src/mock.rs
@@ -48,14 +48,16 @@ impl frame_system::Config for TestRuntime {
 }
 
 parameter_types! {
-	pub const MaxFreeMandatoryHeadersPerBlock: u32 = 2;
+	pub const MaxFreeHeadersPerBlock: u32 = 2;
+	pub const FreeHeadersInterval: u32 = 32;
 	pub const HeadersToKeep: u32 = 5;
 }
 
 impl grandpa::Config for TestRuntime {
 	type RuntimeEvent = RuntimeEvent;
 	type BridgedChain = TestBridgedChain;
-	type MaxFreeMandatoryHeadersPerBlock = MaxFreeMandatoryHeadersPerBlock;
+	type MaxFreeHeadersPerBlock = MaxFreeHeadersPerBlock;
+	type FreeHeadersInterval = FreeHeadersInterval;
 	type HeadersToKeep = HeadersToKeep;
 	type WeightInfo = ();
 }
diff --git a/bridges/modules/grandpa/src/weights_ext.rs b/bridges/modules/grandpa/src/weights_ext.rs
new file mode 100644
index 00000000000..66edea6fb6a
--- /dev/null
+++ b/bridges/modules/grandpa/src/weights_ext.rs
@@ -0,0 +1,58 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
+
+//! Weight-related utilities.
+
+use crate::weights::{BridgeWeight, WeightInfo};
+
+use frame_support::weights::Weight;
+
+/// Extended weight info.
+pub trait WeightInfoExt: WeightInfo {
+	// Our configuration assumes that the runtime has special signed extensions used to:
+	//
+	// 1) boost priority of `submit_finality_proof` transactions;
+	//
+	// 2) slash the relayer if they submit an invalid transaction.
+	//
+	// We read and update storage values of other pallets (`pallet-bridge-relayers` and
+	// balances/assets pallet). So we need to add this weight to the weight of our call.
+	// Hence the two following methods.
+
+	/// Extra weight that is added to the `submit_finality_proof` call weight by signed extensions
+	/// that are declared at the runtime level.
+	fn submit_finality_proof_overhead_from_runtime() -> Weight;
+
+	// Functions that are directly mapped to extrinsics weights.
+
+	/// Weight of the `submit_finality_proof` extrinsic.
+	fn submit_finality_proof_weight(precommits_len: u32, votes_ancestries_len: u32) -> Weight {
+		let base_weight = Self::submit_finality_proof(precommits_len, votes_ancestries_len);
+		base_weight.saturating_add(Self::submit_finality_proof_overhead_from_runtime())
+	}
+}
+
+impl<T: frame_system::Config> WeightInfoExt for BridgeWeight<T> {
+	fn submit_finality_proof_overhead_from_runtime() -> Weight {
+		Weight::zero()
+	}
+}
+
+impl WeightInfoExt for () {
+	fn submit_finality_proof_overhead_from_runtime() -> Weight {
+		Weight::zero()
+	}
+}
diff --git a/bridges/modules/parachains/src/call_ext.rs b/bridges/modules/parachains/src/call_ext.rs
index da91a40a232..fe6b319205d 100644
--- a/bridges/modules/parachains/src/call_ext.rs
+++ b/bridges/modules/parachains/src/call_ext.rs
@@ -14,25 +14,45 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
 
-use crate::{Config, Pallet, RelayBlockNumber};
+use crate::{Config, GrandpaPalletOf, Pallet, RelayBlockHash, RelayBlockNumber};
+use bp_header_chain::HeaderChain;
 use bp_parachains::BestParaHeadHash;
 use bp_polkadot_core::parachains::{ParaHash, ParaId};
-use bp_runtime::OwnedBridgeModule;
-use frame_support::{dispatch::CallableCallFor, traits::IsSubType};
+use bp_runtime::{HeaderId, OwnedBridgeModule};
+use frame_support::{
+	dispatch::CallableCallFor,
+	traits::{Get, IsSubType},
+};
+use pallet_bridge_grandpa::SubmitFinalityProofHelper;
 use sp_runtime::{
-	transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction},
+	traits::Zero,
+	transaction_validity::{InvalidTransaction, TransactionValidityError},
 	RuntimeDebug,
 };
 
 /// Info about a `SubmitParachainHeads` call which tries to update a single parachain.
 #[derive(PartialEq, RuntimeDebug)]
 pub struct SubmitParachainHeadsInfo {
-	/// Number of the finalized relay block that has been used to prove parachain finality.
-	pub at_relay_block_number: RelayBlockNumber,
+	/// Number and hash of the finalized relay block that has been used to prove parachain
+	/// finality.
+	pub at_relay_block: HeaderId<RelayBlockHash, RelayBlockNumber>,
 	/// Parachain identifier.
 	pub para_id: ParaId,
 	/// Hash of the bundled parachain head.
 	pub para_head_hash: ParaHash,
+	/// If `true`, then the call must be free (assuming that everything else is valid) to
+	/// be treated as valid.
+	pub is_free_execution_expected: bool,
+}
+
+/// Verified `SubmitParachainHeadsInfo`.
+#[derive(PartialEq, RuntimeDebug)]
+pub struct VerifiedSubmitParachainHeadsInfo {
+	/// Base call information.
+	pub base: SubmitParachainHeadsInfo,
+	/// A difference between bundled bridged relay chain header and relay chain header number
+	/// used to prove best bridged parachain header, known to us before the call.
+	pub improved_by: RelayBlockNumber,
 }
 
 /// Helper struct that provides methods for working with the `SubmitParachainHeads` call.
@@ -41,40 +61,117 @@ pub struct SubmitParachainHeadsHelper<T: Config<I>, I: 'static> {
 }
 
 impl<T: Config<I>, I: 'static> SubmitParachainHeadsHelper<T, I> {
-	/// Check if the para head provided by the `SubmitParachainHeads` is better than the best one
-	/// we know.
-	pub fn is_obsolete(update: &SubmitParachainHeadsInfo) -> bool {
-		let stored_best_head = match crate::ParasInfo::<T, I>::get(update.para_id) {
-			Some(stored_best_head) => stored_best_head,
-			None => return false,
+	/// Obsolescence check that is called from the signed extension and takes the
+	/// `is_free_execution_expected` flag into account.
+	pub fn check_obsolete_from_extension(
+		update: &SubmitParachainHeadsInfo,
+	) -> Result<RelayBlockNumber, TransactionValidityError> {
+		// first do all base checks
+		let improved_by = Self::check_obsolete(update)?;
+
+		// if we don't expect free execution - no more checks
+		if !update.is_free_execution_expected {
+			return Ok(improved_by);
+		}
+
+		// reject if no more free slots remaining in the block
+		if !SubmitFinalityProofHelper::<T, T::BridgesGrandpaPalletInstance>::has_free_header_slots()
+		{
+			log::trace!(
+				target: crate::LOG_TARGET,
+				"The free parachain {:?} head can't be updated: no more free slots \
+				left in the block.",
+				update.para_id,
+			);
+
+			return Err(InvalidTransaction::Call.into());
+		}
+
+		// if the free headers interval is not configured and the call is expected to execute
+		// for free => it is a relayer error, it should have been able to detect that.
+		let free_headers_interval = match T::FreeHeadersInterval::get() {
+			Some(free_headers_interval) => free_headers_interval,
+			None => return Ok(improved_by),
 		};
 
-		if stored_best_head.best_head_hash.at_relay_block_number >= update.at_relay_block_number {
+		// reject if we are importing parachain headers too often
+		if improved_by < free_headers_interval {
 			log::trace!(
 				target: crate::LOG_TARGET,
-				"The parachain head can't be updated. The parachain head for {:?} \
-				was already updated at better relay chain block {} >= {}.",
+				"The free parachain {:?} head can't be updated: it improves previous \
+				best head by {} while at least {} is expected.",
 				update.para_id,
-				stored_best_head.best_head_hash.at_relay_block_number,
-				update.at_relay_block_number
+				improved_by,
+				free_headers_interval,
 			);
-			return true
+
+			return Err(InvalidTransaction::Stale.into());
 		}
 
-		if stored_best_head.best_head_hash.head_hash == update.para_head_hash {
+		Ok(improved_by)
+	}
+
+	/// Check if the para head provided by the `SubmitParachainHeads` is better than the best one
+	/// we know.
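+	///
+	/// Returns the number of relay chain blocks by which the update improves the stored best
+	/// head (`RelayBlockNumber::MAX` when the pallet does not yet track this parachain).
+	/// Obsolete updates are rejected with `InvalidTransaction::Stale`; updates whose bundled
+	/// relay chain header is missing from the associated GRANDPA pallet storage are rejected
+	/// with `InvalidTransaction::Call`.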
+	pub fn check_obsolete(
+		update: &SubmitParachainHeadsInfo,
+	) -> Result<RelayBlockNumber, TransactionValidityError> {
+		// check if we know better parachain head already
+		let improved_by = match crate::ParasInfo::<T, I>::get(update.para_id) {
+			Some(stored_best_head) => {
+				let improved_by = match update
+					.at_relay_block
+					.0
+					.checked_sub(stored_best_head.best_head_hash.at_relay_block_number)
+				{
+					Some(improved_by) if improved_by > Zero::zero() => improved_by,
+					_ => {
+						log::trace!(
+							target: crate::LOG_TARGET,
+							"The parachain head can't be updated. The parachain head for {:?} \
+							was already updated at better relay chain block {} >= {}.",
+							update.para_id,
+							stored_best_head.best_head_hash.at_relay_block_number,
+							update.at_relay_block.0
+						);
+						return Err(InvalidTransaction::Stale.into())
+					},
+				};
+
+				if stored_best_head.best_head_hash.head_hash == update.para_head_hash {
+					log::trace!(
+						target: crate::LOG_TARGET,
+						"The parachain head can't be updated. The parachain head hash for {:?} \
+						was already updated to {} at block {} < {}.",
+						update.para_id,
+						update.para_head_hash,
+						stored_best_head.best_head_hash.at_relay_block_number,
+						update.at_relay_block.0
+					);
+					return Err(InvalidTransaction::Stale.into())
+				}
+
+				improved_by
+			},
+			None => RelayBlockNumber::MAX,
+		};
+
+		// let's check if our chain had no reorgs and we still know the relay chain header
+		// used to craft the proof
+		if GrandpaPalletOf::<T, I>::finalized_header_state_root(update.at_relay_block.1).is_none() {
 			log::trace!(
 				target: crate::LOG_TARGET,
-				"The parachain head can't be updated. The parachain head hash for {:?} \
-				was already updated to {} at block {} < {}.",
+				"The parachain {:?} head can't be updated. Relay chain header {}/{} used to create \
+				parachain proof is missing from the storage.",
 				update.para_id,
-				update.para_head_hash,
-				stored_best_head.best_head_hash.at_relay_block_number,
-				update.at_relay_block_number
+				update.at_relay_block.0,
+				update.at_relay_block.1,
 			);
-			return true
+
+			return Err(InvalidTransaction::Call.into())
 		}
 
-		false
+		Ok(improved_by)
 	}
 
 	/// Check if the `SubmitParachainHeads` was successfully executed.
@@ -83,7 +180,7 @@ impl<T: Config<I>, I: 'static> SubmitParachainHeadsHelper<T, I> {
 			Some(stored_best_head) =>
 				stored_best_head.best_head_hash ==
 					BestParaHeadHash {
-						at_relay_block_number: update.at_relay_block_number,
+						at_relay_block_number: update.at_relay_block.0,
 						head_hash: update.para_head_hash,
 					},
 			None => false,
@@ -98,22 +195,36 @@ pub trait CallSubType<T: Config<I>, I: 'static>:
 	/// Create a new instance of `SubmitParachainHeadsInfo` from a `SubmitParachainHeads` call with
 	/// one single parachain entry.
 	fn one_entry_submit_parachain_heads_info(&self) -> Option<SubmitParachainHeadsInfo> {
-		if let Some(crate::Call::<T, I>::submit_parachain_heads {
-			ref at_relay_block,
-			ref parachains,
-			..
-		}) = self.is_sub_type()
-		{
-			if let &[(para_id, para_head_hash)] = parachains.as_slice() {
-				return Some(SubmitParachainHeadsInfo {
-					at_relay_block_number: at_relay_block.0,
+		match self.is_sub_type() {
+			Some(crate::Call::<T, I>::submit_parachain_heads {
+				ref at_relay_block,
+				ref parachains,
+				..
+			}) => match &parachains[..] {
+				&[(para_id, para_head_hash)] => Some(SubmitParachainHeadsInfo {
+					at_relay_block: HeaderId(at_relay_block.0, at_relay_block.1),
 					para_id,
 					para_head_hash,
-				})
-			}
+					is_free_execution_expected: false,
+				}),
+				_ => None,
+			},
+			Some(crate::Call::<T, I>::submit_parachain_heads_ex {
+				ref at_relay_block,
+				ref parachains,
+				is_free_execution_expected,
+				..
+			}) => match &parachains[..] {
+				&[(para_id, para_head_hash)] => Some(SubmitParachainHeadsInfo {
+					at_relay_block: HeaderId(at_relay_block.0, at_relay_block.1),
+					para_id,
+					para_head_hash,
+					is_free_execution_expected: *is_free_execution_expected,
+				}),
+				_ => None,
+			},
+			_ => None,
 		}
-
-		None
 	}
 
 	/// Create a new instance of `SubmitParachainHeadsInfo` from a `SubmitParachainHeads` call with
@@ -133,24 +244,23 @@ pub trait CallSubType<T: Config<I>, I: 'static>:
 	/// block production, or "eat" significant portion of block production time literally
 	/// for nothing. In addition, the single-parachain-head-per-transaction is how the
 	/// pallet will be used in our environment.
-	fn check_obsolete_submit_parachain_heads(&self) -> TransactionValidity
+	fn check_obsolete_submit_parachain_heads(
+		&self,
+	) -> Result<Option<VerifiedSubmitParachainHeadsInfo>, TransactionValidityError>
 	where
 		Self: Sized,
 	{
 		let update = match self.one_entry_submit_parachain_heads_info() {
 			Some(update) => update,
-			None => return Ok(ValidTransaction::default()),
+			None => return Ok(None),
 		};
 
 		if Pallet::<T, I>::ensure_not_halted().is_err() {
-			return InvalidTransaction::Call.into()
+			return Err(InvalidTransaction::Call.into())
 		}
 
-		if SubmitParachainHeadsHelper::<T, I>::is_obsolete(&update) {
-			return InvalidTransaction::Stale.into()
-		}
-
-		Ok(ValidTransaction::default())
+		SubmitParachainHeadsHelper::<T, I>::check_obsolete_from_extension(&update)
+			.map(|improved_by| Some(VerifiedSubmitParachainHeadsInfo { base: update, improved_by }))
 	}
 }
 
@@ -164,9 +274,10 @@ where
 #[cfg(test)]
 mod tests {
 	use crate::{
-		mock::{run_test, RuntimeCall, TestRuntime},
-		CallSubType, PalletOperatingMode, ParaInfo, ParasInfo, RelayBlockNumber,
+		mock::{run_test, FreeHeadersInterval, RuntimeCall, TestRuntime},
+		CallSubType, PalletOperatingMode, ParaInfo, ParasInfo, RelayBlockHash, RelayBlockNumber,
 	};
+	use bp_header_chain::StoredHeaderData;
 	use bp_parachains::BestParaHeadHash;
 	use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId};
 	use bp_runtime::BasicOperatingMode;
@@ -175,15 +286,37 @@ mod tests {
 		num: RelayBlockNumber,
 		parachains: Vec<(ParaId, ParaHash)>,
 	) -> bool {
-		RuntimeCall::Parachains(crate::Call::<TestRuntime, ()>::submit_parachain_heads {
-			at_relay_block: (num, Default::default()),
+		RuntimeCall::Parachains(crate::Call::<TestRuntime, ()>::submit_parachain_heads_ex {
+			at_relay_block: (num, [num as u8; 32].into()),
 			parachains,
 			parachain_heads_proof: ParaHeadsProof { storage_proof: Vec::new() },
+			is_free_execution_expected: false,
 		})
 		.check_obsolete_submit_parachain_heads()
 		.is_ok()
 	}
 
+	fn validate_free_submit_parachain_heads(
+		num: RelayBlockNumber,
+		parachains: Vec<(ParaId, ParaHash)>,
+	) -> bool {
+		RuntimeCall::Parachains(crate::Call::<TestRuntime, ()>::submit_parachain_heads_ex {
+			at_relay_block: (num, [num as u8; 32].into()),
+			parachains,
+			parachain_heads_proof: ParaHeadsProof { storage_proof: Vec::new() },
+			is_free_execution_expected: true,
+		})
+		.check_obsolete_submit_parachain_heads()
+		.is_ok()
+	}
+
+	fn insert_relay_block(num: RelayBlockNumber) {
+		pallet_bridge_grandpa::ImportedHeaders::<TestRuntime, ()>::insert(
+			RelayBlockHash::from([num as u8; 32]),
+			StoredHeaderData { number: num, state_root: RelayBlockHash::from([10u8; 32]) },
+		);
+	}
+
 	fn sync_to_relay_header_10() {
 		ParasInfo::<TestRuntime, ()>::insert(
 			ParaId(1),
@@ -244,6 +377,7 @@ mod tests {
 			// when current best finalized is #10 and we're trying to import header#15 => tx is
 			// accepted
 			sync_to_relay_header_10();
+			insert_relay_block(15);
 			assert!(validate_submit_parachain_heads(15, vec![(ParaId(1), [2u8; 32].into())]));
 		});
 	}
@@ -260,4 +394,65 @@ mod tests {
 			));
 		});
 	}
+
+	#[test]
+	fn extension_rejects_initial_parachain_head_if_missing_relay_chain_header() {
+		run_test(|| {
+			// when relay chain header is unknown => "obsolete"
+			assert!(!validate_submit_parachain_heads(10, vec![(ParaId(1), [1u8; 32].into())]));
+			// when relay chain header is known => "ok"
+			insert_relay_block(10);
+			assert!(validate_submit_parachain_heads(10, vec![(ParaId(1), [1u8; 32].into())]));
+		});
+	}
+
+	#[test]
+	fn extension_rejects_free_parachain_head_if_missing_relay_chain_header() {
+		run_test(|| {
+			sync_to_relay_header_10();
+			// when relay chain header is unknown => "obsolete"
+			assert!(!validate_submit_parachain_heads(15, vec![(ParaId(2), [15u8; 32].into())]));
+			// when relay chain header is known => "ok"
+			insert_relay_block(15);
+			assert!(validate_submit_parachain_heads(15, vec![(ParaId(2), [15u8; 32].into())]));
+		});
+	}
+
+	#[test]
+	fn extension_rejects_free_parachain_head_if_no_free_slots_remaining() {
+		run_test(|| {
+			// when current best finalized is #10 and we're trying to import header#15 => tx should
+			// be accepted
+			sync_to_relay_header_10();
+			insert_relay_block(15);
+			// ... but since we have specified `is_free_execution_expected = true`, it'll be
+			// rejected
+			assert!(!validate_free_submit_parachain_heads(15, vec![(ParaId(1), [2u8; 32].into())]));
+			// ... if we specify `is_free_execution_expected = false`, it'll be accepted
+			assert!(validate_submit_parachain_heads(15, vec![(ParaId(1), [2u8; 32].into())]));
+		});
+	}
+
+	#[test]
+	fn extension_rejects_free_parachain_head_if_improves_by_is_below_expected() {
+		run_test(|| {
+			// when current best finalized is #10, a free submission must improve it by at least
+			// `FreeHeadersInterval` relay blocks
			sync_to_relay_header_10();
+			insert_relay_block(10 + FreeHeadersInterval::get() - 1);
+			insert_relay_block(10 + FreeHeadersInterval::get());
+			// try to submit at 10 + FreeHeadersInterval::get() - 1 => failure
+			let relay_header = 10 + FreeHeadersInterval::get() - 1;
+			assert!(!validate_free_submit_parachain_heads(
+				relay_header,
+				vec![(ParaId(1), [2u8; 32].into())]
+			));
+			// try to submit at 10 + FreeHeadersInterval::get() => ok
+			let relay_header = 10 + FreeHeadersInterval::get();
+			assert!(validate_free_submit_parachain_heads(
+				relay_header,
+				vec![(ParaId(1), [2u8; 32].into())]
+			));
+		});
+	}
 }
diff --git a/bridges/modules/parachains/src/lib.rs b/bridges/modules/parachains/src/lib.rs
index 1363a637604..61e04aed377 100644
--- a/bridges/modules/parachains/src/lib.rs
+++ b/bridges/modules/parachains/src/lib.rs
@@ -32,6 +32,7 @@ use bp_parachains::{parachain_head_storage_key_at_source, ParaInfo, ParaStoredHe
 use bp_polkadot_core::parachains::{ParaHash, ParaHead, ParaHeadsProof, ParaId};
 use bp_runtime::{Chain, HashOf, HeaderId, HeaderIdOf, Parachain, StorageProofError};
 use frame_support::{dispatch::PostDispatchInfo, DefaultNoBound};
+use pallet_bridge_grandpa::SubmitFinalityProofHelper;
 use sp_std::{marker::PhantomData, vec::Vec};
 
 #[cfg(feature = "runtime-benchmarks")]
@@ -92,7 +93,8 @@ pub mod pallet {
 		BoundedStorageValue<<T as Config<I>>::MaxParaHeadDataSize, ParaStoredHeaderData>;
 	/// Weight info of the given parachains pallet.
 	pub type WeightInfoOf<T, I> = <T as Config<I>>::WeightInfo;
-	type GrandpaPalletOf<T, I> =
+	/// Bridge GRANDPA pallet that is used to verify parachain proofs.
+	pub type GrandpaPalletOf<T, I> =
 		pallet_bridge_grandpa::Pallet<T, <T as Config<I>>::BridgesGrandpaPalletInstance>;
 
 	#[pallet::event]
@@ -192,6 +194,21 @@ pub mod pallet {
 		///
 		/// The GRANDPA pallet instance must be configured to import headers of relay chain that
 		/// we're interested in.
+		///
+		/// The associated GRANDPA pallet is also used to configure free parachain heads
+		/// submissions. The parachain head submission will be free if:
+		///
+		/// 1) the submission contains exactly one parachain head update that succeeds;
+		///
+		/// 2) the difference between the relay chain block numbers, used to prove the new
+		/// parachain head and the previous best parachain head, is at least the
+		/// `FreeHeadersInterval`, configured at the associated GRANDPA pallet;
+		///
+		/// 3) there are free submission slots remaining in the block. This is also configured
+		/// at the associated GRANDPA pallet using the `MaxFreeHeadersPerBlock` parameter.
+		///
+		/// The first parachain head submission is also free for the submitter, if free
+		/// submissions are still accepted in this block.
 		type BridgesGrandpaPalletInstance: 'static;
 
 		/// Name of the original `paras` pallet in the `construct_runtime!()` call at the bridged
@@ -335,10 +352,83 @@ pub mod pallet {
 			at_relay_block: (RelayBlockNumber, RelayBlockHash),
 			parachains: Vec<(ParaId, ParaHash)>,
 			parachain_heads_proof: ParaHeadsProof,
+		) -> DispatchResultWithPostInfo {
+			Self::submit_parachain_heads_ex(
+				origin,
+				at_relay_block,
+				parachains,
+				parachain_heads_proof,
+				false,
+			)
+		}
+
+		/// Change `PalletOwner`.
+		///
+		/// May only be called either by root, or by `PalletOwner`.
+		#[pallet::call_index(1)]
+		#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
+		pub fn set_owner(origin: OriginFor<T>, new_owner: Option<T::AccountId>) -> DispatchResult {
+			<Self as OwnedBridgeModule<_>>::set_owner(origin, new_owner)
+		}
+
+		/// Halt or resume all pallet operations.
+		///
+		/// May only be called either by root, or by `PalletOwner`.
+		#[pallet::call_index(2)]
+		#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
+		pub fn set_operating_mode(
+			origin: OriginFor<T>,
+			operating_mode: BasicOperatingMode,
+		) -> DispatchResult {
+			<Self as OwnedBridgeModule<_>>::set_operating_mode(origin, operating_mode)
+		}
+
+		/// Submit proof of one or several parachain heads.
+		///
+		/// The proof is supposed to be proof of some `Heads` entries from the
+		/// `polkadot-runtime-parachains::paras` pallet instance, deployed at the bridged chain.
+		/// The proof is supposed to be crafted at the `relay_header_hash` that must already be
+		/// imported by the corresponding GRANDPA pallet at this chain.
+		///
+		/// The call fails if:
+		///
+		/// - the pallet is halted;
+		///
+		/// - the relay chain block `at_relay_block` is not imported by the associated bridge
+		///   GRANDPA pallet.
+		///
+		/// The call may succeed, but some heads may not be updated e.g. because the pallet
+		/// knows a better head or it isn't tracked by the pallet.
+		///
+		/// The `is_free_execution_expected` parameter is not really used inside the call. It is
+		/// used by the transaction extension, which should be registered at the runtime level. If
+		/// this parameter is `true`, the transaction will be treated as invalid, if the call won't
+		/// be executed for free. If the transaction extension is not used by the runtime, this
+		/// parameter is not used at all.
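+		///
+		/// Illustrative call shape (a sketch; the argument values below are placeholders and
+		/// not part of this patch):
+		///
+		/// ```ignore
+		/// Pallet::<T, I>::submit_parachain_heads_ex(
+		/// 	origin,
+		/// 	(relay_block_number, relay_block_hash),
+		/// 	vec![(ParaId(2000), para_head_hash)],
+		/// 	parachain_heads_proof,
+		/// 	false, // `is_free_execution_expected`
+		/// )
+		/// ```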
+ #[pallet::call_index(3)] + #[pallet::weight(WeightInfoOf::::submit_parachain_heads_weight( + T::DbWeight::get(), + parachain_heads_proof, + parachains.len() as _, + ))] + pub fn submit_parachain_heads_ex( + origin: OriginFor, + at_relay_block: (RelayBlockNumber, RelayBlockHash), + parachains: Vec<(ParaId, ParaHash)>, + parachain_heads_proof: ParaHeadsProof, + _is_free_execution_expected: bool, ) -> DispatchResultWithPostInfo { Self::ensure_not_halted().map_err(Error::::BridgeModule)?; ensure_signed(origin)?; + let total_parachains = parachains.len(); + let free_headers_interval = + T::FreeHeadersInterval::get().unwrap_or(RelayBlockNumber::MAX); + // the pallet allows two kind of free submissions + // 1) if distance between all parachain heads is gte than the [`T::FreeHeadersInterval`] + // 2) if all heads are the first heads of their parachains + let mut free_parachain_heads = 0; + // we'll need relay chain header to verify that parachains heads are always increasing. let (relay_block_number, relay_block_hash) = at_relay_block; let relay_block = pallet_bridge_grandpa::ImportedHeaders::< @@ -358,6 +448,7 @@ pub mod pallet { parachains.len() as _, ); + let mut is_updated_something = false; let mut storage = GrandpaPalletOf::::storage_proof_checker( relay_block_hash, parachain_heads_proof.storage_proof, @@ -414,6 +505,7 @@ pub mod pallet { } // convert from parachain head into stored parachain head data + let parachain_head_size = parachain_head.0.len(); let parachain_head_data = match T::ParaStoredHeaderDataBuilder::try_build(parachain, ¶chain_head) { Some(parachain_head_data) => parachain_head_data, @@ -430,13 +522,30 @@ pub mod pallet { let update_result: Result<_, ()> = ParasInfo::::try_mutate(parachain, |stored_best_head| { + let is_free = parachain_head_size < + T::ParaStoredHeaderDataBuilder::max_free_head_size() as usize && + match stored_best_head { + Some(ref best_head) + if at_relay_block.0.saturating_sub( + best_head.best_head_hash.at_relay_block_number, + ) >= free_headers_interval => + true, + Some(_) => false, + None => true, + }; let artifacts = Pallet::::update_parachain_head( parachain, stored_best_head.take(), - relay_block_number, + HeaderId(relay_block_number, relay_block_hash), parachain_head_data, parachain_head_hash, )?; + + is_updated_something = true; + if is_free { + free_parachain_heads = free_parachain_heads + 1; + } + *stored_best_head = Some(artifacts.best_head); Ok(artifacts.prune_happened) }); @@ -467,28 +576,21 @@ pub mod pallet { Error::::HeaderChainStorageProof(HeaderChainError::StorageProof(e)) })?; - Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes }) - } - - /// Change `PalletOwner`. - /// - /// May only be called either by root, or by `PalletOwner`. - #[pallet::call_index(1)] - #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] - pub fn set_owner(origin: OriginFor, new_owner: Option) -> DispatchResult { - >::set_owner(origin, new_owner) - } + // check if we allow this submission for free + let is_free = total_parachains == 1 + && free_parachain_heads == total_parachains + && SubmitFinalityProofHelper::::has_free_header_slots(); + let pays_fee = if is_free { + log::trace!(target: LOG_TARGET, "Parachain heads update transaction is free"); + pallet_bridge_grandpa::on_free_header_imported::( + ); + Pays::No + } else { + log::trace!(target: LOG_TARGET, "Parachain heads update transaction is paid"); + Pays::Yes + }; - /// Halt or resume all pallet operations. 
-		///
-		/// May only be called either by root, or by `PalletOwner`.
-		#[pallet::call_index(2)]
-		#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
-		pub fn set_operating_mode(
-			origin: OriginFor<T>,
-			operating_mode: BasicOperatingMode,
-		) -> DispatchResult {
-			<Self as OwnedBridgeModule<_>>::set_operating_mode(origin, operating_mode)
+			Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee })
 		}
 	}

@@ -545,18 +647,20 @@ pub mod pallet {
 		pub(super) fn update_parachain_head(
 			parachain: ParaId,
 			stored_best_head: Option<ParaInfo>,
-			new_at_relay_block_number: RelayBlockNumber,
+			new_at_relay_block: HeaderId<RelayBlockHash, RelayBlockNumber>,
 			new_head_data: ParaStoredHeaderData,
 			new_head_hash: ParaHash,
 		) -> Result<UpdateParachainHeadArtifacts, ()> {
 			// check if the head has already been updated at a better relay chain block. Without
 			// this check, we may import heads in random order
 			let update = SubmitParachainHeadsInfo {
-				at_relay_block_number: new_at_relay_block_number,
+				at_relay_block: new_at_relay_block,
 				para_id: parachain,
 				para_head_hash: new_head_hash,
+				// the exact value doesn't actually matter here
+				is_free_execution_expected: false,
 			};
-			if SubmitParachainHeadsHelper::<T, I>::is_obsolete(&update) {
+			if SubmitParachainHeadsHelper::<T, I>::check_obsolete(&update).is_err() {
 				Self::deposit_event(Event::RejectedObsoleteParachainHead {
 					parachain,
 					parachain_head_hash: new_head_hash,
@@ -596,7 +700,7 @@ pub mod pallet {
 				ImportedParaHashes::<T, I>::try_get(parachain, next_imported_hash_position);
 			let updated_best_para_head = ParaInfo {
 				best_head_hash: BestParaHeadHash {
-					at_relay_block_number: new_at_relay_block_number,
+					at_relay_block_number: new_at_relay_block.0,
 					head_hash: new_head_hash,
 				},
 				next_imported_hash_position: (next_imported_hash_position + 1) %
@@ -610,9 +714,10 @@ pub mod pallet {
 			ImportedParaHeads::<T, I>::insert(parachain, new_head_hash, updated_head_data);
 			log::trace!(
 				target: LOG_TARGET,
-				"Updated head of parachain {:?} to {}",
+				"Updated head of parachain {:?} to {} at relay block {}",
 				parachain,
 				new_head_hash,
+				new_at_relay_block.0,
 			);

 			// remove old head
@@ -696,14 +801,28 @@ impl<T: Config<I>, I: 'static, C: Parachain> HeaderChain<C>
 pub fn initialize_for_benchmarks<T: Config<I>, I: 'static, PC: Parachain>(
 	header: HeaderOf<PC>,
 ) {
+	use bp_runtime::HeaderIdProvider;
+	use sp_runtime::traits::Header;
+
+	let relay_head =
+		pallet_bridge_grandpa::BridgedHeader::<T, T::BridgesGrandpaPalletInstance>::new(
+			0,
+			Default::default(),
+			Default::default(),
+			Default::default(),
+			Default::default(),
+		);
 	let parachain = ParaId(PC::PARACHAIN_ID);
 	let parachain_head = ParaHead(header.encode());
 	let updated_head_data =
 		T::ParaStoredHeaderDataBuilder::try_build(parachain, &parachain_head)
 			.expect("failed to build stored parachain head in benchmarks");
+	pallet_bridge_grandpa::initialize_for_benchmarks::<T, T::BridgesGrandpaPalletInstance>(
+		relay_head.clone(),
+	);
 	Pallet::<T, I>::update_parachain_head(
 		parachain,
 		None,
-		0,
+		relay_head.id(),
 		updated_head_data,
 		parachain_head.hash(),
 	)
@@ -714,9 +833,9 @@ pub fn initialize_for_benchmarks<T: Config<I>, I: 'static, PC: Parachain
 	type DbWeight = <TestRuntime as frame_system::Config>::DbWeight;

 	pub(crate) fn initialize(state_root: RelayBlockHash) -> RelayBlockHash {
+		pallet_bridge_grandpa::FreeHeadersRemaining::<TestRuntime, BridgesGrandpaPalletInstance>::set(Some(100));
 		pallet_bridge_grandpa::Pallet::<TestRuntime, BridgesGrandpaPalletInstance>::initialize(
 			RuntimeOrigin::root(),
 			bp_header_chain::InitializationData {
@@ -770,10 +891,6 @@ pub(crate) mod tests {
 		num: RelayBlockNumber,
 		state_root: RelayBlockHash,
 	) -> (ParaHash, GrandpaJustification<RelayBlockHeader>) {
-		pallet_bridge_grandpa::Pallet::<TestRuntime, BridgesGrandpaPalletInstance>::on_initialize(
-			0,
-		);
-
 		let header = test_relay_header(num, state_root);
 		let hash = header.hash();
 		let justification = make_default_justification(&header);
@@ -783,6 +900,7 @@
 				Box::new(header),
 				justification.clone(),
 				TEST_GRANDPA_SET_ID,
+				false,
 			)
 		);

@@ -908,7 +1026,7 @@ pub(crate) mod tests {
 		run_test(|| {
 			initialize(state_root);

-			// we're trying to update heads of parachains 1, 2 and 3
+			// we're trying to update heads of parachains 1 and 3
 			let expected_weight =
 				WeightInfo::submit_parachain_heads_weight(DbWeight::get(), &proof, 2);
 			let result = Pallet::<TestRuntime>::submit_parachain_heads(
 				RuntimeOrigin::signed(1),
 				(0, test_relay_header(0, state_root).hash()),
 				parachains,
 				proof,
 			);
 			assert_ok!(result);
+			assert_eq!(result.expect("checked above").pays_fee, Pays::Yes);
 			assert_eq!(result.expect("checked above").actual_weight, Some(expected_weight));

-			// but only 1 and 2 are updated, because proof is missing head of parachain#2
+			// 1 and 3 are updated, because proof is missing head of parachain#2
 			assert_eq!(ParasInfo::<TestRuntime>::get(ParaId(1)), Some(initial_best_head(1)));
 			assert_eq!(ParasInfo::<TestRuntime>::get(ParaId(2)), None);
 			assert_eq!(
@@ -989,7 +1108,9 @@ pub(crate) mod tests {
 		run_test(|| {
 			// start with relay block #0 and import head#5 of parachain#1
 			initialize(state_root_5);
-			assert_ok!(import_parachain_1_head(0, state_root_5, parachains_5, proof_5));
+			let result = import_parachain_1_head(0, state_root_5, parachains_5, proof_5);
+			// the first parachain head is imported for free
+			assert_eq!(result.unwrap().pays_fee, Pays::No);
 			assert_eq!(
 				ParasInfo::<TestRuntime>::get(ParaId(1)),
 				Some(ParaInfo {
@@ -1024,7 +1145,9 @@ pub(crate) mod tests {

 			// import head#10 of parachain#1 at relay block #1
 			let (relay_1_hash, justification) = proceed(1, state_root_10);
-			assert_ok!(import_parachain_1_head(1, state_root_10, parachains_10, proof_10));
+			let result = import_parachain_1_head(1, state_root_10, parachains_10, proof_10);
+			// the second parachain head is imported for a fee
+			assert_eq!(result.unwrap().pays_fee, Pays::Yes);
 			assert_eq!(
 				ParasInfo::<TestRuntime>::get(ParaId(1)),
 				Some(ParaInfo {
@@ -1647,4 +1770,143 @@ pub(crate) mod tests {
 			);
 		})
 	}
+
+	#[test]
+	fn may_be_free_for_submitting_filtered_heads() {
+		run_test(|| {
+			let (state_root, proof, parachains) =
+				prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(2, head_data(2, 5))]);
+			// start with relay block #0 and import head#5 of parachain#2
+			initialize(state_root);
+			// the first submission is free
+			let result = Pallet::<TestRuntime>::submit_parachain_heads(
+				RuntimeOrigin::signed(1),
+				(0, test_relay_header(0, state_root).hash()),
+				parachains.clone(),
+				proof.clone(),
+			);
+			assert_eq!(result.unwrap().pays_fee, Pays::No);
+			// the next submission is NOT free, because we haven't updated anything
+			let result = Pallet::<TestRuntime>::submit_parachain_heads(
+				RuntimeOrigin::signed(1),
+				(0, test_relay_header(0, state_root).hash()),
+				parachains,
+				proof,
+			);
+			assert_eq!(result.unwrap().pays_fee, Pays::Yes);
+			// then we submit a new head, proved at relay block `FreeHeadersInterval - 1` => Pays::Yes
+			let (state_root, proof, parachains) = prepare_parachain_heads_proof::<
+				RegularParachainHeader,
+			>(vec![(2, head_data(2, 50))]);
+			let relay_block_number = FreeHeadersInterval::get() - 1;
+			proceed(relay_block_number, state_root);
+			let result = Pallet::<TestRuntime>::submit_parachain_heads(
+				RuntimeOrigin::signed(1),
+				(relay_block_number, test_relay_header(relay_block_number, state_root).hash()),
+				parachains,
+				proof,
+			);
+			assert_eq!(result.unwrap().pays_fee, Pays::Yes);
+			// then we submit a new head, proved after `FreeHeadersInterval` => Pays::No
+			let (state_root, proof, parachains) = prepare_parachain_heads_proof::<
+				RegularParachainHeader,
+			>(vec![(2, head_data(2, 100))]);
+			let relay_block_number = relay_block_number + FreeHeadersInterval::get();
+			proceed(relay_block_number, state_root);
+			let result = Pallet::<TestRuntime>::submit_parachain_heads(
+				RuntimeOrigin::signed(1),
+				(relay_block_number, test_relay_header(relay_block_number, state_root).hash()),
+				parachains,
+				proof,
+			);
+			assert_eq!(result.unwrap().pays_fee, Pays::No);
+			// then we submit a new BIG head, proved after `FreeHeadersInterval` => Pays::Yes
+			let mut large_head = head_data(2, 100);
+			large_head.0.extend(&[42u8; BigParachain::MAX_HEADER_SIZE as _]);
+			let (state_root, proof, parachains) =
+				prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(2, large_head)]);
+			let relay_block_number = relay_block_number + FreeHeadersInterval::get();
+			proceed(relay_block_number, state_root);
+			let result = Pallet::<TestRuntime>::submit_parachain_heads(
+				RuntimeOrigin::signed(1),
+				(relay_block_number, test_relay_header(relay_block_number, state_root).hash()),
+				parachains,
+				proof,
+			);
+			assert_eq!(result.unwrap().pays_fee, Pays::Yes);
+		})
+	}
+
+	#[test]
+	fn grandpa_and_parachain_pallets_share_free_headers_counter() {
+		run_test(|| {
+			initialize(Default::default());
+			// set the free headers limit to `4`
+			let mut free_headers_remaining = 4;
+			pallet_bridge_grandpa::FreeHeadersRemaining::<TestRuntime, BridgesGrandpaPalletInstance>::set(
+				Some(free_headers_remaining),
+			);
+			// import free GRANDPA and parachain headers
+			let mut relay_block_number = 0;
+			for i in 0..2 {
+				// import a free GRANDPA header
+				let (state_root, proof, parachains) = prepare_parachain_heads_proof::<
+					RegularParachainHeader,
+				>(vec![(2, head_data(2, 5 + i))]);
+				relay_block_number = relay_block_number + FreeHeadersInterval::get();
+				proceed(relay_block_number, state_root);
+				assert_eq!(
+					pallet_bridge_grandpa::FreeHeadersRemaining::<
+						TestRuntime,
+						BridgesGrandpaPalletInstance,
+					>::get(),
+					Some(free_headers_remaining - 1),
+				);
+				free_headers_remaining = free_headers_remaining - 1;
+				// import a free parachain header
+				assert_ok!(Pallet::<TestRuntime>::submit_parachain_heads(
+					RuntimeOrigin::signed(1),
+					(relay_block_number, test_relay_header(relay_block_number, state_root).hash()),
+					parachains,
+					proof,
+				),);
+				assert_eq!(
+					pallet_bridge_grandpa::FreeHeadersRemaining::<
+						TestRuntime,
+						BridgesGrandpaPalletInstance,
+					>::get(),
+					Some(free_headers_remaining - 1),
+				);
+				free_headers_remaining = free_headers_remaining - 1;
+			}
+			// try to import a free GRANDPA header => non-free execution
+			let (state_root, proof, parachains) =
+				prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(2, head_data(2, 7))]);
+			relay_block_number = relay_block_number + FreeHeadersInterval::get();
+			let result = pallet_bridge_grandpa::Pallet::<TestRuntime, BridgesGrandpaPalletInstance>::submit_finality_proof_ex(
+				RuntimeOrigin::signed(1),
+				Box::new(test_relay_header(relay_block_number, state_root)),
+				make_default_justification(&test_relay_header(relay_block_number, state_root)),
+				TEST_GRANDPA_SET_ID,
+				false,
+			);
+			assert_eq!(result.unwrap().pays_fee, Pays::Yes);
+			// try to import a free parachain header => non-free execution
+			let result = Pallet::<TestRuntime>::submit_parachain_heads(
+				RuntimeOrigin::signed(1),
+				(relay_block_number, test_relay_header(relay_block_number, state_root).hash()),
+				parachains,
+				proof,
+			);
+			assert_eq!(result.unwrap().pays_fee, Pays::Yes);
+			assert_eq!(
+				pallet_bridge_grandpa::FreeHeadersRemaining::<
+					TestRuntime,
+					BridgesGrandpaPalletInstance,
+				>::get(),
+				Some(0),
+			);
+		});
+	}
 }
diff --git a/bridges/modules/parachains/src/mock.rs b/bridges/modules/parachains/src/mock.rs
index d9cbabf850e..dbb62845392 100644
--- a/bridges/modules/parachains/src/mock.rs
+++ 
b/bridges/modules/parachains/src/mock.rs
@@ -70,6 +70,7 @@ impl Chain for Parachain1 {

 impl Parachain for Parachain1 {
 	const PARACHAIN_ID: u32 = 1;
+	const MAX_HEADER_SIZE: u32 = 1_024;
 }

 pub struct Parachain2;
@@ -96,6 +97,7 @@ impl Chain for Parachain2 {

 impl Parachain for Parachain2 {
 	const PARACHAIN_ID: u32 = 2;
+	const MAX_HEADER_SIZE: u32 = 1_024;
 }

 pub struct Parachain3;
@@ -122,6 +124,7 @@ impl Chain for Parachain3 {

 impl Parachain for Parachain3 {
 	const PARACHAIN_ID: u32 = 3;
+	const MAX_HEADER_SIZE: u32 = 1_024;
 }

 // this parachain is using u128 as block number and stored head data size exceeds limit
@@ -149,6 +152,7 @@ impl Chain for BigParachain {

 impl Parachain for BigParachain {
 	const PARACHAIN_ID: u32 = 4;
+	const MAX_HEADER_SIZE: u32 = 2_048;
 }

 construct_runtime! {
@@ -168,12 +172,14 @@ impl frame_system::Config for TestRuntime {

 parameter_types! {
 	pub const HeadersToKeep: u32 = 5;
+	pub const FreeHeadersInterval: u32 = 15;
 }

 impl pallet_bridge_grandpa::Config<pallet_bridge_grandpa::Instance1> for TestRuntime {
 	type RuntimeEvent = RuntimeEvent;
 	type BridgedChain = TestBridgedChain;
-	type MaxFreeMandatoryHeadersPerBlock = ConstU32<2>;
+	type MaxFreeHeadersPerBlock = ConstU32<2>;
+	type FreeHeadersInterval = FreeHeadersInterval;
 	type HeadersToKeep = HeadersToKeep;
 	type WeightInfo = ();
 }
@@ -181,7 +187,8 @@ impl pallet_bridge_grandpa::Config<pallet_bridge_grandpa::Instance1> for TestRun
 impl pallet_bridge_grandpa::Config<pallet_bridge_grandpa::Instance2> for TestRuntime {
 	type RuntimeEvent = RuntimeEvent;
 	type BridgedChain = TestBridgedChain;
-	type MaxFreeMandatoryHeadersPerBlock = ConstU32<2>;
+	type MaxFreeHeadersPerBlock = ConstU32<2>;
+	type FreeHeadersInterval = FreeHeadersInterval;
 	type HeadersToKeep = HeadersToKeep;
 	type WeightInfo = ();
 }
diff --git a/bridges/modules/parachains/src/weights_ext.rs b/bridges/modules/parachains/src/weights_ext.rs
index 393086a8569..64dad625de0 100644
--- a/bridges/modules/parachains/src/weights_ext.rs
+++ b/bridges/modules/parachains/src/weights_ext.rs
@@ -36,6 +36,20 @@ pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024;

 /// Extended weight info.
 pub trait WeightInfoExt: WeightInfo {
+	// Our configuration assumes that the runtime has special signed extensions used to:
+	//
+	// 1) boost the priority of `submit_parachain_heads` transactions;
+	//
+	// 2) slash the relayer if they submit an invalid transaction.
+	//
+	// We read and update storage values of other pallets (`pallet-bridge-relayers` and the
+	// balances/assets pallet). So we need to add this weight to the weight of our call.
+	// Hence the following method.
+
+	/// Extra weight that is added to the `submit_parachain_heads` call weight by signed
+	/// extensions that are declared at the runtime level.
+	fn submit_parachain_heads_overhead_from_runtime() -> Weight;
+
 	/// Storage proof overhead, that is included in every storage proof.
 	///
 	/// The relayer would pay some extra fee for additional proof bytes, since they mean
@@ -65,7 +79,10 @@ pub trait WeightInfoExt: WeightInfo {
 		let pruning_weight =
 			Self::parachain_head_pruning_weight(db_weight).saturating_mul(parachains_count as u64);

-		base_weight.saturating_add(proof_size_overhead).saturating_add(pruning_weight)
+		base_weight
+			.saturating_add(proof_size_overhead)
+			.saturating_add(pruning_weight)
+			.saturating_add(Self::submit_parachain_heads_overhead_from_runtime())
 	}

 	/// Returns weight of single parachain head storage update.
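The default `submit_parachain_heads_weight` above therefore composes the final call weight from four parts. A minimal sketch of that arithmetic, with assumed parameter names (the trait's default method body remains the source of truth):

```rust
use frame_support::weights::Weight;

/// Illustrative only: mirrors the composition performed by the default
/// `submit_parachain_heads_weight` body shown above.
fn submit_weight_sketch(
	base_weight: Weight,             // benchmarked weight of the call itself
	proof_size_overhead: Weight,     // extra weight for additional storage proof bytes
	pruning_weight_per_head: Weight, // worst-case pruning, per updated parachain
	parachains_count: u64,
	runtime_overhead: Weight,        // `submit_parachain_heads_overhead_from_runtime()`
) -> Weight {
	base_weight
		.saturating_add(proof_size_overhead)
		.saturating_add(pruning_weight_per_head.saturating_mul(parachains_count))
		.saturating_add(runtime_overhead)
}
```

The runtime overhead term exists precisely because the signed extensions mentioned in the comment touch storage of other pallets, which the standalone pallet benchmark cannot account for.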
@@ -95,12 +112,20 @@ pub trait WeightInfoExt: WeightInfo { } impl WeightInfoExt for () { + fn submit_parachain_heads_overhead_from_runtime() -> Weight { + Weight::zero() + } + fn expected_extra_storage_proof_size() -> u32 { EXTRA_STORAGE_PROOF_SIZE } } impl WeightInfoExt for BridgeWeight { + fn submit_parachain_heads_overhead_from_runtime() -> Weight { + Weight::zero() + } + fn expected_extra_storage_proof_size() -> u32 { EXTRA_STORAGE_PROOF_SIZE } diff --git a/bridges/primitives/parachains/src/lib.rs b/bridges/primitives/parachains/src/lib.rs index 692bbd99ece..142c6e9b089 100644 --- a/bridges/primitives/parachains/src/lib.rs +++ b/bridges/primitives/parachains/src/lib.rs @@ -116,6 +116,10 @@ impl ParaStoredHeaderData { /// Stored parachain head data builder. pub trait ParaStoredHeaderDataBuilder { + /// Maximal parachain head size that we may accept for free. All heads above + /// this limit are submitted for a regular fee. + fn max_free_head_size() -> u32; + /// Return number of parachains that are supported by this builder. fn supported_parachains() -> u32; @@ -127,6 +131,10 @@ pub trait ParaStoredHeaderDataBuilder { pub struct SingleParaStoredHeaderDataBuilder(PhantomData); impl ParaStoredHeaderDataBuilder for SingleParaStoredHeaderDataBuilder { + fn max_free_head_size() -> u32 { + C::MAX_HEADER_SIZE + } + fn supported_parachains() -> u32 { 1 } @@ -147,6 +155,17 @@ impl ParaStoredHeaderDataBuilder for SingleParaStoredHeaderDataBui #[impl_trait_for_tuples::impl_for_tuples(1, 30)] #[tuple_types_custom_trait_bound(Parachain)] impl ParaStoredHeaderDataBuilder for C { + fn max_free_head_size() -> u32 { + let mut result = 0_u32; + for_tuples!( #( + result = sp_std::cmp::max( + result, + SingleParaStoredHeaderDataBuilder::::max_free_head_size(), + ); + )* ); + result + } + fn supported_parachains() -> u32 { let mut result = 0; for_tuples!( #( diff --git a/bridges/primitives/runtime/src/chain.rs b/bridges/primitives/runtime/src/chain.rs index 4ec5a001a99..1b1c623104f 100644 --- a/bridges/primitives/runtime/src/chain.rs +++ b/bridges/primitives/runtime/src/chain.rs @@ -236,6 +236,12 @@ where pub trait Parachain: Chain { /// Parachain identifier. const PARACHAIN_ID: u32; + /// Maximal size of the parachain header. + /// + /// This isn't a strict limit. The relayer may submit larger headers and the + /// pallet will accept the call. The limit is only used to compute whether + /// the refund can be made. + const MAX_HEADER_SIZE: u32; } impl Parachain for T @@ -244,6 +250,8 @@ where ::Chain: Parachain, { const PARACHAIN_ID: u32 = <::Chain as Parachain>::PARACHAIN_ID; + const MAX_HEADER_SIZE: u32 = + <::Chain as Parachain>::MAX_HEADER_SIZE; } /// Adapter for `Get` to access `PARACHAIN_ID` from `trait Parachain` @@ -306,6 +314,11 @@ macro_rules! decl_bridge_finality_runtime_apis { pub const []: &str = stringify!([<$chain:camel FinalityApi_best_finalized>]); + /// Name of the `FinalityApi::free_headers_interval` runtime method. + pub const []: &str = + stringify!([<$chain:camel FinalityApi_free_headers_interval>]); + + $( /// Name of the `FinalityApi::accepted__finality_proofs` /// runtime method. @@ -322,6 +335,13 @@ macro_rules! decl_bridge_finality_runtime_apis { /// Returns number and hash of the best finalized header known to the bridge module. fn best_finalized() -> Option>; + /// Returns free headers interval, if it is configured in the runtime. + /// The caller expects that if his transaction improves best known header + /// at least by the free_headers_interval`, it will be fee-free. 
+ /// + /// See [`pallet_bridge_grandpa::Config::FreeHeadersInterval`] for details. + fn free_headers_interval() -> Option; + $( /// Returns the justifications accepted in the current block. fn []( diff --git a/bridges/relays/client-substrate/src/test_chain.rs b/bridges/relays/client-substrate/src/test_chain.rs index 77240d15884..d1203a2c58e 100644 --- a/bridges/relays/client-substrate/src/test_chain.rs +++ b/bridges/relays/client-substrate/src/test_chain.rs @@ -110,6 +110,7 @@ impl bp_runtime::Chain for TestParachainBase { impl bp_runtime::Parachain for TestParachainBase { const PARACHAIN_ID: u32 = 1000; + const MAX_HEADER_SIZE: u32 = 1_024; } /// Parachain that may be used in tests. diff --git a/bridges/testing/environments/rococo-westend/bridge_hub_rococo_local_network.toml b/bridges/testing/environments/rococo-westend/bridge_hub_rococo_local_network.toml index 52271f94421..f59f689bf6b 100644 --- a/bridges/testing/environments/rococo-westend/bridge_hub_rococo_local_network.toml +++ b/bridges/testing/environments/rococo-westend/bridge_hub_rococo_local_network.toml @@ -40,7 +40,7 @@ cumulus_based = true rpc_port = 8933 ws_port = 8943 args = [ - "-lparachain=debug,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace" + "-lparachain=debug,runtime::bridge=trace,xcm=trace,txpool=trace" ] # run bob as parachain collator @@ -51,7 +51,7 @@ cumulus_based = true rpc_port = 8934 ws_port = 8944 args = [ - "-lparachain=trace,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace" + "-lparachain=debug,runtime::bridge=trace,xcm=trace,txpool=trace" ] [[parachains]] @@ -65,14 +65,14 @@ cumulus_based = true ws_port = 9910 command = "{{POLKADOT_PARACHAIN_BINARY}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" + "-lparachain=debug,xcm=trace,runtime::bridge=trace,txpool=trace" ] [[parachains.collators]] name = "asset-hub-rococo-collator2" command = "{{POLKADOT_PARACHAIN_BINARY}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" + "-lparachain=debug,xcm=trace,runtime::bridge=trace,txpool=trace" ] #[[hrmp_channels]] diff --git a/bridges/testing/environments/rococo-westend/bridge_hub_westend_local_network.toml b/bridges/testing/environments/rococo-westend/bridge_hub_westend_local_network.toml index f2550bcc995..6ab03ad5fe2 100644 --- a/bridges/testing/environments/rococo-westend/bridge_hub_westend_local_network.toml +++ b/bridges/testing/environments/rococo-westend/bridge_hub_westend_local_network.toml @@ -40,7 +40,7 @@ cumulus_based = true rpc_port = 8935 ws_port = 8945 args = [ - "-lparachain=debug,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace" + "-lparachain=debug,runtime::bridge=trace,xcm=trace,txpool=trace" ] # run bob as parachain collator @@ -51,7 +51,7 @@ cumulus_based = true rpc_port = 8936 ws_port = 8946 args = [ - "-lparachain=trace,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace" + "-lparachain=debug,runtime::bridge=trace,xcm=trace,txpool=trace" ] [[parachains]] @@ -65,14 +65,14 @@ cumulus_based = true ws_port = 9010 command = "{{POLKADOT_PARACHAIN_BINARY}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" + 
"-lparachain=debug,xcm=trace,runtime::bridge=trace,txpool=trace" ] [[parachains.collators]] name = "asset-hub-westend-collator2" command = "{{POLKADOT_PARACHAIN_BINARY}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" + "-lparachain=debug,xcm=trace,runtime::bridge=trace,txpool=trace" ] #[[hrmp_channels]] diff --git a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh index 41aa862be57..2f11692d97b 100755 --- a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh +++ b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh @@ -169,12 +169,107 @@ function run_relay() { --lane "${LANE_ID}" } +function run_finality_relay() { + local relayer_path=$(ensure_relayer) + + RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ + $relayer_path relay-headers rococo-to-bridge-hub-westend \ + --only-free-headers \ + --source-host localhost \ + --source-port 9942 \ + --target-host localhost \ + --target-port 8945 \ + --target-version-mode Auto \ + --target-signer //Charlie \ + --target-transactions-mortality 4& + + RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ + $relayer_path relay-headers westend-to-bridge-hub-rococo \ + --only-free-headers \ + --source-host localhost \ + --source-port 9945 \ + --target-host localhost \ + --target-port 8943 \ + --target-version-mode Auto \ + --target-signer //Charlie \ + --target-transactions-mortality 4 +} + +function run_parachains_relay() { + local relayer_path=$(ensure_relayer) + + RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ + $relayer_path relay-parachains rococo-to-bridge-hub-westend \ + --only-free-headers \ + --source-host localhost \ + --source-port 9942 \ + --target-host localhost \ + --target-port 8945 \ + --target-version-mode Auto \ + --target-signer //Dave \ + --target-transactions-mortality 4& + + RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ + $relayer_path relay-parachains westend-to-bridge-hub-rococo \ + --only-free-headers \ + --source-host localhost \ + --source-port 9945 \ + --target-host localhost \ + --target-port 8943 \ + --target-version-mode Auto \ + --target-signer //Dave \ + --target-transactions-mortality 4 +} + +function run_messages_relay() { + local relayer_path=$(ensure_relayer) + + RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ + $relayer_path relay-messages bridge-hub-rococo-to-bridge-hub-westend \ + --source-host localhost \ + --source-port 8943 \ + --source-version-mode Auto \ + --source-signer //Eve \ + --source-transactions-mortality 4 \ + --target-host localhost \ + --target-port 8945 \ + --target-version-mode Auto \ + --target-signer //Eve \ + --target-transactions-mortality 4 \ + --lane $LANE_ID& + + RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ + $relayer_path relay-messages bridge-hub-westend-to-bridge-hub-rococo \ + --source-host localhost \ + --source-port 8945 \ + --source-version-mode Auto \ + --source-signer //Ferdie \ + --source-transactions-mortality 4 \ + --target-host localhost \ + --target-port 8943 \ + --target-version-mode Auto \ + --target-signer //Ferdie \ + --target-transactions-mortality 4 \ + --lane $LANE_ID +} + case "$1" in run-relay) init_wnd_ro init_ro_wnd run_relay ;; + run-finality-relay) + init_wnd_ro + init_ro_wnd + run_finality_relay + ;; + run-parachains-relay) + run_parachains_relay + ;; + run-messages-relay) + run_messages_relay + ;; init-asset-hub-rococo-local) ensure_polkadot_js_api # create foreign assets for native Westend token (governance call 
on Rococo) @@ -386,6 +481,9 @@ case "$1" in echo "A command is require. Supported commands for: Local (zombienet) run: - run-relay + - run-finality-relay + - run-parachains-relay + - run-messages-relay - init-asset-hub-rococo-local - init-bridge-hub-rococo-local - init-asset-hub-westend-local diff --git a/bridges/testing/environments/rococo-westend/explorers.sh b/bridges/testing/environments/rococo-westend/explorers.sh new file mode 100755 index 00000000000..fb137726c93 --- /dev/null +++ b/bridges/testing/environments/rococo-westend/explorers.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# Rococo AH +xdg-open https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:9910#/explorer& +# Rococo BH +xdg-open https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8943#/explorer& + +# Westend BH +xdg-open https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8945#/explorer& +# Westend AH +xdg-open https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:9010#/explorer& diff --git a/bridges/testing/environments/rococo-westend/helper.sh b/bridges/testing/environments/rococo-westend/helper.sh index 0a13ded213f..571c78fea58 100755 --- a/bridges/testing/environments/rococo-westend/helper.sh +++ b/bridges/testing/environments/rococo-westend/helper.sh @@ -1,3 +1,9 @@ #!/bin/bash -$ENV_PATH/bridges_rococo_westend.sh "$@" +if [ $1 == "auto-log" ]; then + shift # ignore "auto-log" + log_name=$1 + $ENV_PATH/bridges_rococo_westend.sh "$@" >$TEST_DIR/logs/$log_name.log +else + $ENV_PATH/bridges_rococo_westend.sh "$@" +fi diff --git a/bridges/testing/environments/rococo-westend/spawn.sh b/bridges/testing/environments/rococo-westend/spawn.sh index cbd0b1bc623..a0ab00be144 100755 --- a/bridges/testing/environments/rococo-westend/spawn.sh +++ b/bridges/testing/environments/rococo-westend/spawn.sh @@ -59,12 +59,12 @@ if [[ $init -eq 1 ]]; then fi if [[ $start_relayer -eq 1 ]]; then - ${BASH_SOURCE%/*}/start_relayer.sh $rococo_dir $westend_dir relayer_pid + ${BASH_SOURCE%/*}/start_relayer.sh $rococo_dir $westend_dir finality_relayer_pid parachains_relayer_pid messages_relayer_pid fi echo $rococo_dir > $TEST_DIR/rococo.env echo $westend_dir > $TEST_DIR/westend.env echo -wait -n $rococo_pid $westend_pid $relayer_pid +wait -n $rococo_pid $westend_pid $finality_relayer_pid $parachains_relayer_pid $messages_relayer_pid kill -9 -$$ diff --git a/bridges/testing/environments/rococo-westend/start_relayer.sh b/bridges/testing/environments/rococo-westend/start_relayer.sh index 7ddd312d395..9c57e4a6ab6 100755 --- a/bridges/testing/environments/rococo-westend/start_relayer.sh +++ b/bridges/testing/environments/rococo-westend/start_relayer.sh @@ -7,17 +7,31 @@ source "$FRAMEWORK_PATH/utils/zombienet.sh" rococo_dir=$1 westend_dir=$2 -__relayer_pid=$3 +__finality_relayer_pid=$3 +__parachains_relayer_pid=$4 +__messages_relayer_pid=$5 logs_dir=$TEST_DIR/logs helper_script="${BASH_SOURCE%/*}/helper.sh" -relayer_log=$logs_dir/relayer.log -echo -e "Starting rococo-westend relayer. Logs available at: $relayer_log\n" -start_background_process "$helper_script run-relay" $relayer_log relayer_pid +# start finality relayer +finality_relayer_log=$logs_dir/relayer_finality.log +echo -e "Starting rococo-westend finality relayer. Logs available at: $finality_relayer_log\n" +start_background_process "$helper_script run-finality-relay" $finality_relayer_log finality_relayer_pid + +# start parachains relayer +parachains_relayer_log=$logs_dir/relayer_parachains.log +echo -e "Starting rococo-westend parachains relayer. 
Logs available at: $parachains_relayer_log\n" +start_background_process "$helper_script run-parachains-relay" $parachains_relayer_log parachains_relayer_pid + +# start messages relayer +messages_relayer_log=$logs_dir/relayer_messages.log +echo -e "Starting rococo-westend messages relayer. Logs available at: $messages_relayer_log\n" +start_background_process "$helper_script run-messages-relay" $messages_relayer_log messages_relayer_pid run_zndsl ${BASH_SOURCE%/*}/rococo.zndsl $rococo_dir run_zndsl ${BASH_SOURCE%/*}/westend.zndsl $westend_dir -eval $__relayer_pid="'$relayer_pid'" - +eval $__finality_relayer_pid="'$finality_relayer_pid'" +eval $__parachains_relayer_pid="'$parachains_relayer_pid'" +eval $__messages_relayer_pid="'$messages_relayer_pid'" diff --git a/bridges/testing/framework/js-helpers/native-asset-balance.js b/bridges/testing/framework/js-helpers/native-asset-balance.js new file mode 100644 index 00000000000..4869eba35d8 --- /dev/null +++ b/bridges/testing/framework/js-helpers/native-asset-balance.js @@ -0,0 +1,12 @@ +async function run(nodeName, networkInfo, args) { + const {wsUri, userDefinedTypes} = networkInfo.nodesByName[nodeName]; + const api = await zombie.connect(wsUri, userDefinedTypes); + + const accountAddress = args[0]; + const accountData = await api.query.system.account(accountAddress); + const accountBalance = accountData.data['free']; + console.log("Balance of " + accountAddress + ": " + accountBalance); + return accountBalance; +} + +module.exports = {run} diff --git a/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl b/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl index cdb7d28e940..6e26632fd9f 100644 --- a/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl @@ -3,10 +3,10 @@ Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml Creds: config # send 5 ROC to //Alice from Rococo AH to Westend AH -asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "reserve-transfer-assets-from-asset-hub-rococo-local 5000000000000" within 120 seconds +asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "auto-log reserve-transfer-assets-from-asset-hub-rococo-local 5000000000000" within 120 seconds # check that //Alice received at least 4.8 ROC on Westend AH asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,4800000000000,Rococo" within 600 seconds -# check that the relayer //Charlie is rewarded by Westend AH -bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726F,ThisChain,0" within 30 seconds +# relayer //Ferdie is rewarded for delivering messages from Rococo BH +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5HGjWAeFDfFCWPsjFQdVV2Msvz2XtMktvgocEZcCj68kUMaw,0x00000002,0x6268726F,ThisChain,0" within 300 seconds diff --git a/bridges/testing/tests/0001-asset-transfer/roc-relayer-balance-does-not-change.zndsl b/bridges/testing/tests/0001-asset-transfer/roc-relayer-balance-does-not-change.zndsl new file mode 100644 index 00000000000..4839c19c0ff --- /dev/null +++ b/bridges/testing/tests/0001-asset-transfer/roc-relayer-balance-does-not-change.zndsl @@ -0,0 +1,11 @@ +Description: Finality and parachain relays should have the constant balance, because their transactions are free 
+Network: {{ENV_PATH}}/bridge_hub_rococo_local_network.toml +Creds: config + +# local chain spec gives `1u64 << 60` tokens to every endowed account: if it'll ever +# change, it'd need to be fixed here as well + +# //Charlie only submits free and mandatory relay chain headers, so the balance should stay the same +bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" return is 1152921504606846976 within 30 seconds +# //Dave only submits free parachain headers, so the balance should stay the same +bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5DAAnrj7VHTznn2AWBemMuyBwZWs6FNFjdyVXUeYum3PTXFy" return is 1152921504606846976 within 30 seconds diff --git a/bridges/testing/tests/0001-asset-transfer/run.sh b/bridges/testing/tests/0001-asset-transfer/run.sh index a7bb122919b..227069932f2 100755 --- a/bridges/testing/tests/0001-asset-transfer/run.sh +++ b/bridges/testing/tests/0001-asset-transfer/run.sh @@ -18,8 +18,14 @@ ensure_process_file $env_pid $TEST_DIR/westend.env 300 westend_dir=`cat $TEST_DIR/westend.env` echo +run_zndsl ${BASH_SOURCE%/*}/roc-relayer-balance-does-not-change.zndsl $rococo_dir +run_zndsl ${BASH_SOURCE%/*}/wnd-relayer-balance-does-not-change.zndsl $westend_dir + run_zndsl ${BASH_SOURCE%/*}/roc-reaches-westend.zndsl $westend_dir run_zndsl ${BASH_SOURCE%/*}/wnd-reaches-rococo.zndsl $rococo_dir run_zndsl ${BASH_SOURCE%/*}/wroc-reaches-rococo.zndsl $rococo_dir run_zndsl ${BASH_SOURCE%/*}/wwnd-reaches-westend.zndsl $westend_dir + +run_zndsl ${BASH_SOURCE%/*}/roc-relayer-balance-does-not-change.zndsl $rococo_dir +run_zndsl ${BASH_SOURCE%/*}/wnd-relayer-balance-does-not-change.zndsl $westend_dir diff --git a/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl b/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl index dbc03864e2b..5a8d6dabc20 100644 --- a/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl @@ -3,10 +3,10 @@ Network: {{ENV_PATH}}/bridge_hub_rococo_local_network.toml Creds: config # send 5 WND to //Alice from Westend AH to Rococo AH -asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "reserve-transfer-assets-from-asset-hub-westend-local 5000000000000" within 120 seconds +asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "auto-log reserve-transfer-assets-from-asset-hub-westend-local 5000000000000" within 120 seconds # check that //Alice received at least 4.8 WND on Rococo AH asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,4800000000000,Westend" within 600 seconds -# check that the relayer //Charlie is rewarded by Rococo AH -bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,ThisChain,0" within 30 seconds +# relayer //Eve is rewarded for delivering messages from Westend BH +bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL,0x00000002,0x62687764,ThisChain,0" within 300 seconds diff --git a/bridges/testing/tests/0001-asset-transfer/wnd-relayer-balance-does-not-change.zndsl b/bridges/testing/tests/0001-asset-transfer/wnd-relayer-balance-does-not-change.zndsl new file mode 100644 index 
00000000000..d2563e18078 --- /dev/null +++ b/bridges/testing/tests/0001-asset-transfer/wnd-relayer-balance-does-not-change.zndsl @@ -0,0 +1,11 @@ +Description: Finality and parachain relays should have the constant balance, because their transactions are free +Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml +Creds: config + +# local chain spec gives `1u64 << 60` tokens to every endowed account: if it'll ever +# change, it'd need to be fixed here as well + +# //Charlie only submits free and mandatory relay chain headers, so the balance should stay the same +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" return is 1152921504606846976 within 30 seconds +# //Dave only submits free parachain headers, so the balance should stay the same +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5DAAnrj7VHTznn2AWBemMuyBwZWs6FNFjdyVXUeYum3PTXFy" return is 1152921504606846976 within 30 seconds diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index f5a75aa03ac..574406ab305 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -22,6 +22,7 @@ scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } +tuplex = { version = "0.1", default-features = false } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } @@ -218,6 +219,7 @@ std = [ "sp-version/std", "substrate-wasm-builder", "testnet-parachains-constants/std", + "tuplex/std", "xcm-builder/std", "xcm-executor/std", "xcm/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs index 93ef9470363..5551b05e202 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs @@ -49,7 +49,8 @@ pub type BridgeGrandpaWestendInstance = pallet_bridge_grandpa::Instance3; impl pallet_bridge_grandpa::Config for Runtime { type RuntimeEvent = RuntimeEvent; type BridgedChain = bp_westend::Westend; - type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>; + type MaxFreeHeadersPerBlock = ConstU32<4>; + type FreeHeadersInterval = ConstU32<5>; type HeadersToKeep = RelayChainHeadersToKeep; type WeightInfo = weights::pallet_bridge_grandpa::WeightInfo; } @@ -89,7 +90,8 @@ pub type BridgeGrandpaRococoBulletinInstance = pallet_bridge_grandpa::Instance4; impl pallet_bridge_grandpa::Config for Runtime { type RuntimeEvent = RuntimeEvent; type BridgedChain = bp_polkadot_bulletin::PolkadotBulletin; - type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>; + type MaxFreeHeadersPerBlock = ConstU32<4>; + type FreeHeadersInterval = ConstU32<5>; type HeadersToKeep = RelayChainHeadersToKeep; // Technically this is incorrect - we have two pallet instances and ideally we shall // benchmark every instance separately. 
But the benchmarking engine has a flaw - it diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs index 8845f0538b5..94b936889b7 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs @@ -20,17 +20,15 @@ //! are reusing Polkadot Bulletin chain primitives everywhere here. use crate::{ - bridge_common_config::{BridgeGrandpaRococoBulletinInstance, BridgeHubRococo}, - weights, - xcm_config::UniversalLocation, - AccountId, BridgeRococoBulletinGrandpa, BridgeRococoBulletinMessages, PolkadotXcm, Runtime, - RuntimeEvent, XcmOverRococoBulletin, XcmRouter, + bridge_common_config::BridgeHubRococo, weights, xcm_config::UniversalLocation, AccountId, + BridgeRococoBulletinGrandpa, BridgeRococoBulletinMessages, PolkadotXcm, Runtime, RuntimeEvent, + XcmOverRococoBulletin, XcmRouter, }; use bp_messages::LaneId; use bp_runtime::Chain; use bridge_runtime_common::{ extensions::refund_relayer_extension::{ - ActualFeeRefund, RefundBridgedGrandpaMessages, RefundSignedExtensionAdapter, + ActualFeeRefund, RefundBridgedMessages, RefundSignedExtensionAdapter, RefundableMessagesLane, }, messages, @@ -83,6 +81,9 @@ parameter_types! { pub const RococoPeopleToRococoBulletinMessagesLane: bp_messages::LaneId = XCM_LANE_FOR_ROCOCO_PEOPLE_TO_ROCOCO_BULLETIN; + // see the `FEE_BOOST_PER_RELAY_HEADER` constant get the meaning of this value + pub PriorityBoostPerRelayHeader: u64 = 58_014_163_614_163; + /// Priority boost that the registered relayer receives for every additional message in the message /// delivery transaction. /// @@ -169,9 +170,8 @@ impl messages::BridgedChainWithMessages for RococoBulletin {} /// Signed extension that refunds relayers that are delivering messages from the Rococo Bulletin /// chain. pub type OnBridgeHubRococoRefundRococoBulletinMessages = RefundSignedExtensionAdapter< - RefundBridgedGrandpaMessages< + RefundBridgedMessages< Runtime, - BridgeGrandpaRococoBulletinInstance, RefundableMessagesLane< WithRococoBulletinMessagesInstance, RococoPeopleToRococoBulletinMessagesLane, @@ -244,6 +244,9 @@ mod tests { /// operational costs and a faster bridge), so this value should be significant. 
const FEE_BOOST_PER_MESSAGE: Balance = 2 * rococo::currency::UNITS; + // see `FEE_BOOST_PER_MESSAGE` comment + const FEE_BOOST_PER_RELAY_HEADER: Balance = 2 * rococo::currency::UNITS; + #[test] fn ensure_bridge_hub_rococo_message_lane_weights_are_correct() { check_message_lane_weights::< @@ -273,7 +276,13 @@ mod tests { // Bulletin chain - it has the same (almost) runtime for Polkadot Bulletin and Rococo // Bulletin, so we have to adhere Polkadot names here - bridge_runtime_common::extensions::priority_calculator::ensure_priority_boost_is_sane::< + bridge_runtime_common::extensions::priority_calculator::per_relay_header::ensure_priority_boost_is_sane::< + Runtime, + BridgeGrandpaRococoBulletinInstance, + PriorityBoostPerRelayHeader, + >(FEE_BOOST_PER_RELAY_HEADER); + + bridge_runtime_common::extensions::priority_calculator::per_message::ensure_priority_boost_is_sane::< Runtime, WithRococoBulletinMessagesInstance, PriorityBoostPerMessage, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs index e5a00073407..1681ac7f468 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs @@ -29,8 +29,8 @@ use bp_messages::LaneId; use bp_runtime::Chain; use bridge_runtime_common::{ extensions::refund_relayer_extension::{ - ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, - RefundableMessagesLane, RefundableParachain, + ActualFeeRefund, RefundBridgedMessages, RefundSignedExtensionAdapter, + RefundableMessagesLane, }, messages, messages::{ @@ -65,6 +65,10 @@ parameter_types! { 2, [GlobalConsensus(WestendGlobalConsensusNetwork::get())] ); + // see the `FEE_BOOST_PER_RELAY_HEADER` constant get the meaning of this value + pub PriorityBoostPerRelayHeader: u64 = 32_007_814_407_814; + // see the `FEE_BOOST_PER_PARACHAIN_HEADER` constant get the meaning of this value + pub PriorityBoostPerParachainHeader: u64 = 1_396_340_903_540_903; // see the `FEE_BOOST_PER_MESSAGE` constant to get the meaning of this value pub PriorityBoostPerMessage: u64 = 182_044_444_444_444; @@ -174,12 +178,8 @@ impl messages::BridgedChainWithMessages for BridgeHubWestend {} /// Signed extension that refunds relayers that are delivering messages from the Westend parachain. pub type OnBridgeHubRococoRefundBridgeHubWestendMessages = RefundSignedExtensionAdapter< - RefundBridgedParachainMessages< + RefundBridgedMessages< Runtime, - RefundableParachain< - BridgeParachainWestendInstance, - bp_bridge_hub_westend::BridgeHubWestend, - >, RefundableMessagesLane< WithBridgeHubWestendMessagesInstance, AssetHubRococoToAssetHubWestendMessagesLane, @@ -246,6 +246,7 @@ mod tests { use crate::bridge_common_config::BridgeGrandpaWestendInstance; use bridge_runtime_common::{ assert_complete_bridge_types, + extensions::refund_relayer_extension::RefundableParachain, integrity::{ assert_complete_bridge_constants, check_message_lane_weights, AssertBridgeMessagesPalletConstants, AssertBridgePalletNames, AssertChainConstants, @@ -266,6 +267,11 @@ mod tests { /// operational costs and a faster bridge), so this value should be significant. 
const FEE_BOOST_PER_MESSAGE: Balance = 2 * rococo::currency::UNITS; + // see `FEE_BOOST_PER_MESSAGE` comment + const FEE_BOOST_PER_RELAY_HEADER: Balance = 2 * rococo::currency::UNITS; + // see `FEE_BOOST_PER_MESSAGE` comment + const FEE_BOOST_PER_PARACHAIN_HEADER: Balance = 2 * rococo::currency::UNITS; + #[test] fn ensure_bridge_hub_rococo_message_lane_weights_are_correct() { check_message_lane_weights::< @@ -318,7 +324,19 @@ mod tests { }, }); - bridge_runtime_common::extensions::priority_calculator::ensure_priority_boost_is_sane::< + bridge_runtime_common::extensions::priority_calculator::per_relay_header::ensure_priority_boost_is_sane::< + Runtime, + BridgeGrandpaWestendInstance, + PriorityBoostPerRelayHeader, + >(FEE_BOOST_PER_RELAY_HEADER); + + bridge_runtime_common::extensions::priority_calculator::per_parachain_header::ensure_priority_boost_is_sane::< + Runtime, + RefundableParachain, + PriorityBoostPerParachainHeader, + >(FEE_BOOST_PER_PARACHAIN_HEADER); + + bridge_runtime_common::extensions::priority_calculator::per_message::ensure_priority_boost_is_sane::< Runtime, WithBridgeHubWestendMessagesInstance, PriorityBoostPerMessage, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 109b081f937..7c2aa490886 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -35,6 +35,12 @@ pub mod bridge_to_westend_config; mod weights; pub mod xcm_config; +use bridge_runtime_common::extensions::{ + check_obsolete_extension::{ + CheckAndBoostBridgeGrandpaTransactions, CheckAndBoostBridgeParachainsTransactions, + }, + refund_relayer_extension::RefundableParachain, +}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use snowbridge_beacon_primitives::{Fork, ForkVersions}; use snowbridge_core::{ @@ -63,7 +69,7 @@ use frame_support::{ dispatch::DispatchClass, genesis_builder_helper::{build_state, get_preset}, parameter_types, - traits::{ConstBool, ConstU32, ConstU64, ConstU8, TransformOrigin}, + traits::{ConstBool, ConstU32, ConstU64, ConstU8, Get, TransformOrigin}, weights::{ConstantMultiplier, Weight}, PalletId, }; @@ -740,10 +746,28 @@ pub type XcmOverRococoBulletin = XcmOverPolkadotBulletin; bridge_runtime_common::generate_bridge_reject_obsolete_headers_and_messages! { RuntimeCall, AccountId, // Grandpa - BridgeWestendGrandpa, - BridgeRococoBulletinGrandpa, + CheckAndBoostBridgeGrandpaTransactions< + Runtime, + bridge_common_config::BridgeGrandpaWestendInstance, + bridge_to_westend_config::PriorityBoostPerRelayHeader, + xcm_config::TreasuryAccount, + >, + CheckAndBoostBridgeGrandpaTransactions< + Runtime, + bridge_common_config::BridgeGrandpaRococoBulletinInstance, + bridge_to_bulletin_config::PriorityBoostPerRelayHeader, + xcm_config::TreasuryAccount, + >, // Parachains - BridgeWestendParachains, + CheckAndBoostBridgeParachainsTransactions< + Runtime, + RefundableParachain< + bridge_common_config::BridgeParachainWestendInstance, + bp_bridge_hub_westend::BridgeHubWestend, + >, + bridge_to_westend_config::PriorityBoostPerParachainHeader, + xcm_config::TreasuryAccount, + >, // Messages BridgeWestendMessages, BridgeRococoBulletinMessages @@ -938,6 +962,11 @@ impl_runtime_apis! 
{ fn best_finalized() -> Option> { BridgeWestendGrandpa::best_finalized() } + fn free_headers_interval() -> Option { + >::FreeHeadersInterval::get() + } fn synced_headers_grandpa_info( ) -> Vec> { BridgeWestendGrandpa::synced_headers_grandpa_info() @@ -950,6 +979,10 @@ impl_runtime_apis! { bp_bridge_hub_westend::BridgeHubWestend >().unwrap_or(None) } + fn free_headers_interval() -> Option { + // "free interval" is not currently used for parachains + None + } } // This is exposed by BridgeHubRococo @@ -984,6 +1017,12 @@ impl_runtime_apis! { BridgePolkadotBulletinGrandpa::best_finalized() } + fn free_headers_interval() -> Option { + >::FreeHeadersInterval::get() + } + fn synced_headers_grandpa_info( ) -> Vec> { BridgePolkadotBulletinGrandpa::synced_headers_grandpa_info() diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs index aac39a4564f..942f243141d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs @@ -17,8 +17,10 @@ //! Expose the auto generated weight files. +use ::pallet_bridge_grandpa::WeightInfoExt as GrandpaWeightInfoExt; use ::pallet_bridge_messages::WeightInfoExt as MessagesWeightInfoExt; use ::pallet_bridge_parachains::WeightInfoExt as ParachainsWeightInfoExt; +use ::pallet_bridge_relayers::WeightInfo as _; pub mod block_weights; pub mod cumulus_pallet_parachain_system; @@ -56,6 +58,16 @@ use frame_support::weights::Weight; // import trait from dependency module use ::pallet_bridge_relayers::WeightInfoExt as _; +impl GrandpaWeightInfoExt for pallet_bridge_grandpa::WeightInfo { + fn submit_finality_proof_overhead_from_runtime() -> Weight { + // our signed extension: + // 1) checks whether relayer registration is active from validate/pre_dispatch; + // 2) may slash and deregister relayer from post_dispatch + // (2) includes (1), so (2) is the worst case + pallet_bridge_relayers::WeightInfo::::slash_and_deregister() + } +} + impl MessagesWeightInfoExt for pallet_bridge_messages_rococo_to_rococo_bulletin::WeightInfo { @@ -94,4 +106,12 @@ impl ParachainsWeightInfoExt for pallet_bridge_parachains::WeightInfo u32 { bp_bridge_hub_westend::EXTRA_STORAGE_PROOF_SIZE } + + fn submit_parachain_heads_overhead_from_runtime() -> Weight { + // our signed extension: + // 1) checks whether relayer registration is active from validate/pre_dispatch; + // 2) may slash and deregister relayer from post_dispatch + // (2) includes (1), so (2) is the worst case + pallet_bridge_relayers::WeightInfo::::slash_and_deregister() + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index 776c505fa64..b309232825d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -80,11 +80,10 @@ fn construct_and_apply_extrinsic( r.unwrap() } -fn construct_and_estimate_extrinsic_fee(batch: pallet_utility::Call) -> Balance { - let batch_call = RuntimeCall::Utility(batch); - let batch_info = batch_call.get_dispatch_info(); - let xt = construct_extrinsic(Alice, batch_call); - TransactionPayment::compute_fee(xt.encoded_size() as _, &batch_info, 0) +fn construct_and_estimate_extrinsic_fee(call: RuntimeCall) -> Balance { + let info = call.get_dispatch_info(); + let xt = 
construct_extrinsic(Alice, call); + TransactionPayment::compute_fee(xt.encoded_size() as _, &info, 0) } fn collator_session_keys() -> bridge_hub_test_utils::CollatorSessionKeys { @@ -376,20 +375,20 @@ mod bridge_hub_westend_tests { } #[test] - pub fn complex_relay_extrinsic_works() { - // for Westend - from_parachain::complex_relay_extrinsic_works::( + fn free_relay_extrinsic_works() { + // from Westend + from_parachain::free_relay_extrinsic_works::( collator_session_keys(), slot_durations(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, BridgeHubWestendChainId::get(), + SIBLING_PARACHAIN_ID, Rococo, XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND, || (), construct_and_apply_extrinsic, - ); + ) } #[test] @@ -414,12 +413,12 @@ mod bridge_hub_westend_tests { } #[test] - pub fn can_calculate_fee_for_complex_message_delivery_transaction() { + fn can_calculate_fee_for_standalone_message_delivery_transaction() { bridge_hub_test_utils::check_sane_fees_values( "bp_bridge_hub_rococo::BridgeHubRococoBaseDeliveryFeeInRocs", bp_bridge_hub_rococo::BridgeHubRococoBaseDeliveryFeeInRocs::get(), || { - from_parachain::can_calculate_fee_for_complex_message_delivery_transaction::< + from_parachain::can_calculate_fee_for_standalone_message_delivery_transaction::< RuntimeTestsAdapter, >(collator_session_keys(), construct_and_estimate_extrinsic_fee) }, @@ -433,12 +432,12 @@ mod bridge_hub_westend_tests { } #[test] - pub fn can_calculate_fee_for_complex_message_confirmation_transaction() { + fn can_calculate_fee_for_standalone_message_confirmation_transaction() { bridge_hub_test_utils::check_sane_fees_values( "bp_bridge_hub_rococo::BridgeHubRococoBaseConfirmationFeeInRocs", bp_bridge_hub_rococo::BridgeHubRococoBaseConfirmationFeeInRocs::get(), || { - from_parachain::can_calculate_fee_for_complex_message_confirmation_transaction::< + from_parachain::can_calculate_fee_for_standalone_message_confirmation_transaction::< RuntimeTestsAdapter, >(collator_session_keys(), construct_and_estimate_extrinsic_fee) }, @@ -581,28 +580,28 @@ mod bridge_hub_bulletin_tests { } #[test] - pub fn complex_relay_extrinsic_works() { - // for Bulletin - from_grandpa_chain::complex_relay_extrinsic_works::( + fn free_relay_extrinsic_works() { + // from Bulletin + from_grandpa_chain::free_relay_extrinsic_works::( collator_session_keys(), slot_durations(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, RococoBulletinChainId::get(), + SIBLING_PARACHAIN_ID, Rococo, XCM_LANE_FOR_ROCOCO_PEOPLE_TO_ROCOCO_BULLETIN, || (), construct_and_apply_extrinsic, - ); + ) } #[test] - pub fn can_calculate_fee_for_complex_message_delivery_transaction() { + pub fn can_calculate_fee_for_standalone_message_delivery_transaction() { bridge_hub_test_utils::check_sane_fees_values( "bp_bridge_hub_rococo::BridgeHubRococoBaseDeliveryFeeInRocs", bp_bridge_hub_rococo::BridgeHubRococoBaseDeliveryFeeInRocs::get(), || { - from_grandpa_chain::can_calculate_fee_for_complex_message_delivery_transaction::< + from_grandpa_chain::can_calculate_fee_for_standalone_message_delivery_transaction::< RuntimeTestsAdapter, >(collator_session_keys(), construct_and_estimate_extrinsic_fee) }, @@ -617,12 +616,12 @@ mod bridge_hub_bulletin_tests { } #[test] - pub fn can_calculate_fee_for_complex_message_confirmation_transaction() { + pub fn can_calculate_fee_for_standalone_message_confirmation_transaction() { bridge_hub_test_utils::check_sane_fees_values( 
"bp_bridge_hub_rococo::BridgeHubRococoBaseConfirmationFeeInRocs", bp_bridge_hub_rococo::BridgeHubRococoBaseConfirmationFeeInRocs::get(), || { - from_grandpa_chain::can_calculate_fee_for_complex_message_confirmation_transaction::< + from_grandpa_chain::can_calculate_fee_for_standalone_message_confirmation_transaction::< RuntimeTestsAdapter, >(collator_session_keys(), construct_and_estimate_extrinsic_fee) }, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 86560caca99..a7241cc6d10 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -18,6 +18,7 @@ hex-literal = { version = "0.4.1" } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } +tuplex = { version = "0.1", default-features = false } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } @@ -180,6 +181,7 @@ std = [ "sp-version/std", "substrate-wasm-builder", "testnet-parachains-constants/std", + "tuplex/std", "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs index d5da41cce28..425b53da30f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs @@ -26,8 +26,8 @@ use bp_parachains::SingleParaStoredHeaderDataBuilder; use bp_runtime::Chain; use bridge_runtime_common::{ extensions::refund_relayer_extension::{ - ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, - RefundableMessagesLane, RefundableParachain, + ActualFeeRefund, RefundBridgedMessages, RefundSignedExtensionAdapter, + RefundableMessagesLane, }, messages, messages::{ @@ -70,6 +70,10 @@ parameter_types! { 2, [GlobalConsensus(RococoGlobalConsensusNetwork::get())] ); + // see the `FEE_BOOST_PER_RELAY_HEADER` constant get the meaning of this value + pub PriorityBoostPerRelayHeader: u64 = 32_007_814_407_814; + // see the `FEE_BOOST_PER_PARACHAIN_HEADER` constant get the meaning of this value + pub PriorityBoostPerParachainHeader: u64 = 1_396_340_903_540_903; // see the `FEE_BOOST_PER_MESSAGE` constant to get the meaning of this value pub PriorityBoostPerMessage: u64 = 182_044_444_444_444; @@ -191,9 +195,8 @@ impl ThisChainWithMessages for BridgeHubWestend { /// Signed extension that refunds relayers that are delivering messages from the Rococo parachain. 
pub type OnBridgeHubWestendRefundBridgeHubRococoMessages = RefundSignedExtensionAdapter< - RefundBridgedParachainMessages< + RefundBridgedMessages< Runtime, - RefundableParachain, RefundableMessagesLane< WithBridgeHubRococoMessagesInstance, AssetHubWestendToAssetHubRococoMessagesLane, @@ -210,7 +213,8 @@ pub type BridgeGrandpaRococoInstance = pallet_bridge_grandpa::Instance1; impl pallet_bridge_grandpa::Config for Runtime { type RuntimeEvent = RuntimeEvent; type BridgedChain = bp_rococo::Rococo; - type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>; + type MaxFreeHeadersPerBlock = ConstU32<4>; + type FreeHeadersInterval = ConstU32<5>; type HeadersToKeep = RelayChainHeadersToKeep; type WeightInfo = weights::pallet_bridge_grandpa::WeightInfo; } @@ -281,6 +285,7 @@ mod tests { use super::*; use bridge_runtime_common::{ assert_complete_bridge_types, + extensions::refund_relayer_extension::RefundableParachain, integrity::{ assert_complete_bridge_constants, check_message_lane_weights, AssertBridgeMessagesPalletConstants, AssertBridgePalletNames, AssertChainConstants, @@ -301,6 +306,11 @@ mod tests { /// operational costs and a faster bridge), so this value should be significant. const FEE_BOOST_PER_MESSAGE: Balance = 2 * westend::currency::UNITS; + // see `FEE_BOOST_PER_MESSAGE` comment + const FEE_BOOST_PER_RELAY_HEADER: Balance = 2 * westend::currency::UNITS; + // see `FEE_BOOST_PER_MESSAGE` comment + const FEE_BOOST_PER_PARACHAIN_HEADER: Balance = 2 * westend::currency::UNITS; + #[test] fn ensure_bridge_hub_westend_message_lane_weights_are_correct() { check_message_lane_weights::< @@ -352,7 +362,19 @@ mod tests { }, }); - bridge_runtime_common::extensions::priority_calculator::ensure_priority_boost_is_sane::< + bridge_runtime_common::extensions::priority_calculator::per_relay_header::ensure_priority_boost_is_sane::< + Runtime, + BridgeGrandpaRococoInstance, + PriorityBoostPerRelayHeader, + >(FEE_BOOST_PER_RELAY_HEADER); + + bridge_runtime_common::extensions::priority_calculator::per_parachain_header::ensure_priority_boost_is_sane::< + Runtime, + RefundableParachain, + PriorityBoostPerParachainHeader, + >(FEE_BOOST_PER_PARACHAIN_HEADER); + + bridge_runtime_common::extensions::priority_calculator::per_message::ensure_priority_boost_is_sane::< Runtime, WithBridgeHubRococoMessagesInstance, PriorityBoostPerMessage, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index cf09a1acc54..640eaf881a5 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -32,6 +32,12 @@ pub mod bridge_to_rococo_config; mod weights; pub mod xcm_config; +use bridge_runtime_common::extensions::{ + check_obsolete_extension::{ + CheckAndBoostBridgeGrandpaTransactions, CheckAndBoostBridgeParachainsTransactions, + }, + refund_relayer_extension::RefundableParachain, +}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use cumulus_primitives_core::ParaId; use sp_api::impl_runtime_apis; @@ -57,7 +63,7 @@ use frame_support::{ dispatch::DispatchClass, genesis_builder_helper::{build_state, get_preset}, parameter_types, - traits::{ConstBool, ConstU32, ConstU64, ConstU8, TransformOrigin}, + traits::{ConstBool, ConstU32, ConstU64, ConstU8, Get, TransformOrigin}, weights::{ConstantMultiplier, Weight}, PalletId, }; @@ -502,9 +508,22 @@ construct_runtime!( 
bridge_runtime_common::generate_bridge_reject_obsolete_headers_and_messages! { RuntimeCall, AccountId, // Grandpa - BridgeRococoGrandpa, + CheckAndBoostBridgeGrandpaTransactions< + Runtime, + bridge_to_rococo_config::BridgeGrandpaRococoInstance, + bridge_to_rococo_config::PriorityBoostPerRelayHeader, + xcm_config::TreasuryAccount, + >, // Parachains - BridgeRococoParachains, + CheckAndBoostBridgeParachainsTransactions< + Runtime, + RefundableParachain< + bridge_to_rococo_config::BridgeParachainRococoInstance, + bp_bridge_hub_rococo::BridgeHubRococo, + >, + bridge_to_rococo_config::PriorityBoostPerParachainHeader, + xcm_config::TreasuryAccount, + >, // Messages BridgeRococoMessages } @@ -692,6 +711,11 @@ impl_runtime_apis! { fn best_finalized() -> Option<HeaderId<bp_rococo::Hash, bp_rococo::BlockNumber>> { BridgeRococoGrandpa::best_finalized() } + fn free_headers_interval() -> Option<bp_rococo::BlockNumber> { + <Runtime as pallet_bridge_grandpa::Config< + bridge_to_rococo_config::BridgeGrandpaRococoInstance, + >>::FreeHeadersInterval::get() + } fn synced_headers_grandpa_info( ) -> Vec<bp_header_chain::StoredHeaderGrandpaInfo<bp_rococo::Header>> { BridgeRococoGrandpa::synced_headers_grandpa_info() } @@ -704,6 +728,10 @@ impl_runtime_apis! { bp_bridge_hub_rococo::BridgeHubRococo >().unwrap_or(None) } + fn free_headers_interval() -> Option<bp_bridge_hub_rococo::BlockNumber> { + // "free interval" is not currently used for parachains + None + } } impl bp_bridge_hub_rococo::FromBridgeHubRococoInboundLaneApi<Block> for Runtime { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs index a65ee31d3e5..245daaf8ed9 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs @@ -17,8 +17,10 @@ //! Expose the auto generated weight files. +use ::pallet_bridge_grandpa::WeightInfoExt as GrandpaWeightInfoExt; use ::pallet_bridge_messages::WeightInfoExt as MessagesWeightInfoExt; use ::pallet_bridge_parachains::WeightInfoExt as ParachainsWeightInfoExt; +use ::pallet_bridge_relayers::WeightInfo as _; pub mod block_weights; pub mod cumulus_pallet_parachain_system; @@ -51,6 +53,16 @@ use frame_support::weights::Weight; // import trait from dependency module use ::pallet_bridge_relayers::WeightInfoExt as _; +impl GrandpaWeightInfoExt for pallet_bridge_grandpa::WeightInfo<crate::Runtime> { + fn submit_finality_proof_overhead_from_runtime() -> Weight { + // our signed extension: + // 1) checks whether relayer registration is active from validate/pre_dispatch; + // 2) may slash and deregister relayer from post_dispatch + // (2) includes (1), so (2) is the worst case + pallet_bridge_relayers::WeightInfo::<crate::Runtime>::slash_and_deregister() + } +} + impl MessagesWeightInfoExt for pallet_bridge_messages::WeightInfo<crate::Runtime> { fn expected_extra_storage_proof_size() -> u32 { bp_bridge_hub_rococo::EXTRA_STORAGE_PROOF_SIZE } @@ -70,4 +82,12 @@ impl ParachainsWeightInfoExt for pallet_bridge_parachains::WeightInfo<crate::Runtime> { fn expected_extra_storage_proof_size() -> u32 { bp_bridge_hub_rococo::EXTRA_STORAGE_PROOF_SIZE } + + fn submit_parachain_heads_overhead_from_runtime() -> Weight { + // our signed extension: + // 1) checks whether relayer registration is active from validate/pre_dispatch; + // 2) may slash and deregister relayer from post_dispatch + // (2) includes (1), so (2) is the worst case + pallet_bridge_relayers::WeightInfo::<crate::Runtime>::slash_and_deregister() + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs index 988b10e1e2d..836594140b2 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs +++
b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -94,11 +94,10 @@ fn construct_and_apply_extrinsic( r.unwrap() } -fn construct_and_estimate_extrinsic_fee(batch: pallet_utility::Call<Runtime>) -> Balance { - let batch_call = RuntimeCall::Utility(batch); - let batch_info = batch_call.get_dispatch_info(); - let xt = construct_extrinsic(Alice, batch_call); - TransactionPayment::compute_fee(xt.encoded_size() as _, &batch_info, 0) +fn construct_and_estimate_extrinsic_fee(call: RuntimeCall) -> Balance { + let info = call.get_dispatch_info(); + let xt = construct_extrinsic(Alice, call); + TransactionPayment::compute_fee(xt.encoded_size() as _, &info, 0) } fn collator_session_keys() -> bridge_hub_test_utils::CollatorSessionKeys<Runtime> { @@ -271,22 +270,6 @@ fn relayed_incoming_message_works() { ) } -#[test] -pub fn complex_relay_extrinsic_works() { - from_parachain::complex_relay_extrinsic_works::<RuntimeTestsAdapter>( - collator_session_keys(), - slot_durations(), - bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - BridgeHubRococoChainId::get(), - Westend, - XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO, - || (), - construct_and_apply_extrinsic, - ); -} - #[test] pub fn can_calculate_weight_for_paid_export_message_with_reserve_transfer() { bridge_hub_test_utils::check_sane_fees_values( @@ -309,12 +292,12 @@ pub fn can_calculate_weight_for_paid_export_message_with_reserve_transfer() { } #[test] -pub fn can_calculate_fee_for_complex_message_delivery_transaction() { +pub fn can_calculate_fee_for_standalone_message_delivery_transaction() { bridge_hub_test_utils::check_sane_fees_values( "bp_bridge_hub_westend::BridgeHubWestendBaseDeliveryFeeInWnds", bp_bridge_hub_westend::BridgeHubWestendBaseDeliveryFeeInWnds::get(), || { - from_parachain::can_calculate_fee_for_complex_message_delivery_transaction::< + from_parachain::can_calculate_fee_for_standalone_message_delivery_transaction::< RuntimeTestsAdapter, >(collator_session_keys(), construct_and_estimate_extrinsic_fee) }, @@ -328,12 +311,12 @@ pub fn can_calculate_fee_for_complex_message_delivery_transaction() { } #[test] -pub fn can_calculate_fee_for_complex_message_confirmation_transaction() { +pub fn can_calculate_fee_for_standalone_message_confirmation_transaction() { bridge_hub_test_utils::check_sane_fees_values( "bp_bridge_hub_westend::BridgeHubWestendBaseConfirmationFeeInWnds", bp_bridge_hub_westend::BridgeHubWestendBaseConfirmationFeeInWnds::get(), || { - from_parachain::can_calculate_fee_for_complex_message_confirmation_transaction::< + from_parachain::can_calculate_fee_for_standalone_message_confirmation_transaction::< RuntimeTestsAdapter, >(collator_session_keys(), construct_and_estimate_extrinsic_fee) }, diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs index 8aaaa4f59d7..bfa2f0f50f9 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs @@ -41,6 +41,7 @@ use frame_system::pallet_prelude::BlockNumberFor; use parachains_runtimes_test_utils::{ AccountIdOf, BasicParachainRuntime, CollatorSessionKeys, RuntimeCallOf, SlotDurations, }; +use sp_core::Get; use sp_keyring::AccountKeyring::*; use sp_runtime::{traits::Header as HeaderT, AccountId32}; use xcm::latest::prelude::*; @@ -162,7
+163,14 @@ pub fn relayed_incoming_message_works( test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< RuntimeHelper::MB, (), - >(lane_id, xcm.into(), message_nonce, message_destination, relay_header_number); + >( + lane_id, + xcm.into(), + message_nonce, + message_destination, + relay_header_number, + false, + ); let relay_chain_header_hash = relay_chain_header.hash(); vec![ @@ -202,6 +210,142 @@ pub fn relayed_incoming_message_works( ); } +/// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, +/// with proofs (finality, message) independently submitted. +/// Finality proof is submitted for free in this test. +/// Also verifies relayer transaction signed extensions work as intended. +pub fn free_relay_extrinsic_works( + collator_session_key: CollatorSessionKeys, + slot_durations: SlotDurations, + runtime_para_id: u32, + bridged_chain_id: bp_runtime::ChainId, + sibling_parachain_id: u32, + local_relay_chain_id: NetworkId, + lane_id: LaneId, + prepare_configuration: impl Fn(), + construct_and_apply_extrinsic: fn( + sp_keyring::AccountKeyring, + RuntimeCallOf, + ) -> sp_runtime::DispatchOutcome, +) where + RuntimeHelper: WithRemoteGrandpaChainHelper, + RuntimeHelper::Runtime: pallet_balances::Config, + AccountIdOf: From, + RuntimeCallOf: From> + + From>, + UnderlyingChainOf>: ChainWithGrandpa, + >::SourceHeaderChain: + SourceHeaderChain< + MessagesProof = FromBridgedChainMessagesProof< + HashOf>, + >, + >, +{ + // ensure that the runtime allows free header submissions + let free_headers_interval = >::FreeHeadersInterval::get() + .expect("this test requires runtime, configured to accept headers for free; qed"); + + helpers::relayed_incoming_message_works::< + RuntimeHelper::Runtime, + RuntimeHelper::AllPalletsWithoutSystem, + RuntimeHelper::MPI, + >( + collator_session_key, + slot_durations, + runtime_para_id, + sibling_parachain_id, + local_relay_chain_id, + construct_and_apply_extrinsic, + |relayer_id_at_this_chain, + relayer_id_at_bridged_chain, + message_destination, + message_nonce, + xcm| { + prepare_configuration(); + + // start with bridged relay chain block#0 + let initial_block_number = 0; + helpers::initialize_bridge_grandpa_pallet::( + test_data::initialization_data::( + initial_block_number, + ), + ); + + // free relay chain header is `0 + free_headers_interval` + let relay_header_number = initial_block_number + free_headers_interval; + + // relayer balance shall not change after relay and para header submissions + let initial_relayer_balance = + pallet_balances::Pallet::::free_balance( + relayer_id_at_this_chain.clone(), + ); + + // initialize the `FreeHeadersRemaining` storage value + pallet_bridge_grandpa::Pallet::::on_initialize( + 0u32.into(), + ); + + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. 
+ let (relay_chain_header, grandpa_justification, message_proof) = + test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< + RuntimeHelper::MB, + (), + >( + lane_id, + xcm.into(), + message_nonce, + message_destination, + relay_header_number.into(), + true, + ); + + let relay_chain_header_hash = relay_chain_header.hash(); + vec![ + ( + BridgeGrandpaCall::::submit_finality_proof { + finality_target: Box::new(relay_chain_header), + justification: grandpa_justification, + }.into(), + Box::new(( + helpers::VerifySubmitGrandpaFinalityProofOutcome::::expect_best_header_hash( + relay_chain_header_hash, + ), + helpers::VerifyRelayerBalance::::expect_relayer_balance( + relayer_id_at_this_chain.clone(), + initial_relayer_balance, + ), + )) + ), + ( + BridgeMessagesCall::::receive_messages_proof { + relayer_id_at_bridged_chain, + proof: message_proof, + messages_count: 1, + dispatch_weight: Weight::from_parts(1000000000, 0), + }.into(), + Box::new(( + helpers::VerifySubmitMessagesProofOutcome::::expect_last_delivered_nonce( + lane_id, + 1, + ), + helpers::VerifyRelayerRewarded::::expect_relayer_reward( + relayer_id_at_this_chain, + RewardsAccountParams::new( + lane_id, + bridged_chain_id, + RewardsAccountOwner::ThisChain, + ), + ), + )), + ), + ] + }, + ); +} + /// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, /// with proofs (finality, message) batched together in signed extrinsic. /// Also verifies relayer transaction signed extensions work as intended. @@ -265,7 +409,14 @@ pub fn complex_relay_extrinsic_works( test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< RuntimeHelper::MB, (), - >(lane_id, xcm.into(), message_nonce, message_destination, relay_header_number); + >( + lane_id, + xcm.into(), + message_nonce, + message_destination, + relay_header_number, + false, + ); let relay_chain_header_hash = relay_chain_header.hash(); vec![( @@ -344,6 +495,7 @@ where 1, [GlobalConsensus(Polkadot), Parachain(1_000)].into(), 1u32.into(), + false, ); // generate batch call that provides finality for bridged relay and parachains + message @@ -423,3 +575,109 @@ where compute_extrinsic_fee(batch) }) } + +/// Estimates transaction fee for default message delivery transaction from bridged GRANDPA chain. +pub fn can_calculate_fee_for_standalone_message_delivery_transaction( + collator_session_key: CollatorSessionKeys, + compute_extrinsic_fee: fn( + ::RuntimeCall, + ) -> u128, +) -> u128 +where + RuntimeHelper: WithRemoteGrandpaChainHelper, + RuntimeCallOf: + From>, + UnderlyingChainOf>: ChainWithGrandpa, + >::SourceHeaderChain: + SourceHeaderChain< + MessagesProof = FromBridgedChainMessagesProof< + HashOf>, + >, + >, +{ + run_test::(collator_session_key, 1000, vec![], || { + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. + // + // we don't care about parameter values here, apart from the XCM message size. 
But we + // do not need to have a large message here, because we're charging for every byte of + // the message additionally + let (_, _, message_proof) = + test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< + RuntimeHelper::MB, + (), + >( + LaneId::default(), + vec![Instruction::<()>::ClearOrigin; 1_024].into(), + 1, + [GlobalConsensus(Polkadot), Parachain(1_000)].into(), + 1u32.into(), + false, + ); + + let call = test_data::from_grandpa_chain::make_standalone_relayer_delivery_call::< + RuntimeHelper::Runtime, + RuntimeHelper::GPI, + RuntimeHelper::MPI, + >( + message_proof, + helpers::relayer_id_at_bridged_chain::(), + ); + + compute_extrinsic_fee(call) + }) +} + +/// Estimates transaction fee for default message confirmation transaction (batched with required +/// proofs) from bridged parachain. +pub fn can_calculate_fee_for_standalone_message_confirmation_transaction( + collator_session_key: CollatorSessionKeys, + compute_extrinsic_fee: fn( + ::RuntimeCall, + ) -> u128, +) -> u128 +where + RuntimeHelper: WithRemoteGrandpaChainHelper, + AccountIdOf: From, + MessageThisChain: + bp_runtime::Chain>, + RuntimeCallOf: + From>, + UnderlyingChainOf>: ChainWithGrandpa, + >::TargetHeaderChain: + TargetHeaderChain< + XcmAsPlainPayload, + AccountIdOf, + MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof< + HashOf>>, + >, + >, +{ + run_test::(collator_session_key, 1000, vec![], || { + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. + let unrewarded_relayers = UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 1, + ..Default::default() + }; + let (_, _, message_delivery_proof) = + test_data::from_grandpa_chain::make_complex_relayer_confirmation_proofs::< + RuntimeHelper::MB, + (), + >( + LaneId::default(), + 1u32.into(), + AccountId32::from(Alice.public()).into(), + unrewarded_relayers.clone(), + ); + + let call = test_data::from_grandpa_chain::make_standalone_relayer_confirmation_call::< + RuntimeHelper::Runtime, + RuntimeHelper::GPI, + RuntimeHelper::MPI, + >(message_delivery_proof, unrewarded_relayers); + + compute_extrinsic_fee(call) + }) +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs index 72ec0718acf..12ab382d9e0 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs @@ -42,6 +42,7 @@ use frame_system::pallet_prelude::BlockNumberFor; use parachains_runtimes_test_utils::{ AccountIdOf, BasicParachainRuntime, CollatorSessionKeys, RuntimeCallOf, SlotDurations, }; +use sp_core::Get; use sp_keyring::AccountKeyring::*; use sp_runtime::{traits::Header as HeaderT, AccountId32}; use xcm::latest::prelude::*; @@ -188,6 +189,7 @@ pub fn relayed_incoming_message_works( para_header_number, relay_header_number, bridged_para_id, + false, ); let parachain_head_hash = parachain_head.hash(); @@ -241,6 +243,177 @@ pub fn relayed_incoming_message_works( ); } +/// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, +/// with proofs (finality, para heads, message) independently submitted. +/// Finality and para heads are submitted for free in this test. +/// Also verifies relayer transaction signed extensions work as intended. 
+pub fn free_relay_extrinsic_works( + collator_session_key: CollatorSessionKeys, + slot_durations: SlotDurations, + runtime_para_id: u32, + bridged_para_id: u32, + bridged_chain_id: bp_runtime::ChainId, + sibling_parachain_id: u32, + local_relay_chain_id: NetworkId, + lane_id: LaneId, + prepare_configuration: impl Fn(), + construct_and_apply_extrinsic: fn( + sp_keyring::AccountKeyring, + ::RuntimeCall, + ) -> sp_runtime::DispatchOutcome, +) where + RuntimeHelper: WithRemoteParachainHelper, + RuntimeHelper::Runtime: pallet_balances::Config, + AccountIdOf: From, + RuntimeCallOf: From> + + From> + + From>, + UnderlyingChainOf>: + bp_runtime::Chain + Parachain, + >::BridgedChain: + bp_runtime::Chain + ChainWithGrandpa, + >::SourceHeaderChain: + SourceHeaderChain< + MessagesProof = FromBridgedChainMessagesProof< + HashOf>, + >, + >, +{ + // ensure that the runtime allows free header submissions + let free_headers_interval = >::FreeHeadersInterval::get() + .expect("this test requires runtime, configured to accept headers for free; qed"); + + helpers::relayed_incoming_message_works::< + RuntimeHelper::Runtime, + RuntimeHelper::AllPalletsWithoutSystem, + RuntimeHelper::MPI, + >( + collator_session_key, + slot_durations, + runtime_para_id, + sibling_parachain_id, + local_relay_chain_id, + construct_and_apply_extrinsic, + |relayer_id_at_this_chain, + relayer_id_at_bridged_chain, + message_destination, + message_nonce, + xcm| { + prepare_configuration(); + + // start with bridged relay chain block#0 + let initial_block_number = 0; + helpers::initialize_bridge_grandpa_pallet::( + test_data::initialization_data::( + initial_block_number, + ), + ); + + // free relay chain header is `0 + free_headers_interval` + let relay_header_number = initial_block_number + free_headers_interval; + // first parachain header is always submitted for free + let para_header_number = 1; + + // relayer balance shall not change after relay and para header submissions + let initial_relayer_balance = + pallet_balances::Pallet::::free_balance( + relayer_id_at_this_chain.clone(), + ); + + // initialize the `FreeHeadersRemaining` storage value + pallet_bridge_grandpa::Pallet::::on_initialize( + 0u32.into(), + ); + + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. 
+ let ( + relay_chain_header, + grandpa_justification, + parachain_head, + parachain_heads, + para_heads_proof, + message_proof, + ) = test_data::from_parachain::make_complex_relayer_delivery_proofs::< + >::BridgedChain, + RuntimeHelper::MB, + (), + >( + lane_id, + xcm.into(), + message_nonce, + message_destination, + para_header_number, + relay_header_number, + bridged_para_id, + true, + ); + + let parachain_head_hash = parachain_head.hash(); + let relay_chain_header_hash = relay_chain_header.hash(); + let relay_chain_header_number = *relay_chain_header.number(); + vec![ + ( + BridgeGrandpaCall::::submit_finality_proof { + finality_target: Box::new(relay_chain_header), + justification: grandpa_justification, + }.into(), + Box::new(( + helpers::VerifySubmitGrandpaFinalityProofOutcome::::expect_best_header_hash( + relay_chain_header_hash, + ), + helpers::VerifyRelayerBalance::::expect_relayer_balance( + relayer_id_at_this_chain.clone(), + initial_relayer_balance, + ), + )), + ), + ( + BridgeParachainsCall::::submit_parachain_heads { + at_relay_block: (relay_chain_header_number, relay_chain_header_hash), + parachains: parachain_heads, + parachain_heads_proof: para_heads_proof, + }.into(), + Box::new(( + helpers::VerifySubmitParachainHeaderProofOutcome::::expect_best_header_hash( + bridged_para_id, + parachain_head_hash, + ), + /*helpers::VerifyRelayerBalance::::expect_relayer_balance( + relayer_id_at_this_chain.clone(), + initial_relayer_balance, + ),*/ + )), + ), + ( + BridgeMessagesCall::::receive_messages_proof { + relayer_id_at_bridged_chain, + proof: message_proof, + messages_count: 1, + dispatch_weight: Weight::from_parts(1000000000, 0), + }.into(), + Box::new(( + helpers::VerifySubmitMessagesProofOutcome::::expect_last_delivered_nonce( + lane_id, + 1, + ), + helpers::VerifyRelayerRewarded::::expect_relayer_reward( + relayer_id_at_this_chain, + RewardsAccountParams::new( + lane_id, + bridged_chain_id, + RewardsAccountOwner::ThisChain, + ), + ), + )), + ), + ] + }, + ); +} + /// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, /// with proofs (finality, para heads, message) batched together in signed extrinsic. /// Also verifies relayer transaction signed extensions work as intended. @@ -325,6 +498,7 @@ pub fn complex_relay_extrinsic_works( para_header_number, relay_header_number, bridged_para_id, + false, ); let parachain_head_hash = parachain_head.hash(); @@ -428,6 +602,7 @@ where 1, 5, 1_000, + false, ); // generate batch call that provides finality for bridged relay and parachains + message @@ -527,3 +702,126 @@ where compute_extrinsic_fee(batch) }) } + +/// Estimates transaction fee for default message delivery transaction from bridged parachain. +pub fn can_calculate_fee_for_standalone_message_delivery_transaction( + collator_session_key: CollatorSessionKeys, + compute_extrinsic_fee: fn( + ::RuntimeCall, + ) -> u128, +) -> u128 +where + RuntimeHelper: WithRemoteParachainHelper, + RuntimeCallOf: + From>, + UnderlyingChainOf>: + bp_runtime::Chain + Parachain, + >::BridgedChain: + bp_runtime::Chain + ChainWithGrandpa, + >::SourceHeaderChain: + SourceHeaderChain< + MessagesProof = FromBridgedChainMessagesProof< + HashOf>, + >, + >, +{ + run_test::(collator_session_key, 1000, vec![], || { + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. + // + // we don't care about parameter values here, apart from the XCM message size. 
But we + // do not need to have a large message here, because we're charging for every byte of + // the message additionally + let ( + _, + _, + _, + _, + _, + message_proof, + ) = test_data::from_parachain::make_complex_relayer_delivery_proofs::< + >::BridgedChain, + RuntimeHelper::MB, + (), + >( + LaneId::default(), + vec![Instruction::<()>::ClearOrigin; 1_024].into(), + 1, + [GlobalConsensus(Polkadot), Parachain(1_000)].into(), + 1, + 5, + 1_000, + false, + ); + + let call = test_data::from_parachain::make_standalone_relayer_delivery_call::< + RuntimeHelper::Runtime, + RuntimeHelper::MPI, + _, + >( + message_proof, + helpers::relayer_id_at_bridged_chain::(), + ); + + compute_extrinsic_fee(call) + }) +} + +/// Estimates transaction fee for default message confirmation transaction (batched with required +/// proofs) from bridged parachain. +pub fn can_calculate_fee_for_standalone_message_confirmation_transaction( + collator_session_key: CollatorSessionKeys, + compute_extrinsic_fee: fn( + ::RuntimeCall, + ) -> u128, +) -> u128 +where + RuntimeHelper: WithRemoteParachainHelper, + AccountIdOf: From, + MessageThisChain: + bp_runtime::Chain>, + RuntimeCallOf: + From>, + UnderlyingChainOf>: + bp_runtime::Chain + Parachain, + >::BridgedChain: + bp_runtime::Chain + ChainWithGrandpa, + >::TargetHeaderChain: + TargetHeaderChain< + XcmAsPlainPayload, + AccountIdOf, + MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof< + HashOf>>, + >, + >, +{ + run_test::(collator_session_key, 1000, vec![], || { + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. + let unrewarded_relayers = UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 1, + ..Default::default() + }; + let (_, _, _, _, _, message_delivery_proof) = + test_data::from_parachain::make_complex_relayer_confirmation_proofs::< + >::BridgedChain, + RuntimeHelper::MB, + (), + >( + LaneId::default(), + 1, + 5, + 1_000, + AccountId32::from(Alice.public()).into(), + unrewarded_relayers.clone(), + ); + + let call = test_data::from_parachain::make_standalone_relayer_confirmation_call::< + RuntimeHelper::Runtime, + RuntimeHelper::MPI, + >(message_delivery_proof, unrewarded_relayers); + + compute_extrinsic_fee(call) + }) +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs index 2b48f2e3d51..0ce049cd1c4 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs @@ -193,6 +193,34 @@ where } } +/// Verifies that relayer balance is equal to given value. +pub struct VerifyRelayerBalance { + relayer: Runtime::AccountId, + balance: Runtime::Balance, +} + +impl VerifyRelayerBalance +where + Runtime: pallet_balances::Config, +{ + /// Expect given relayer balance after transaction. + pub fn expect_relayer_balance( + relayer: Runtime::AccountId, + balance: Runtime::Balance, + ) -> Box { + Box::new(Self { relayer, balance }) + } +} + +impl VerifyTransactionOutcome for VerifyRelayerBalance +where + Runtime: pallet_balances::Config, +{ + fn verify_outcome(&self) { + assert_eq!(pallet_balances::Pallet::::free_balance(&self.relayer), self.balance,); + } +} + /// Initialize bridge GRANDPA pallet. 
pub(crate) fn initialize_bridge_grandpa_pallet( init_data: bp_header_chain::InitializationData>, diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs index 017ec0fd540..e5d5e7cac96 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs @@ -121,6 +121,60 @@ where } } +/// Prepare a call with message proof. +pub fn make_standalone_relayer_delivery_call( + message_proof: FromBridgedChainMessagesProof>>, + relayer_id_at_bridged_chain: AccountIdOf>, +) -> Runtime::RuntimeCall +where + Runtime: pallet_bridge_grandpa::Config + + pallet_bridge_messages::Config< + MPI, + InboundPayload = XcmAsPlainPayload, + InboundRelayer = AccountIdOf>, + >, + MPI: 'static, + >::SourceHeaderChain: SourceHeaderChain< + MessagesProof = FromBridgedChainMessagesProof>>, + >, + Runtime::RuntimeCall: From>, +{ + pallet_bridge_messages::Call::::receive_messages_proof { + relayer_id_at_bridged_chain, + proof: message_proof, + messages_count: 1, + dispatch_weight: Weight::from_parts(1000000000, 0), + } + .into() +} + +/// Prepare a call with message delivery proof. +pub fn make_standalone_relayer_confirmation_call( + message_delivery_proof: FromBridgedChainMessagesDeliveryProof< + HashOf>, + >, + relayers_state: UnrewardedRelayersState, +) -> Runtime::RuntimeCall +where + Runtime: pallet_bridge_grandpa::Config + + pallet_bridge_messages::Config, + MPI: 'static, + >::TargetHeaderChain: TargetHeaderChain< + XcmAsPlainPayload, + Runtime::AccountId, + MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof< + HashOf>, + >, + >, + Runtime::RuntimeCall: From>, +{ + pallet_bridge_messages::Call::::receive_messages_delivery_proof { + proof: message_delivery_proof, + relayers_state, + } + .into() +} + /// Prepare storage proofs of messages, stored at the (bridged) source GRANDPA chain. 
pub fn make_complex_relayer_delivery_proofs( lane_id: LaneId, @@ -128,6 +182,7 @@ pub fn make_complex_relayer_delivery_proofs( message_nonce: MessageNonce, message_destination: Junctions, header_number: BlockNumberOf>, + is_minimal_call: bool, ) -> ( HeaderOf>, GrandpaJustification>>, @@ -153,7 +208,7 @@ where let (header, justification) = make_complex_bridged_grandpa_header_proof::< MessageBridgedChain, - >(state_root, header_number); + >(state_root, header_number, is_minimal_call); let message_proof = FromBridgedChainMessagesProof { bridged_header_hash: header.hash(), @@ -200,8 +255,11 @@ where StorageProofSize::Minimal(0), ); - let (header, justification) = - make_complex_bridged_grandpa_header_proof::(state_root, header_number); + let (header, justification) = make_complex_bridged_grandpa_header_proof::( + state_root, + header_number, + false, + ); let message_delivery_proof = FromBridgedChainMessagesDeliveryProof { bridged_header_hash: header.hash(), @@ -216,6 +274,7 @@ where pub fn make_complex_bridged_grandpa_header_proof( state_root: HashOf, header_number: BlockNumberOf, + is_minimal_call: bool, ) -> (HeaderOf, GrandpaJustification>) where BridgedChain: ChainWithGrandpa, @@ -229,7 +288,9 @@ where // `submit_finality_proof` call size would be close to maximal expected (and refundable) let extra_bytes_required = maximal_expected_submit_finality_proof_call_size::() .saturating_sub(header.encoded_size()); - header.digest_mut().push(DigestItem::Other(vec![42; extra_bytes_required])); + if !is_minimal_call { + header.digest_mut().push(DigestItem::Other(vec![42; extra_bytes_required])); + } let justification = make_default_justification(&header); (header, justification) diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs index 932ba231239..5d3cba4e53b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs @@ -159,6 +159,52 @@ where } } +/// Prepare a call with message proof. +pub fn make_standalone_relayer_delivery_call( + message_proof: FromBridgedChainMessagesProof, + relayer_id_at_bridged_chain: InboundRelayer, +) -> Runtime::RuntimeCall where + Runtime: pallet_bridge_messages::Config< + MPI, + InboundPayload = XcmAsPlainPayload, + InboundRelayer = InboundRelayer, + >, + MPI: 'static, + Runtime::RuntimeCall: From>, + <>::SourceHeaderChain as SourceHeaderChain>::MessagesProof: + From>, +{ + pallet_bridge_messages::Call::::receive_messages_proof { + relayer_id_at_bridged_chain: relayer_id_at_bridged_chain.into(), + proof: message_proof.into(), + messages_count: 1, + dispatch_weight: Weight::from_parts(1000000000, 0), + } + .into() +} + +/// Prepare a call with message delivery proof. +pub fn make_standalone_relayer_confirmation_call( + message_delivery_proof: FromBridgedChainMessagesDeliveryProof, + relayers_state: UnrewardedRelayersState, +) -> Runtime::RuntimeCall +where + Runtime: pallet_bridge_messages::Config, + MPI: 'static, + Runtime::RuntimeCall: From>, + >::TargetHeaderChain: TargetHeaderChain< + XcmAsPlainPayload, + Runtime::AccountId, + MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof, + >, +{ + pallet_bridge_messages::Call::::receive_messages_delivery_proof { + proof: message_delivery_proof, + relayers_state, + } + .into() +} + /// Prepare storage proofs of messages, stored at the source chain. 
pub fn make_complex_relayer_delivery_proofs<BridgedRelayChain, MB, InnerXcmRuntimeCall>( lane_id: LaneId, @@ -168,6 +214,7 @@ pub fn make_complex_relayer_delivery_proofs< para_header_number: u32, relay_header_number: u32, bridged_para_id: u32, + is_minimal_call: bool, ) -> ( HeaderOf<BridgedRelayChain>, GrandpaJustification<HeaderOf<BridgedRelayChain>>, @@ -201,6 +248,7 @@ where para_header_number, relay_header_number, bridged_para_id, + is_minimal_call, ); let message_proof = FromBridgedChainMessagesProof { @@ -266,6 +314,7 @@ where para_header_number, relay_header_number, bridged_para_id, + false, ); let message_delivery_proof = FromBridgedChainMessagesDeliveryProof { @@ -290,6 +339,7 @@ pub fn make_complex_bridged_parachain_heads_proof<BridgedRelayChain>( para_header_number: u32, relay_header_number: BlockNumberOf<BridgedRelayChain>, bridged_para_id: u32, + is_minimal_call: bool, ) -> ( HeaderOf<BridgedRelayChain>, GrandpaJustification<HeaderOf<BridgedRelayChain>>, @@ -319,9 +369,12 @@ where )]); assert_eq!(bridged_para_head.hash(), parachain_heads[0].1); - let (relay_chain_header, justification) = make_complex_bridged_grandpa_header_proof::< - BridgedRelayChain, - >(relay_state_root, relay_header_number); + let (relay_chain_header, justification) = + make_complex_bridged_grandpa_header_proof::<BridgedRelayChain>( + relay_state_root, + relay_header_number, + is_minimal_call, + ); (relay_chain_header, justification, bridged_para_head, parachain_heads, para_heads_proof) } diff --git a/prdoc/pr_4102.prdoc b/prdoc/pr_4102.prdoc new file mode 100644 index 00000000000..50c1ec23b2a --- /dev/null +++ b/prdoc/pr_4102.prdoc @@ -0,0 +1,43 @@ +title: "Bridge: make some headers submissions free" + +doc: + - audience: Runtime Dev + description: | + Adds a `FreeHeadersInterval` configuration constant to `pallet_bridge_grandpa`. + Transactions that improve the best known header by at least `FreeHeadersInterval` headers + are now free for the submitter. Additionally, we allow a single free parachain header + update for every free relay chain header. Bridge signed extensions and bridge runtime + APIs are adjusted to support the new scheme. Bridge fees are decreased by ~98%, because + they no longer include the cost of finality submissions - we assume relayers will be + submitting finality transactions for free. + +crates: + - name: bridge-runtime-common + bump: major + - name: bp-bridge-hub-cumulus + bump: patch + - name: bp-bridge-hub-kusama + bump: major + - name: bp-bridge-hub-polkadot + bump: major + - name: bp-bridge-hub-rococo + bump: major + - name: bp-bridge-hub-westend + bump: major + - name: pallet-bridge-grandpa + bump: major + - name: pallet-bridge-parachains + bump: major + - name: bp-parachains + bump: major + - name: bp-runtime + bump: major + - name: relay-substrate-client + bump: major + - name: bridge-hub-rococo-runtime + bump: major + - name: bridge-hub-westend-runtime + bump: major + - name: bridge-hub-test-utils + bump: minor -- GitLab From 7e68b2b8da9caf634ff4f6c6d96d2d7914c44fb7 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 25 Apr 2024 10:20:17 +0300 Subject: [PATCH 069/107] Bridge: added free headers submission support to the substrate-relay (#4157) Original PR: https://github.com/paritytech/parity-bridges-common/pull/2884.
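To make the new relay mode concrete, the decision rule that this patch threads through the finality loop can be reduced to the following self-contained sketch (an illustration only - the enum mirrors the `HeadersToRelay` type added in the diff below, and the numbers are invented):

	#[derive(PartialEq)]
	enum HeadersToRelay { All, Mandatory, Free }

	// mirrors the `need_to_relay` predicate added to bridges/relays/finality/src/headers.rs
	fn need_to_relay(mode: &HeadersToRelay, is_mandatory: bool, improvement: u32, free_interval: Option<u32>) -> bool {
		match mode {
			HeadersToRelay::All => true,
			HeadersToRelay::Mandatory => is_mandatory,
			HeadersToRelay::Free =>
				is_mandatory || free_interval.map(|i| improvement >= i).unwrap_or(false),
		}
	}

	fn main() {
		// the target chain accepts every 3rd header for free: an improvement of 3 qualifies
		assert!(need_to_relay(&HeadersToRelay::Free, false, 3, Some(3)));
		// an improvement of 2 is below the interval and the header is not mandatory
		assert!(!need_to_relay(&HeadersToRelay::Free, false, 2, Some(3)));
	}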
Since chain-specific code lives in the `parity-bridges-common` repo, some parts of original PR will require another PR --------- Co-authored-by: Adrian Catangiu --- bridges/chains/chain-kusama/src/lib.rs | 2 + bridges/chains/chain-polkadot/src/lib.rs | 2 + bridges/chains/chain-rococo/src/lib.rs | 2 + bridges/chains/chain-westend/src/lib.rs | 2 + bridges/relays/client-substrate/src/chain.rs | 9 + .../relays/client-substrate/src/test_chain.rs | 2 + bridges/relays/finality/README.md | 4 +- bridges/relays/finality/src/finality_loop.rs | 152 +++++++-- bridges/relays/finality/src/headers.rs | 143 ++++++++- bridges/relays/finality/src/lib.rs | 4 +- bridges/relays/finality/src/mock.rs | 5 + .../src/cli/relay_headers.rs | 18 +- .../src/cli/relay_headers_and_messages/mod.rs | 17 +- .../parachain_to_parachain.rs | 4 +- .../relay_to_parachain.rs | 4 +- .../relay_to_relay.rs | 4 +- .../src/cli/relay_parachains.rs | 14 +- .../lib-substrate-relay/src/finality/mod.rs | 16 +- .../src/finality/target.rs | 21 +- bridges/relays/lib-substrate-relay/src/lib.rs | 3 + .../src/on_demand/headers.rs | 16 +- .../src/on_demand/parachains.rs | 14 +- .../lib-substrate-relay/src/parachains/mod.rs | 2 + .../src/parachains/target.rs | 109 +++++-- .../relays/parachains/src/parachains_loop.rs | 303 ++++++++++++++++-- prdoc/pr_4157.prdoc | 29 ++ 26 files changed, 769 insertions(+), 132 deletions(-) create mode 100644 prdoc/pr_4157.prdoc diff --git a/bridges/chains/chain-kusama/src/lib.rs b/bridges/chains/chain-kusama/src/lib.rs index a81004afe81..fd7172c5869 100644 --- a/bridges/chains/chain-kusama/src/lib.rs +++ b/bridges/chains/chain-kusama/src/lib.rs @@ -67,6 +67,8 @@ pub const PARAS_PALLET_NAME: &str = "Paras"; /// Name of the With-Kusama GRANDPA pallet instance that is deployed at bridged chains. pub const WITH_KUSAMA_GRANDPA_PALLET_NAME: &str = "BridgeKusamaGrandpa"; +/// Name of the With-Kusama parachains pallet instance that is deployed at bridged chains. +pub const WITH_KUSAMA_BRIDGE_PARACHAINS_PALLET_NAME: &str = "BridgeKusamaParachains"; /// Maximal size of encoded `bp_parachains::ParaStoredHeaderData` structure among all Polkadot /// parachains. diff --git a/bridges/chains/chain-polkadot/src/lib.rs b/bridges/chains/chain-polkadot/src/lib.rs index 00d35783a9b..a8cac0467d5 100644 --- a/bridges/chains/chain-polkadot/src/lib.rs +++ b/bridges/chains/chain-polkadot/src/lib.rs @@ -69,6 +69,8 @@ pub const PARAS_PALLET_NAME: &str = "Paras"; /// Name of the With-Polkadot GRANDPA pallet instance that is deployed at bridged chains. pub const WITH_POLKADOT_GRANDPA_PALLET_NAME: &str = "BridgePolkadotGrandpa"; +/// Name of the With-Polkadot parachains pallet instance that is deployed at bridged chains. +pub const WITH_POLKADOT_BRIDGE_PARACHAINS_PALLET_NAME: &str = "BridgePolkadotParachains"; /// Maximal size of encoded `bp_parachains::ParaStoredHeaderData` structure among all Polkadot /// parachains. diff --git a/bridges/chains/chain-rococo/src/lib.rs b/bridges/chains/chain-rococo/src/lib.rs index 2385dd2cbb2..b290fe71c82 100644 --- a/bridges/chains/chain-rococo/src/lib.rs +++ b/bridges/chains/chain-rococo/src/lib.rs @@ -67,6 +67,8 @@ pub const PARAS_PALLET_NAME: &str = "Paras"; /// Name of the With-Rococo GRANDPA pallet instance that is deployed at bridged chains. pub const WITH_ROCOCO_GRANDPA_PALLET_NAME: &str = "BridgeRococoGrandpa"; +/// Name of the With-Rococo parachains pallet instance that is deployed at bridged chains. 
+pub const WITH_ROCOCO_BRIDGE_PARACHAINS_PALLET_NAME: &str = "BridgeRococoParachains"; /// Maximal size of encoded `bp_parachains::ParaStoredHeaderData` structure among all Rococo /// parachains. diff --git a/bridges/chains/chain-westend/src/lib.rs b/bridges/chains/chain-westend/src/lib.rs index b344b7f4bf9..ef451f7de0a 100644 --- a/bridges/chains/chain-westend/src/lib.rs +++ b/bridges/chains/chain-westend/src/lib.rs @@ -67,6 +67,8 @@ pub const PARAS_PALLET_NAME: &str = "Paras"; /// Name of the With-Westend GRANDPA pallet instance that is deployed at bridged chains. pub const WITH_WESTEND_GRANDPA_PALLET_NAME: &str = "BridgeWestendGrandpa"; +/// Name of the With-Westend parachains pallet instance that is deployed at bridged chains. +pub const WITH_WESTEND_BRIDGE_PARACHAINS_PALLET_NAME: &str = "BridgeWestendParachains"; /// Maximal size of encoded `bp_parachains::ParaStoredHeaderData` structure among all Westend /// parachains. diff --git a/bridges/relays/client-substrate/src/chain.rs b/bridges/relays/client-substrate/src/chain.rs index 2aba5f5674d..40269fe64c8 100644 --- a/bridges/relays/client-substrate/src/chain.rs +++ b/bridges/relays/client-substrate/src/chain.rs @@ -46,6 +46,12 @@ pub trait Chain: ChainBase + Clone { /// Keep in mind that this method is normally provided by the other chain, which is /// bridged with this chain. const BEST_FINALIZED_HEADER_ID_METHOD: &'static str; + /// Name of the runtime API method that is returning interval between source chain + /// headers that may be submitted for free to the target chain. + /// + /// Keep in mind that this method is normally provided by the other chain, which is + /// bridged with this chain. + const FREE_HEADERS_INTERVAL_METHOD: &'static str; /// Average block interval. /// @@ -75,6 +81,9 @@ pub trait ChainWithRuntimeVersion: Chain { pub trait RelayChain: Chain { /// Name of the `runtime_parachains::paras` pallet in the runtime of this chain. const PARAS_PALLET_NAME: &'static str; + /// Name of the `pallet-bridge-parachains`, deployed at the **bridged** chain to sync + /// parachains of **this** chain. 
+ const WITH_CHAIN_BRIDGE_PARACHAINS_PALLET_NAME: &'static str; } /// Substrate-based chain that is using direct GRANDPA finality from minimal relay-client point of diff --git a/bridges/relays/client-substrate/src/test_chain.rs b/bridges/relays/client-substrate/src/test_chain.rs index d1203a2c58e..cfd241c022a 100644 --- a/bridges/relays/client-substrate/src/test_chain.rs +++ b/bridges/relays/client-substrate/src/test_chain.rs @@ -56,6 +56,7 @@ impl bp_runtime::Chain for TestChain { impl Chain for TestChain { const NAME: &'static str = "Test"; const BEST_FINALIZED_HEADER_ID_METHOD: &'static str = "TestMethod"; + const FREE_HEADERS_INTERVAL_METHOD: &'static str = "TestMethod"; const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_millis(0); type SignedBlock = sp_runtime::generic::SignedBlock< @@ -124,6 +125,7 @@ impl bp_runtime::UnderlyingChainProvider for TestParachain { impl Chain for TestParachain { const NAME: &'static str = "TestParachain"; const BEST_FINALIZED_HEADER_ID_METHOD: &'static str = "TestParachainMethod"; + const FREE_HEADERS_INTERVAL_METHOD: &'static str = "TestParachainMethod"; const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_millis(0); type SignedBlock = sp_runtime::generic::SignedBlock< diff --git a/bridges/relays/finality/README.md b/bridges/relays/finality/README.md index 92e765cea0e..89b9d139958 100644 --- a/bridges/relays/finality/README.md +++ b/bridges/relays/finality/README.md @@ -33,7 +33,9 @@ node. The transaction is then tracked by the relay until it is mined and finaliz The main entrypoint for the crate is the [`run` function](./src/finality_loop.rs), which takes source and target clients and [`FinalitySyncParams`](./src/finality_loop.rs) parameters. The most important parameter is the `only_mandatory_headers` - if it is set to `true`, the relay will only submit mandatory headers. Since transactions -with mandatory headers are fee-free, the cost of running such relay is zero (in terms of fees). +with mandatory headers are fee-free, the cost of running such relay is zero (in terms of fees). If the similar +`only_free_headers` parameter is set to `true`, then free headers (if configured in the runtime) are also +relayed.
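As a concrete illustration of the README paragraph above (a sketch assuming the `FinalitySyncParams` and `HeadersToRelay` items from the finality_loop.rs diff that follows; the field values are illustrative, not recommended settings):

	use std::time::Duration;

	let params = FinalitySyncParams {
		tick: Duration::from_secs(60),
		recent_finality_proofs_limit: 1024,
		stall_timeout: Duration::from_secs(30),
		// replaces the old `only_mandatory_headers: bool` flag
		headers_to_relay: HeadersToRelay::Free,
	};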
## Finality Relay Metrics diff --git a/bridges/relays/finality/src/finality_loop.rs b/bridges/relays/finality/src/finality_loop.rs index e31d8a70812..8b3def868a4 100644 --- a/bridges/relays/finality/src/finality_loop.rs +++ b/bridges/relays/finality/src/finality_loop.rs @@ -29,7 +29,7 @@ use crate::{ use async_trait::async_trait; use backoff::{backoff::Backoff, ExponentialBackoff}; use futures::{future::Fuse, select, Future, FutureExt}; -use num_traits::Saturating; +use num_traits::{Saturating, Zero}; use relay_utils::{ metrics::MetricsParams, relay_loop::Client as RelayClient, retry_backoff, FailedClient, HeaderId, MaybeConnectionError, TrackedTransactionStatus, TransactionTracker, }; use std::{ time::{Duration, Instant}, }; +/// Type of headers that we relay. +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum HeadersToRelay { + /// Relay all headers. + All, + /// Relay only mandatory headers. + Mandatory, + /// Relay only free (including mandatory) headers. + Free, +} + /// Finality proof synchronization loop parameters. #[derive(Debug, Clone)] pub struct FinalitySyncParams { @@ -63,7 +74,7 @@ pub struct FinalitySyncParams { /// Timeout before we treat our transactions as lost and restart the whole sync process. pub stall_timeout: Duration, /// If true, only mandatory headers are relayed. - pub only_mandatory_headers: bool, + pub headers_to_relay: HeadersToRelay, } /// Source client used in finality synchronization loop. @@ -90,11 +101,16 @@ pub trait TargetClient<P: FinalitySyncPipeline>: RelayClient { &self, ) -> Result<HeaderId<P::Hash, P::Number>, Self::Error>; + /// Get free source headers submission interval, if it is configured in the + /// target runtime. + async fn free_source_headers_interval(&self) -> Result<Option<P::Number>, Self::Error>; + /// Submit header finality proof. async fn submit_finality_proof( &self, header: P::Header, proof: P::FinalityProof, + is_free_execution_expected: bool, ) -> Result<Self::TransactionTracker, Self::Error>; } @@ -104,9 +120,13 @@ pub fn metrics_prefix<P: FinalitySyncPipeline>() -> String { format!("{}_to_{}_Sync", P::SOURCE_NAME, P::TARGET_NAME) } +/// Finality sync information. pub struct SyncInfo<P: FinalitySyncPipeline> { + /// Best finalized header at the source client. pub best_number_at_source: P::Number, + /// Best source header, known to the target client. pub best_number_at_target: P::Number, + /// Whether the target client follows the same fork as the source client does. pub is_using_same_fork: bool, } @@ -183,6 +203,7 @@ impl<Tracker: TransactionTracker, Number> Transaction<Tracker, Number> target_client: &TC, header: P::Header, justification: P::FinalityProof, + is_free_execution_expected: bool, ) -> Result<Self, TC::Error> { let header_number = header.number(); log::debug!( @@ -193,7 +214,9 @@ impl<Tracker: TransactionTracker, Number> Transaction<Tracker, Number> P::TARGET_NAME, ); - let tracker = target_client.submit_finality_proof(header, justification).await?; + let tracker = target_client + .submit_finality_proof(header, justification, is_free_execution_expected) + .await?; Ok(Transaction { tracker, header_number }) } @@ ... @@ impl<P: FinalitySyncPipeline, SC: SourceClient<P>, TC: TargetClient<P>> Finality pub async fn select_header_to_submit( &mut self, info: &SyncInfo<P>, + free_headers_interval: Option<P::Number>, ) -> Result<Option<JustifiedHeader<P>>, Error<P, SC::Error, TC::Error>> { // to see that the loop is progressing log::trace!( @@ -302,9 +326,15 @@ impl<P: FinalitySyncPipeline, SC: SourceClient<P>, TC: TargetClient<P>> Finality ); // read missing headers - let selector = JustifiedHeaderSelector::new::<SC, TC>(&self.source_client, info).await?; + let selector = JustifiedHeaderSelector::new::<SC, TC>( + &self.source_client, + info, + self.sync_params.headers_to_relay, + free_headers_interval, + ) + .await?; // if we see that the header schedules GRANDPA change, we need to submit it - if self.sync_params.only_mandatory_headers { + if self.sync_params.headers_to_relay == HeadersToRelay::Mandatory { return Ok(selector.select_mandatory()) } @@ -312,7 +342,12 @@ impl<P: FinalitySyncPipeline, SC: SourceClient<P>, TC: TargetClient<P>> Finality // => even if we have already selected some header and its persistent finality proof, // we may try to select better header by reading non-persistent proofs from the stream self.finality_proofs_buf.fill(&mut self.finality_proofs_stream); - let maybe_justified_header = selector.select(&self.finality_proofs_buf); + let maybe_justified_header = selector.select( + info, + self.sync_params.headers_to_relay, + free_headers_interval, + &self.finality_proofs_buf, + ); // remove obsolete 'recent' finality proofs + keep its size under certain limit let oldest_finality_proof_to_keep = maybe_justified_header @@ -329,6 +364,7 @@ impl<P: FinalitySyncPipeline, SC: SourceClient<P>, TC: TargetClient<P>> Finality pub async fn run_iteration( &mut self, + free_headers_interval: Option<P::Number>, ) -> Result< Option<Transaction<TC::TransactionTracker, P::Number>>, Error<P, SC::Error, TC::Error>, > { @@ -345,12 +381,16 @@ impl<P: FinalitySyncPipeline, SC: SourceClient<P>, TC: TargetClient<P>> Finality } // submit new header if we have something new - match self.select_header_to_submit(&info).await? { + match self.select_header_to_submit(&info, free_headers_interval).await? { Some(header) => { - let transaction = - Transaction::submit(&self.target_client, header.header, header.proof) - .await - .map_err(Error::Target)?; + let transaction = Transaction::submit( + &self.target_client, + header.header, + header.proof, + self.sync_params.headers_to_relay == HeadersToRelay::Free, + ) + .await + .map_err(Error::Target)?; self.best_submitted_number = Some(transaction.header_number); Ok(Some(transaction)) }, @@ -378,9 +418,11 @@ impl<P: FinalitySyncPipeline, SC: SourceClient<P>, TC: TargetClient<P>> Finality let exit_signal = exit_signal.fuse(); futures::pin_mut!(exit_signal, proof_submission_tx_tracker); + let free_headers_interval = free_headers_interval(&self.target_client).await?; + loop { // run loop iteration - let next_tick = match self.run_iteration().await { + let next_tick = match self.run_iteration(free_headers_interval).await { Ok(Some(tx)) => { proof_submission_tx_tracker .set(tx.track::<P, TC>(self.target_client.clone()).fuse()); @@ -433,6 +475,52 @@ impl<P: FinalitySyncPipeline, SC: SourceClient<P>, TC: TargetClient<P>> Finality } } +async fn free_headers_interval<P: FinalitySyncPipeline>( + target_client: &impl TargetClient<P>, +) -> Result<Option<P::Number>, FailedClient> { + match target_client.free_source_headers_interval().await { + Ok(Some(free_headers_interval)) if !free_headers_interval.is_zero() => { + log::trace!( + target: "bridge", + "Free headers interval for {} headers at {} is: {:?}", + P::SOURCE_NAME, + P::TARGET_NAME, + free_headers_interval, + ); + Ok(Some(free_headers_interval)) + }, + Ok(Some(_free_headers_interval)) => { + log::trace!( + target: "bridge", + "Free headers interval for {} headers at {} is zero. Not submitting any free headers", + P::SOURCE_NAME, + P::TARGET_NAME, + ); + Ok(None) + }, + Ok(None) => { + log::trace!( + target: "bridge", + "Free headers interval for {} headers at {} is None. Not submitting any free headers", + P::SOURCE_NAME, + P::TARGET_NAME, + ); + + Ok(None) + }, + Err(e) => { + log::error!( + target: "bridge", + "Failed to read free headers interval for {} headers at {}: {:?}", + P::SOURCE_NAME, + P::TARGET_NAME, + e, + ); + Err(FailedClient::Target) + }, + } +} +
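A brief standalone illustration of the zero/None handling in the helper above (not part of the patch): the loop only enables free submissions when the target runtime reports a non-zero interval, and then only headers improving the best known header by at least that interval qualify.

	// equivalent of the first two match arms above, with invented numbers
	fn effective_interval(reported: Option<u32>) -> Option<u32> {
		reported.filter(|interval| *interval > 0)
	}

	fn main() {
		assert_eq!(effective_interval(Some(4)), Some(4)); // free submission every >= 4 headers
		assert_eq!(effective_interval(Some(0)), None);    // zero disables free submissions
		assert_eq!(effective_interval(None), None);       // not configured => disabled
	}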
/// Run finality proofs synchronization loop. pub async fn run<P: FinalitySyncPipeline>( source_client: impl SourceClient<P>, @@ -509,7 +597,7 @@ mod tests { tick: Duration::from_secs(0), recent_finality_proofs_limit: 1024, stall_timeout: Duration::from_secs(1), - only_mandatory_headers: false, + headers_to_relay: HeadersToRelay::All, } } @@ -593,8 +681,8 @@ mod tests { ); } - fn run_only_mandatory_headers_mode_test( - only_mandatory_headers: bool, + fn run_headers_to_relay_mode_test( + headers_to_relay: HeadersToRelay, has_mandatory_headers: bool, ) -> Option<JustifiedHeader<TestFinalitySyncPipeline>> { let (exit_sender, _) = futures::channel::mpsc::unbounded(); @@ -619,7 +707,7 @@ mod tests { tick: Duration::from_secs(0), recent_finality_proofs_limit: 0, stall_timeout: Duration::from_secs(0), - only_mandatory_headers, + headers_to_relay, }, None, ); @@ -628,16 +716,22 @@ mod tests { best_number_at_target: 5, is_using_same_fork: true, }; - finality_loop.select_header_to_submit(&info).await.unwrap() + finality_loop.select_header_to_submit(&info, Some(3)).await.unwrap() }) } #[test] - fn select_header_to_submit_skips_non_mandatory_headers_when_only_mandatory_headers_are_required( - ) { - assert_eq!(run_only_mandatory_headers_mode_test(true, false), None); + fn select_header_to_submit_may_select_non_mandatory_header() { + assert_eq!(run_headers_to_relay_mode_test(HeadersToRelay::Mandatory, false), None); assert_eq!( - run_only_mandatory_headers_mode_test(false, false), + run_headers_to_relay_mode_test(HeadersToRelay::Free, false), + Some(JustifiedHeader { + header: TestSourceHeader(false, 10, 10), + proof: TestFinalityProof(10) + }), + ); + assert_eq!( + run_headers_to_relay_mode_test(HeadersToRelay::All, false), Some(JustifiedHeader { header: TestSourceHeader(false, 10, 10), proof: TestFinalityProof(10) @@ -646,17 +740,23 @@ mod tests { } #[test] - fn select_header_to_submit_selects_mandatory_headers_when_only_mandatory_headers_are_required() - { + fn select_header_to_submit_may_select_mandatory_header() { + assert_eq!( + run_headers_to_relay_mode_test(HeadersToRelay::Mandatory, true), + Some(JustifiedHeader { + header: TestSourceHeader(true, 8, 8), + proof: TestFinalityProof(8) + }), + ); assert_eq!( - run_only_mandatory_headers_mode_test(true, true), + run_headers_to_relay_mode_test(HeadersToRelay::Free, true), Some(JustifiedHeader { header: TestSourceHeader(true, 8, 8), proof: TestFinalityProof(8) }), ); assert_eq!( - run_only_mandatory_headers_mode_test(false, true), + run_headers_to_relay_mode_test(HeadersToRelay::All, true), Some(JustifiedHeader { header: TestSourceHeader(true, 8, 8), proof: TestFinalityProof(8) @@ -690,7 +790,7 @@ mod tests { test_sync_params(), Some(metrics_sync.clone()), ); - finality_loop.run_iteration().await.unwrap() + finality_loop.run_iteration(None).await.unwrap() }); assert!(!metrics_sync.is_using_same_fork()); diff --git a/bridges/relays/finality/src/headers.rs b/bridges/relays/finality/src/headers.rs index 91f7cd0378e..5bba4a38456 100644 --- a/bridges/relays/finality/src/headers.rs +++ b/bridges/relays/finality/src/headers.rs @@ -16,10 +16,11 @@ use crate::{ finality_loop::SyncInfo, finality_proofs::FinalityProofsBuf, Error, FinalitySyncPipeline, - SourceClient, SourceHeader, TargetClient, + HeadersToRelay, SourceClient, SourceHeader, TargetClient, }; use bp_header_chain::FinalityProof; +use num_traits::Saturating; use std::cmp::Ordering; /// Unjustified headers container. Ordered by header number. @@ -50,9 +51,13 @@ pub enum JustifiedHeaderSelector<P: FinalitySyncPipeline> { } impl<P: FinalitySyncPipeline> JustifiedHeaderSelector<P> { + /// Selects last header with persistent justification, missing from the target and matching + /// the `headers_to_relay` criteria. pub(crate) async fn new<SC: SourceClient<P>, TC: TargetClient<P>>( source_client: &SC, info: &SyncInfo<P>, + headers_to_relay: HeadersToRelay, + free_headers_interval: Option<P::Number>, ) -> Result<Self, Error<P, SC::Error, TC::Error>> { let mut unjustified_headers = Vec::new(); let mut maybe_justified_header = None;
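As a reading aid between the two hunks (annotation with hypothetical values, not patch content), the selector that `new` builds ends up in one of three states, which `select` below may still improve using buffered ephemeral proofs:

	// Mandatory(h8)              - a mandatory header was found; it always wins
	// Regular(vec![h9, h10], h7) - h7 is justified and matched the `headers_to_relay`
	//                              criteria; h9/h10 may still replace it in `select`
	// None(vec![h6, ..., h10])   - nothing selectable yet; only `select` can pick one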
@@ -70,12 +75,19 @@ impl<P: FinalitySyncPipeline> JustifiedHeaderSelector<P> { return Ok(Self::Mandatory(JustifiedHeader { header, proof })) }, (true, None) => return Err(Error::MissingMandatoryFinalityProof(header.number())), - (false, Some(proof)) => { + (false, Some(proof)) + if need_to_relay::<P>( + info, + headers_to_relay, + free_headers_interval, + &header, + ) => + { log::trace!(target: "bridge", "Header {:?} has persistent finality proof", header_number); unjustified_headers.clear(); maybe_justified_header = Some(JustifiedHeader { header, proof }); }, - (false, None) => { + _ => { unjustified_headers.push(header); }, } @@ -97,6 +109,7 @@ impl<P: FinalitySyncPipeline> JustifiedHeaderSelector<P> { }) } + /// Returns selected mandatory header if we have seen one. Otherwise returns `None`. pub fn select_mandatory(self) -> Option<JustifiedHeader<P>> { match self { JustifiedHeaderSelector::Mandatory(header) => Some(header), @@ -104,7 +117,15 @@ impl<P: FinalitySyncPipeline> JustifiedHeaderSelector<P> { } } - pub fn select(self, buf: &FinalityProofsBuf<P>) -> Option<JustifiedHeader<P>> {
) -> Option> { + /// Tries to improve previously selected header using ephemeral + /// justifications stream. + pub fn select( + self, + info: &SyncInfo
<P>
, + headers_to_relay: HeadersToRelay, + free_headers_interval: Option, + buf: &FinalityProofsBuf
<P>
, + ) -> Option> { let (unjustified_headers, maybe_justified_header) = match self { JustifiedHeaderSelector::Mandatory(justified_header) => return Some(justified_header), JustifiedHeaderSelector::Regular(unjustified_headers, justified_header) => @@ -122,7 +143,14 @@ impl JustifiedHeaderSelector
<P>
{ (maybe_finality_proof, maybe_unjustified_header) { match finality_proof.target_header_number().cmp(&unjustified_header.number()) { - Ordering::Equal => { + Ordering::Equal + if need_to_relay::
<P>
( + info, + headers_to_relay, + free_headers_interval, + &unjustified_header, + ) => + { log::trace!( target: "bridge", "Managed to improve selected {} finality proof {:?} to {:?}.", @@ -135,6 +163,10 @@ impl JustifiedHeaderSelector
<P>
{ proof: finality_proof.clone(), }) }, + Ordering::Equal => { + maybe_finality_proof = finality_proofs_iter.next(); + maybe_unjustified_header = unjustified_headers_iter.next(); + }, Ordering::Less => maybe_unjustified_header = unjustified_headers_iter.next(), Ordering::Greater => { maybe_finality_proof = finality_proofs_iter.next(); @@ -152,6 +184,27 @@ impl JustifiedHeaderSelector
<P>
{ } } +/// Returns true if we want to relay header `header_number`. +fn need_to_relay<P: FinalitySyncPipeline>( + info: &SyncInfo
<P>
, + headers_to_relay: HeadersToRelay, + free_headers_interval: Option, + header: &P::Header, +) -> bool { + match headers_to_relay { + HeadersToRelay::All => true, + HeadersToRelay::Mandatory => header.is_mandatory(), + HeadersToRelay::Free => + header.is_mandatory() || + free_headers_interval + .map(|free_headers_interval| { + header.number().saturating_sub(info.best_number_at_target) >= + free_headers_interval + }) + .unwrap_or(false), + } +} + #[cfg(test)] mod tests { use super::*; @@ -159,13 +212,22 @@ mod tests { #[test] fn select_better_recent_finality_proof_works() { + let info = SyncInfo { + best_number_at_source: 10, + best_number_at_target: 5, + is_using_same_fork: true, + }; + // if there are no unjustified headers, nothing is changed let finality_proofs_buf = FinalityProofsBuf::::new(vec![TestFinalityProof(5)]); let justified_header = JustifiedHeader { header: TestSourceHeader(false, 2, 2), proof: TestFinalityProof(2) }; let selector = JustifiedHeaderSelector::Regular(vec![], justified_header.clone()); - assert_eq!(selector.select(&finality_proofs_buf), Some(justified_header)); + assert_eq!( + selector.select(&info, HeadersToRelay::All, None, &finality_proofs_buf), + Some(justified_header) + ); // if there are no buffered finality proofs, nothing is changed let finality_proofs_buf = FinalityProofsBuf::::new(vec![]); @@ -175,7 +237,10 @@ mod tests { vec![TestSourceHeader(false, 5, 5)], justified_header.clone(), ); - assert_eq!(selector.select(&finality_proofs_buf), Some(justified_header)); + assert_eq!( + selector.select(&info, HeadersToRelay::All, None, &finality_proofs_buf), + Some(justified_header) + ); // if there's no intersection between recent finality proofs and unjustified headers, // nothing is changed @@ -189,7 +254,10 @@ mod tests { vec![TestSourceHeader(false, 9, 9), TestSourceHeader(false, 10, 10)], justified_header.clone(), ); - assert_eq!(selector.select(&finality_proofs_buf), Some(justified_header)); + assert_eq!( + selector.select(&info, HeadersToRelay::All, None, &finality_proofs_buf), + Some(justified_header) + ); // if there's intersection between recent finality proofs and unjustified headers, but there // are no proofs in this intersection, nothing is changed @@ -207,7 +275,10 @@ mod tests { ], justified_header.clone(), ); - assert_eq!(selector.select(&finality_proofs_buf), Some(justified_header)); + assert_eq!( + selector.select(&info, HeadersToRelay::All, None, &finality_proofs_buf), + Some(justified_header) + ); // if there's intersection between recent finality proofs and unjustified headers and // there's a proof in this intersection: @@ -228,11 +299,63 @@ mod tests { justified_header, ); assert_eq!( - selector.select(&finality_proofs_buf), + selector.select(&info, HeadersToRelay::All, None, &finality_proofs_buf), Some(JustifiedHeader { header: TestSourceHeader(false, 9, 9), proof: TestFinalityProof(9) }) ); + + // when only free headers needs to be relayed and there are no free headers + let finality_proofs_buf = FinalityProofsBuf::::new(vec![ + TestFinalityProof(7), + TestFinalityProof(9), + ]); + let selector = JustifiedHeaderSelector::None(vec![ + TestSourceHeader(false, 8, 8), + TestSourceHeader(false, 9, 9), + TestSourceHeader(false, 10, 10), + ]); + assert_eq!( + selector.select(&info, HeadersToRelay::Free, Some(7), &finality_proofs_buf), + None, + ); + + // when only free headers needs to be relayed, mandatory header may be selected + let finality_proofs_buf = FinalityProofsBuf::::new(vec![ + TestFinalityProof(6), + TestFinalityProof(9), + ]); 
+ let selector = JustifiedHeaderSelector::None(vec![ + TestSourceHeader(false, 8, 8), + TestSourceHeader(true, 9, 9), + TestSourceHeader(false, 10, 10), + ]); + assert_eq!( + selector.select(&info, HeadersToRelay::Free, Some(7), &finality_proofs_buf), + Some(JustifiedHeader { + header: TestSourceHeader(true, 9, 9), + proof: TestFinalityProof(9) + }) + ); + + // when only free headers needs to be relayed and there is free header + let finality_proofs_buf = FinalityProofsBuf::::new(vec![ + TestFinalityProof(7), + TestFinalityProof(9), + TestFinalityProof(14), + ]); + let selector = JustifiedHeaderSelector::None(vec![ + TestSourceHeader(false, 7, 7), + TestSourceHeader(false, 10, 10), + TestSourceHeader(false, 14, 14), + ]); + assert_eq!( + selector.select(&info, HeadersToRelay::Free, Some(7), &finality_proofs_buf), + Some(JustifiedHeader { + header: TestSourceHeader(false, 14, 14), + proof: TestFinalityProof(14) + }) + ); } } diff --git a/bridges/relays/finality/src/lib.rs b/bridges/relays/finality/src/lib.rs index 3579e68e1ef..4346f96674b 100644 --- a/bridges/relays/finality/src/lib.rs +++ b/bridges/relays/finality/src/lib.rs @@ -21,7 +21,9 @@ pub use crate::{ base::{FinalityPipeline, SourceClientBase}, - finality_loop::{metrics_prefix, run, FinalitySyncParams, SourceClient, TargetClient}, + finality_loop::{ + metrics_prefix, run, FinalitySyncParams, HeadersToRelay, SourceClient, TargetClient, + }, finality_proofs::{FinalityProofsBuf, FinalityProofsStream}, sync_loop_metrics::SyncLoopMetrics, }; diff --git a/bridges/relays/finality/src/mock.rs b/bridges/relays/finality/src/mock.rs index e3ec4e4d0d4..69357f71ce2 100644 --- a/bridges/relays/finality/src/mock.rs +++ b/bridges/relays/finality/src/mock.rs @@ -198,10 +198,15 @@ impl TargetClient for TestTargetClient { Ok(data.target_best_block_id) } + async fn free_source_headers_interval(&self) -> Result, TestError> { + Ok(Some(3)) + } + async fn submit_finality_proof( &self, header: TestSourceHeader, proof: TestFinalityProof, + _is_free_execution_expected: bool, ) -> Result { let mut data = self.data.lock(); (self.on_method_call)(&mut data); diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs index 90558ed4613..cf1957c7323 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs @@ -24,6 +24,7 @@ use relay_utils::metrics::{GlobalMetrics, StandaloneMetric}; use crate::{ cli::{bridge::*, chain_schema::*, PrometheusParams}, finality::SubstrateFinalitySyncPipeline, + HeadersToRelay, }; /// Chain headers relaying params. @@ -33,6 +34,10 @@ pub struct RelayHeadersParams { /// are relayed. #[structopt(long)] only_mandatory_headers: bool, + /// If passed, only free headers (mandatory and every Nth header, if configured in runtime) + /// are relayed. Overrides `only_mandatory_headers`. + #[structopt(long)] + only_free_headers: bool, #[structopt(flatten)] source: SourceConnectionParams, #[structopt(flatten)] @@ -43,11 +48,22 @@ pub struct RelayHeadersParams { prometheus_params: PrometheusParams, } +impl RelayHeadersParams { + fn headers_to_relay(&self) -> HeadersToRelay { + match (self.only_mandatory_headers, self.only_free_headers) { + (_, true) => HeadersToRelay::Free, + (true, false) => HeadersToRelay::Mandatory, + _ => HeadersToRelay::All, + } + } +} + /// Trait used for relaying headers between 2 chains. 
#[async_trait] pub trait HeadersRelayer: RelayToRelayHeadersCliBridge { /// Relay headers. async fn relay_headers(data: RelayHeadersParams) -> anyhow::Result<()> { + let headers_to_relay = data.headers_to_relay(); let source_client = data.source.into_client::().await?; let target_client = data.target.into_client::().await?; let target_transactions_mortality = data.target_sign.target_transactions_mortality; @@ -67,7 +83,7 @@ pub trait HeadersRelayer: RelayToRelayHeadersCliBridge { crate::finality::run::( source_client, target_client, - data.only_mandatory_headers, + headers_to_relay, target_transactions_params, metrics_params, ) diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs index 27e9f1c21ba..a796df6721b 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs @@ -40,7 +40,7 @@ use crate::{ cli::{bridge::MessagesCliBridge, HexLaneId, PrometheusParams}, messages_lane::{MessagesRelayLimits, MessagesRelayParams}, on_demand::OnDemandRelay, - TaggedAccount, TransactionParams, + HeadersToRelay, TaggedAccount, TransactionParams, }; use bp_messages::LaneId; use bp_runtime::BalanceOf; @@ -61,11 +61,25 @@ pub struct HeadersAndMessagesSharedParams { /// are relayed. #[structopt(long)] pub only_mandatory_headers: bool, + /// If passed, only free headers (mandatory and every Nth header, if configured in runtime) + /// are relayed. Overrides `only_mandatory_headers`. + #[structopt(long)] + pub only_free_headers: bool, #[structopt(flatten)] /// Prometheus metrics params. pub prometheus_params: PrometheusParams, } +impl HeadersAndMessagesSharedParams { + fn headers_to_relay(&self) -> HeadersToRelay { + match (self.only_mandatory_headers, self.only_free_headers) { + (_, true) => HeadersToRelay::Free, + (true, false) => HeadersToRelay::Mandatory, + _ => HeadersToRelay::All, + } + } +} + /// Bridge parameters, shared by all bridge types. 
pub struct Full2WayBridgeCommonParams< Left: ChainWithTransactions + ChainWithRuntimeVersion, @@ -418,6 +432,7 @@ mod tests { shared: HeadersAndMessagesSharedParams { lane: vec![HexLaneId([0x00, 0x00, 0x00, 0x00])], only_mandatory_headers: false, + only_free_headers: false, prometheus_params: PrometheusParams { no_prometheus: false, prometheus_host: "0.0.0.0".into(), diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs index 76accfa2905..7f6f4077782 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs @@ -180,7 +180,7 @@ where self.left_relay.clone(), self.common.right.client.clone(), self.common.right.tx_params.clone(), - self.common.shared.only_mandatory_headers, + self.common.shared.headers_to_relay(), Some(self.common.metrics_params.clone()), ); let right_relay_to_left_on_demand_headers = @@ -188,7 +188,7 @@ where self.right_relay.clone(), self.common.left.client.clone(), self.common.left.tx_params.clone(), - self.common.shared.only_mandatory_headers, + self.common.shared.headers_to_relay(), Some(self.common.metrics_params.clone()), ); diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs index b75ac3e60c2..5911fe49df4 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs @@ -171,7 +171,7 @@ where self.common.left.client.clone(), self.common.right.client.clone(), self.common.right.tx_params.clone(), - self.common.shared.only_mandatory_headers, + self.common.shared.headers_to_relay(), None, ); let right_relay_to_left_on_demand_headers = @@ -179,7 +179,7 @@ where self.right_relay.clone(), self.common.left.client.clone(), self.common.left.tx_params.clone(), - self.common.shared.only_mandatory_headers, + self.common.shared.headers_to_relay(), Some(self.common.metrics_params.clone()), ); let right_to_left_on_demand_parachains = OnDemandParachainsRelay::< diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs index b397ff50a20..832df4ae400 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs @@ -152,7 +152,7 @@ where self.common.left.client.clone(), self.common.right.client.clone(), self.common.right.tx_params.clone(), - self.common.shared.only_mandatory_headers, + self.common.shared.headers_to_relay(), None, ); let right_to_left_on_demand_headers = @@ -160,7 +160,7 @@ where self.common.right.client.clone(), self.common.left.client.clone(), self.common.left.tx_params.clone(), - self.common.shared.only_mandatory_headers, + self.common.shared.headers_to_relay(), None, ); diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs index e5a52349469..1425233add1 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs +++ 
b/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs @@ -43,6 +43,10 @@ pub struct RelayParachainsParams { target: TargetConnectionParams, #[structopt(flatten)] target_sign: TargetSigningParams, + /// If passed, only free headers (those, available at "free" relay chain headers) + /// are relayed. + #[structopt(long)] + only_free_headers: bool, #[structopt(flatten)] prometheus_params: PrometheusParams, } @@ -59,9 +63,9 @@ where { /// Start relaying parachains finality. async fn relay_parachains(data: RelayParachainsParams) -> anyhow::Result<()> { - let source_client = data.source.into_client::().await?; + let source_chain_client = data.source.into_client::().await?; let source_client = ParachainsSource::::new( - source_client, + source_chain_client.clone(), Arc::new(Mutex::new(AvailableHeader::Missing)), ); @@ -69,9 +73,10 @@ where signer: data.target_sign.to_keypair::()?, mortality: data.target_sign.target_transactions_mortality, }; - let target_client = data.target.into_client::().await?; + let target_chain_client = data.target.into_client::().await?; let target_client = ParachainsTarget::::new( - target_client.clone(), + source_chain_client, + target_chain_client, target_transaction_params, ); @@ -83,6 +88,7 @@ where source_client, target_client, metrics_params, + data.only_free_headers, futures::future::pending(), ) .await diff --git a/bridges/relays/lib-substrate-relay/src/finality/mod.rs b/bridges/relays/lib-substrate-relay/src/finality/mod.rs index 206f628b143..a06857ae1d9 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/mod.rs @@ -25,7 +25,7 @@ use crate::{ use async_trait::async_trait; use bp_header_chain::justification::{GrandpaJustification, JustificationVerificationContext}; -use finality_relay::{FinalityPipeline, FinalitySyncPipeline}; +use finality_relay::{FinalityPipeline, FinalitySyncPipeline, HeadersToRelay}; use pallet_bridge_grandpa::{Call as BridgeGrandpaCall, Config as BridgeGrandpaConfig}; use relay_substrate_client::{ transaction_stall_timeout, AccountIdOf, AccountKeyPairOf, BlockNumberOf, CallOf, Chain, @@ -115,6 +115,7 @@ pub trait SubmitFinalityProofCallBuilder { fn build_submit_finality_proof_call( header: SyncHeader>, proof: SubstrateFinalityProof
<P>
, + is_free_execution_expected: bool, context: <
P as SubstrateFinalityPipeline>
::FinalityEngine as Engine>::FinalityVerificationContext, ) -> CallOf; } @@ -142,6 +143,7 @@ where fn build_submit_finality_proof_call( header: SyncHeader>, proof: GrandpaJustification>, + _is_free_execution_expected: bool, _context: JustificationVerificationContext, ) -> CallOf { BridgeGrandpaCall::::submit_finality_proof { @@ -176,6 +178,7 @@ macro_rules! generate_submit_finality_proof_call_builder { <$pipeline as $crate::finality_base::SubstrateFinalityPipeline>::SourceChain > >, + _is_free_execution_expected: bool, _context: bp_header_chain::justification::JustificationVerificationContext, ) -> relay_substrate_client::CallOf< <$pipeline as $crate::finality_base::SubstrateFinalityPipeline>::TargetChain @@ -215,6 +218,7 @@ macro_rules! generate_submit_finality_proof_ex_call_builder { <$pipeline as $crate::finality_base::SubstrateFinalityPipeline>::SourceChain > >, + is_free_execution_expected: bool, context: bp_header_chain::justification::JustificationVerificationContext, ) -> relay_substrate_client::CallOf< <$pipeline as $crate::finality_base::SubstrateFinalityPipeline>::TargetChain @@ -223,7 +227,8 @@ macro_rules! generate_submit_finality_proof_ex_call_builder { $bridge_grandpa($submit_finality_proof { finality_target: Box::new(header.into_inner()), justification: proof, - current_set_id: context.authority_set_id + current_set_id: context.authority_set_id, + is_free_execution_expected, }) } } @@ -235,15 +240,16 @@ macro_rules! generate_submit_finality_proof_ex_call_builder { pub async fn run( source_client: Client, target_client: Client, - only_mandatory_headers: bool, + headers_to_relay: HeadersToRelay, transaction_params: TransactionParams>, metrics_params: MetricsParams, ) -> anyhow::Result<()> { log::info!( target: "bridge", - "Starting {} -> {} finality proof relay", + "Starting {} -> {} finality proof relay: relaying {:?} headers", P::SourceChain::NAME, P::TargetChain::NAME, + headers_to_relay, ); finality_relay::run( @@ -260,7 +266,7 @@ pub async fn run( P::TargetChain::AVERAGE_BLOCK_INTERVAL, relay_utils::STALL_TIMEOUT, ), - only_mandatory_headers, + headers_to_relay, }, metrics_params, futures::future::pending(), diff --git a/bridges/relays/lib-substrate-relay/src/finality/target.rs b/bridges/relays/lib-substrate-relay/src/finality/target.rs index 18464d523f4..adbcfe0096d 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/target.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/target.rs @@ -25,9 +25,10 @@ use crate::{ }; use async_trait::async_trait; +use bp_runtime::BlockNumberOf; use finality_relay::TargetClient; use relay_substrate_client::{ - AccountKeyPairOf, Client, Error, HeaderIdOf, HeaderOf, SyncHeader, TransactionEra, + AccountKeyPairOf, Chain, Client, Error, HeaderIdOf, HeaderOf, SyncHeader, TransactionEra, TransactionTracker, UnsignedTransaction, }; use relay_utils::relay_loop::Client as RelayClient; @@ -103,10 +104,23 @@ impl TargetClient Result>, Self::Error> { + self.client + .typed_state_call( + P::SourceChain::FREE_HEADERS_INTERVAL_METHOD.into(), + (), + Some(self.client.best_header().await?.hash()), + ) + .await + } + async fn submit_finality_proof( &self, header: SyncHeader>, mut proof: SubstrateFinalityProof
<P>
, + is_free_execution_expected: bool, ) -> Result { // verify and runtime module at target chain may require optimized finality proof let context = @@ -115,7 +129,10 @@ impl TargetClient OnDemandHeadersRelay
<P>
{ source_client: Client, target_client: Client, target_transaction_params: TransactionParams>, - only_mandatory_headers: bool, + headers_to_relay: HeadersToRelay, metrics_params: Option, ) -> Self where @@ -94,7 +94,7 @@ impl OnDemandHeadersRelay
<P>
{ source_client, target_client, target_transaction_params, - only_mandatory_headers, + headers_to_relay, required_header_number, metrics_params, ) @@ -191,7 +191,7 @@ impl OnDemandRelay( source_client: Client, target_client: Client, target_transaction_params: TransactionParams>, - only_mandatory_headers: bool, + headers_to_relay: HeadersToRelay, required_header_number: RequiredHeaderNumberRef, metrics_params: Option, ) where @@ -346,11 +346,11 @@ async fn background_task( log::info!( target: "bridge", "[{}] Starting on-demand headers relay task\n\t\ - Only mandatory headers: {}\n\t\ + Headers to relay: {:?}\n\t\ Tx mortality: {:?} (~{}m)\n\t\ Stall timeout: {:?}", relay_task_name, - only_mandatory_headers, + headers_to_relay, target_transactions_mortality, stall_timeout.as_secs_f64() / 60.0f64, stall_timeout, @@ -367,7 +367,7 @@ async fn background_task( ), recent_finality_proofs_limit: RECENT_FINALITY_PROOFS_LIMIT, stall_timeout, - only_mandatory_headers, + headers_to_relay, }, metrics_params.clone().unwrap_or_else(MetricsParams::disabled), futures::future::pending(), diff --git a/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs b/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs index f67c002bba7..966bdc31072 100644 --- a/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs +++ b/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs @@ -222,6 +222,7 @@ where proved_relay_block, vec![(para_id, para_hash)], para_proof, + false, )); Ok((proved_parachain_block, calls)) @@ -256,8 +257,11 @@ async fn background_task( let mut parachains_source = ParachainsSource::
<P>
::new(source_relay_client.clone(), required_para_header_ref.clone()); - let mut parachains_target = - ParachainsTarget::
<P>
::new(target_client.clone(), target_transaction_params.clone()); + let mut parachains_target = ParachainsTarget::
<P>
::new( + source_relay_client.clone(), + target_client.clone(), + target_transaction_params.clone(), + ); loop { select! { @@ -392,6 +396,8 @@ async fn background_task( parachains_source.clone(), parachains_target.clone(), MetricsParams::disabled(), + // we do not support free parachain headers relay in on-demand relays + false, futures::future::pending(), ) .fuse(), @@ -481,7 +487,7 @@ where let para_header_at_target = best_finalized_peer_header_at_self::< P::TargetChain, P::SourceParachain, - >(target.client(), best_target_block_hash) + >(target.target_client(), best_target_block_hash) .await; // if there are no parachain heads at the target (`NoParachainHeadAtTarget`), we'll need to // submit at least one. Otherwise the pallet will be treated as uninitialized and messages @@ -504,7 +510,7 @@ where let relay_header_at_target = best_finalized_peer_header_at_self::< P::TargetChain, P::SourceRelayChain, - >(target.client(), best_target_block_hash) + >(target.target_client(), best_target_block_hash) .await .map_err(map_target_err)?; diff --git a/bridges/relays/lib-substrate-relay/src/parachains/mod.rs b/bridges/relays/lib-substrate-relay/src/parachains/mod.rs index 722f9b61f9f..8b128bb770d 100644 --- a/bridges/relays/lib-substrate-relay/src/parachains/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/parachains/mod.rs @@ -71,6 +71,7 @@ pub trait SubmitParachainHeadsCallBuilder: at_relay_block: HeaderIdOf, parachains: Vec<(ParaId, ParaHash)>, parachain_heads_proof: ParaHeadsProof, + is_free_execution_expected: bool, ) -> CallOf; } @@ -97,6 +98,7 @@ where at_relay_block: HeaderIdOf, parachains: Vec<(ParaId, ParaHash)>, parachain_heads_proof: ParaHeadsProof, + _is_free_execution_expected: bool, ) -> CallOf { BridgeParachainsCall::::submit_parachain_heads { at_relay_block: (at_relay_block.0, at_relay_block.1), diff --git a/bridges/relays/lib-substrate-relay/src/parachains/target.rs b/bridges/relays/lib-substrate-relay/src/parachains/target.rs index 6df7bc0a742..e10d15b6edf 100644 --- a/bridges/relays/lib-substrate-relay/src/parachains/target.rs +++ b/bridges/relays/lib-substrate-relay/src/parachains/target.rs @@ -24,42 +24,53 @@ use crate::{ }; use async_trait::async_trait; -use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId}; -use bp_runtime::HeaderIdProvider; -use codec::Decode; +use bp_parachains::{ + ImportedParaHeadsKeyProvider, ParaInfo, ParaStoredHeaderData, ParasInfoKeyProvider, +}; +use bp_polkadot_core::{ + parachains::{ParaHash, ParaHeadsProof, ParaId}, + BlockNumber as RelayBlockNumber, +}; +use bp_runtime::{ + Chain as ChainBase, HeaderId, HeaderIdProvider, StorageDoubleMapKeyProvider, + StorageMapKeyProvider, +}; use parachains_relay::parachains_loop::TargetClient; use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, Chain, Client, Error as SubstrateError, HeaderIdOf, - ParachainBase, TransactionEra, TransactionTracker, UnsignedTransaction, + AccountIdOf, AccountKeyPairOf, BlockNumberOf, Chain, Client, Error as SubstrateError, + HeaderIdOf, ParachainBase, RelayChain, TransactionEra, TransactionTracker, UnsignedTransaction, }; use relay_utils::relay_loop::Client as RelayClient; -use sp_core::{Bytes, Pair}; +use sp_core::Pair; /// Substrate client as parachain heads source. pub struct ParachainsTarget { - client: Client, + source_client: Client, + target_client: Client, transaction_params: TransactionParams>, } impl ParachainsTarget
<P>
{ /// Creates new parachains target client. pub fn new( - client: Client, + source_client: Client, + target_client: Client, transaction_params: TransactionParams>, ) -> Self { - ParachainsTarget { client, transaction_params } + ParachainsTarget { source_client, target_client, transaction_params } } /// Returns reference to the underlying RPC client. - pub fn client(&self) -> &Client { - &self.client + pub fn target_client(&self) -> &Client { + &self.target_client } } impl Clone for ParachainsTarget
<P>
{ fn clone(&self) -> Self { ParachainsTarget { - client: self.client.clone(), + source_client: self.source_client.clone(), + target_client: self.target_client.clone(), transaction_params: self.transaction_params.clone(), } } @@ -70,7 +81,9 @@ impl RelayClient for ParachainsTarget
<P>
{ type Error = SubstrateError; async fn reconnect(&mut self) -> Result<(), SubstrateError> { - self.client.reconnect().await + self.target_client.reconnect().await?; + self.source_client.reconnect().await?; + Ok(()) } } @@ -79,11 +92,13 @@ impl
<P>
TargetClient<ParachainsPipelineAdapter<P>> for ParachainsTarget
<P>
where P: SubstrateParachainsPipeline, AccountIdOf: From< as Pair>::Public>, + P::SourceParachain: ChainBase, + P::SourceRelayChain: ChainBase, { type TransactionTracker = TransactionTracker>; async fn best_block(&self) -> Result, Self::Error> { - let best_header = self.client.best_header().await?; + let best_header = self.target_client.best_header().await?; let best_id = best_header.id(); Ok(best_id) @@ -93,7 +108,7 @@ where &self, at_block: &HeaderIdOf, ) -> Result, Self::Error> { - self.client + self.target_client .typed_state_call::<_, Option>>( P::SourceRelayChain::BEST_FINALIZED_HEADER_ID_METHOD.into(), (), @@ -104,23 +119,57 @@ where .unwrap_or(Err(SubstrateError::BridgePalletIsNotInitialized)) } + async fn free_source_relay_headers_interval( + &self, + ) -> Result>, Self::Error> { + self.target_client + .typed_state_call(P::SourceRelayChain::FREE_HEADERS_INTERVAL_METHOD.into(), (), None) + .await + } + async fn parachain_head( &self, at_block: HeaderIdOf, - ) -> Result>, Self::Error> { - let encoded_best_finalized_source_para_block = self - .client - .state_call( - P::SourceParachain::BEST_FINALIZED_HEADER_ID_METHOD.into(), - Bytes(Vec::new()), - Some(at_block.1), - ) - .await?; + ) -> Result< + Option<(HeaderIdOf, HeaderIdOf)>, + Self::Error, + > { + // read best parachain head from the target bridge-parachains pallet + let storage_key = ParasInfoKeyProvider::final_key( + P::SourceRelayChain::WITH_CHAIN_BRIDGE_PARACHAINS_PALLET_NAME, + &P::SourceParachain::PARACHAIN_ID.into(), + ); + let storage_value: Option = + self.target_client.storage_value(storage_key, Some(at_block.hash())).await?; + let para_info = match storage_value { + Some(para_info) => para_info, + None => return Ok(None), + }; + + // now we need to get full header ids. For source relay chain it is simple, because we + // are connected + let relay_header_id = self + .source_client + .header_by_number(para_info.best_head_hash.at_relay_block_number) + .await? + .id(); - Ok(Option::>::decode( - &mut &encoded_best_finalized_source_para_block.0[..], - ) - .map_err(SubstrateError::ResponseParseFailed)?) 
+ // for parachain, we need to read from the target chain runtime storage + let storage_key = ImportedParaHeadsKeyProvider::final_key( + P::SourceRelayChain::WITH_CHAIN_BRIDGE_PARACHAINS_PALLET_NAME, + &P::SourceParachain::PARACHAIN_ID.into(), + ¶_info.best_head_hash.head_hash, + ); + let storage_value: Option = + self.target_client.storage_value(storage_key, Some(at_block.hash())).await?; + let para_head_number = match storage_value { + Some(para_head_data) => + para_head_data.decode_parachain_head_data::()?.number, + None => return Ok(None), + }; + + let para_head_id = HeaderId(para_head_number, para_info.best_head_hash.head_hash); + Ok(Some((relay_header_id, para_head_id))) } async fn submit_parachain_head_proof( @@ -128,14 +177,16 @@ where at_relay_block: HeaderIdOf, updated_head_hash: ParaHash, proof: ParaHeadsProof, + is_free_execution_expected: bool, ) -> Result { let transaction_params = self.transaction_params.clone(); let call = P::SubmitParachainHeadsCallBuilder::build_submit_parachain_heads_call( at_relay_block, vec![(ParaId(P::SourceParachain::PARACHAIN_ID), updated_head_hash)], proof, + is_free_execution_expected, ); - self.client + self.target_client .submit_and_watch_signed_extrinsic( &transaction_params.signer, move |best_block_id, transaction_nonce| { diff --git a/bridges/relays/parachains/src/parachains_loop.rs b/bridges/relays/parachains/src/parachains_loop.rs index 41ebbf5aade..55f236eeac1 100644 --- a/bridges/relays/parachains/src/parachains_loop.rs +++ b/bridges/relays/parachains/src/parachains_loop.rs @@ -25,7 +25,7 @@ use futures::{ future::{FutureExt, Shared}, poll, select_biased, }; -use relay_substrate_client::{Chain, HeaderIdOf, ParachainBase}; +use relay_substrate_client::{BlockNumberOf, Chain, HeaderIdOf, ParachainBase}; use relay_utils::{ metrics::MetricsParams, relay_loop::Client as RelayClient, FailedClient, TrackedTransactionStatus, TransactionTracker, @@ -96,17 +96,27 @@ pub trait TargetClient: RelayClient { /// Get best block id. async fn best_block(&self) -> Result, Self::Error>; - /// Get best finalized source relay chain block id. + /// Get best finalized source relay chain block id. If `free_source_relay_headers_interval` + /// is `Some(_)`, the returned async fn best_finalized_source_relay_chain_block( &self, at_block: &HeaderIdOf, ) -> Result, Self::Error>; + /// Get free source **relay** headers submission interval, if it is configured in the + /// target runtime. We assume that the target chain will accept parachain header, proved + /// at such relay header for free. + async fn free_source_relay_headers_interval( + &self, + ) -> Result>, Self::Error>; /// Get parachain head id at given block. async fn parachain_head( &self, at_block: HeaderIdOf, - ) -> Result>, Self::Error>; + ) -> Result< + Option<(HeaderIdOf, HeaderIdOf)>, + Self::Error, + >; /// Submit parachain heads proof. async fn submit_parachain_head_proof( @@ -114,6 +124,7 @@ pub trait TargetClient: RelayClient { at_source_block: HeaderIdOf, para_head_hash: ParaHash, proof: ParaHeadsProof, + is_free_execution_expected: bool, ) -> Result; } @@ -133,6 +144,7 @@ pub async fn run( source_client: impl SourceClient
<P>
, target_client: impl TargetClient
<P>
, metrics_params: MetricsParams, + only_free_headers: bool, exit_signal: impl Future + 'static + Send, ) -> Result<(), relay_utils::Error> where @@ -145,7 +157,13 @@ where .expose() .await? .run(metrics_prefix::
<P>
(), move |source_client, target_client, metrics| { - run_until_connection_lost(source_client, target_client, metrics, exit_signal.clone()) + run_until_connection_lost( + source_client, + target_client, + metrics, + only_free_headers, + exit_signal.clone(), + ) }) .await } @@ -155,6 +173,7 @@ async fn run_until_connection_lost( source_client: impl SourceClient
<P>
, target_client: impl TargetClient
<P>
, metrics: Option, + only_free_headers: bool, exit_signal: impl Future + Send, ) -> Result<(), FailedClient> where @@ -166,6 +185,47 @@ where P::TargetChain::AVERAGE_BLOCK_INTERVAL, ); + // free parachain header = header, available (proved) at free relay chain block. Let's + // read interval of free source relay chain blocks from target client + let free_source_relay_headers_interval = if only_free_headers { + let free_source_relay_headers_interval = + target_client.free_source_relay_headers_interval().await.map_err(|e| { + log::warn!( + target: "bridge", + "Failed to read free {} headers interval at {}: {:?}", + P::SourceRelayChain::NAME, + P::TargetChain::NAME, + e, + ); + FailedClient::Target + })?; + match free_source_relay_headers_interval { + Some(free_source_relay_headers_interval) if free_source_relay_headers_interval != 0 => { + log::trace!( + target: "bridge", + "Free {} headers interval at {}: {:?}", + P::SourceRelayChain::NAME, + P::TargetChain::NAME, + free_source_relay_headers_interval, + ); + free_source_relay_headers_interval + }, + _ => { + log::warn!( + target: "bridge", + "Invalid free {} headers interval at {}: {:?}", + P::SourceRelayChain::NAME, + P::TargetChain::NAME, + free_source_relay_headers_interval, + ); + return Err(FailedClient::Target) + }, + } + } else { + // ignore - we don't need it + 0 + }; + let mut submitted_heads_tracker: Option> = None; futures::pin_mut!(exit_signal); @@ -211,7 +271,7 @@ where log::warn!(target: "bridge", "Failed to read best {} block: {:?}", P::SourceRelayChain::NAME, e); FailedClient::Target })?; - let head_at_target = + let (relay_of_head_at_target, head_at_target) = read_head_at_target(&target_client, metrics.as_ref(), &best_target_block).await?; // check if our transaction has been mined @@ -238,9 +298,9 @@ where } } - // we have no active transaction and may need to update heads, but do we have something for - // update? 
- let best_finalized_relay_block = target_client + // in all-headers strategy we'll be submitting para head, available at + // `best_finalized_relay_block_at_target` + let best_finalized_relay_block_at_target = target_client .best_finalized_source_relay_chain_block(&best_target_block) .await .map_err(|e| { @@ -253,21 +313,56 @@ where ); FailedClient::Target })?; + + // ..but if we only need to submit free headers, we need to submit para + // head, available at best free source relay chain header, known to the + // target chain + let prove_at_relay_block = if only_free_headers { + match relay_of_head_at_target { + Some(relay_of_head_at_target) => { + // find last free relay chain header in the range that we are interested in + let scan_range_begin = relay_of_head_at_target.number(); + let scan_range_end = best_finalized_relay_block_at_target.number(); + if scan_range_end.saturating_sub(scan_range_begin) < + free_source_relay_headers_interval + { + // there are no new **free** relay chain headers in the range + log::trace!( + target: "bridge", + "Waiting for new free {} headers at {}: scanned {:?}..={:?}", + P::SourceRelayChain::NAME, + P::TargetChain::NAME, + scan_range_begin, + scan_range_end, + ); + continue; + } + + // we may submit new parachain head for free + best_finalized_relay_block_at_target + }, + None => { + // no parachain head at target => let's submit first one + best_finalized_relay_block_at_target + }, + } + } else { + best_finalized_relay_block_at_target + }; + + // now let's check if we need to update parachain head at all let head_at_source = - read_head_at_source(&source_client, metrics.as_ref(), &best_finalized_relay_block) - .await?; + read_head_at_source(&source_client, metrics.as_ref(), &prove_at_relay_block).await?; let is_update_required = is_update_required::
<P>
( head_at_source, head_at_target, - best_finalized_relay_block, + prove_at_relay_block, best_target_block, ); if is_update_required { - let (head_proof, head_hash) = source_client - .prove_parachain_head(best_finalized_relay_block) - .await - .map_err(|e| { + let (head_proof, head_hash) = + source_client.prove_parachain_head(prove_at_relay_block).await.map_err(|e| { log::warn!( target: "bridge", "Failed to prove {} parachain ParaId({}) heads: {:?}", @@ -283,12 +378,17 @@ where P::SourceRelayChain::NAME, P::SourceParachain::PARACHAIN_ID, P::TargetChain::NAME, - best_finalized_relay_block, + prove_at_relay_block, head_hash, ); let transaction_tracker = target_client - .submit_parachain_head_proof(best_finalized_relay_block, head_hash, head_proof) + .submit_parachain_head_proof( + prove_at_relay_block, + head_hash, + head_proof, + only_free_headers, + ) .await .map_err(|e| { log::warn!( @@ -311,7 +411,7 @@ where fn is_update_required( head_at_source: AvailableHeader>, head_at_target: Option>, - best_finalized_relay_block_at_source: HeaderIdOf, + prove_at_relay_block: HeaderIdOf, best_target_block: HeaderIdOf, ) -> bool where @@ -326,7 +426,7 @@ where P::SourceParachain::PARACHAIN_ID, P::TargetChain::NAME, P::SourceRelayChain::NAME, - best_finalized_relay_block_at_source, + prove_at_relay_block, head_at_source, P::TargetChain::NAME, best_target_block, @@ -413,24 +513,28 @@ async fn read_head_at_source( } } -/// Reads parachain head from the target client. +/// Reads parachain head from the target client. Also returns source relay chain header +/// that has been used to prove that head. async fn read_head_at_target( target_client: &impl TargetClient
<P>
, metrics: Option<&ParachainsLoopMetrics>, at_block: &HeaderIdOf, -) -> Result>, FailedClient> { +) -> Result< + (Option>, Option>), + FailedClient, +> { let para_head_id = target_client.parachain_head(*at_block).await; match para_head_id { - Ok(Some(para_head_id)) => { + Ok(Some((relay_header_id, para_head_id))) => { if let Some(metrics) = metrics { metrics.update_best_parachain_block_at_target( ParaId(P::SourceParachain::PARACHAIN_ID), para_head_id.number(), ); } - Ok(Some(para_head_id)) + Ok((Some(relay_header_id), Some(para_head_id))) }, - Ok(None) => Ok(None), + Ok(None) => Ok((None, None)), Err(e) => { log::warn!( target: "bridge", @@ -543,6 +647,7 @@ mod tests { use relay_substrate_client::test_chain::{TestChain, TestParachain}; use relay_utils::{HeaderId, MaybeConnectionError}; use sp_core::H256; + use std::collections::HashMap; const PARA_10_HASH: ParaHash = H256([10u8; 32]); const PARA_20_HASH: ParaHash = H256([20u8; 32]); @@ -590,14 +695,21 @@ mod tests { #[derive(Clone, Debug)] struct TestClientData { source_sync_status: Result, - source_head: Result>, TestError>, + source_head: HashMap< + BlockNumberOf, + Result>, TestError>, + >, source_proof: Result<(), TestError>, + target_free_source_relay_headers_interval: + Result>, TestError>, target_best_block: Result, TestError>, target_best_finalized_source_block: Result, TestError>, - target_head: Result>, TestError>, + #[allow(clippy::type_complexity)] + target_head: Result, HeaderIdOf)>, TestError>, target_submit_result: Result<(), TestError>, + submitted_proof_at_source_relay_block: Option>, exit_signal_sender: Option>>, } @@ -605,14 +717,18 @@ mod tests { pub fn minimal() -> Self { TestClientData { source_sync_status: Ok(true), - source_head: Ok(AvailableHeader::Available(HeaderId(0, PARA_20_HASH))), + source_head: vec![(0, Ok(AvailableHeader::Available(HeaderId(0, PARA_20_HASH))))] + .into_iter() + .collect(), source_proof: Ok(()), + target_free_source_relay_headers_interval: Ok(None), target_best_block: Ok(HeaderId(0, Default::default())), target_best_finalized_source_block: Ok(HeaderId(0, Default::default())), target_head: Ok(None), target_submit_result: Ok(()), + submitted_proof_at_source_relay_block: None, exit_signal_sender: None, } } @@ -649,16 +765,24 @@ mod tests { async fn parachain_head( &self, - _at_block: HeaderIdOf, + at_block: HeaderIdOf, ) -> Result>, TestError> { - self.data.lock().await.source_head.clone() + self.data + .lock() + .await + .source_head + .get(&at_block.0) + .expect(&format!("SourceClient::parachain_head({})", at_block.0)) + .clone() } async fn prove_parachain_head( &self, - _at_block: HeaderIdOf, + at_block: HeaderIdOf, ) -> Result<(ParaHeadsProof, ParaHash), TestError> { - let head = *self.data.lock().await.source_head.clone()?.as_available().unwrap(); + let head_result = + SourceClient::::parachain_head(self, at_block).await?; + let head = head_result.as_available().unwrap(); let storage_proof = vec![head.hash().encode()]; let proof = (ParaHeadsProof { storage_proof }, head.hash()); self.data.lock().await.source_proof.clone().map(|_| proof) @@ -680,21 +804,29 @@ mod tests { self.data.lock().await.target_best_finalized_source_block.clone() } + async fn free_source_relay_headers_interval( + &self, + ) -> Result>, TestError> { + self.data.lock().await.target_free_source_relay_headers_interval.clone() + } + async fn parachain_head( &self, _at_block: HeaderIdOf, - ) -> Result>, TestError> { + ) -> Result, HeaderIdOf)>, TestError> { self.data.lock().await.target_head.clone() } async fn 
submit_parachain_head_proof( &self, - _at_source_block: HeaderIdOf, + at_source_block: HeaderIdOf, _updated_parachain_head: ParaHash, _proof: ParaHeadsProof, + _is_free_execution_expected: bool, ) -> Result { let mut data = self.data.lock().await; data.target_submit_result.clone()?; + data.submitted_proof_at_source_relay_block = Some(at_source_block); if let Some(mut exit_signal_sender) = data.exit_signal_sender.take() { exit_signal_sender.send(()).await.unwrap(); @@ -715,6 +847,7 @@ mod tests { TestClient::from(test_source_client), TestClient::from(TestClientData::minimal()), None, + false, futures::future::pending(), )), Err(FailedClient::Source), @@ -731,6 +864,7 @@ mod tests { TestClient::from(TestClientData::minimal()), TestClient::from(test_target_client), None, + false, futures::future::pending(), )), Err(FailedClient::Target), @@ -747,6 +881,7 @@ mod tests { TestClient::from(TestClientData::minimal()), TestClient::from(test_target_client), None, + false, futures::future::pending(), )), Err(FailedClient::Target), @@ -763,6 +898,7 @@ mod tests { TestClient::from(TestClientData::minimal()), TestClient::from(test_target_client), None, + false, futures::future::pending(), )), Err(FailedClient::Target), @@ -772,13 +908,14 @@ mod tests { #[test] fn when_source_client_fails_to_read_heads() { let mut test_source_client = TestClientData::minimal(); - test_source_client.source_head = Err(TestError::Error); + test_source_client.source_head.insert(0, Err(TestError::Error)); assert_eq!( async_std::task::block_on(run_until_connection_lost( TestClient::from(test_source_client), TestClient::from(TestClientData::minimal()), None, + false, futures::future::pending(), )), Err(FailedClient::Source), @@ -795,6 +932,7 @@ mod tests { TestClient::from(test_source_client), TestClient::from(TestClientData::minimal()), None, + false, futures::future::pending(), )), Err(FailedClient::Source), @@ -811,6 +949,7 @@ mod tests { TestClient::from(TestClientData::minimal()), TestClient::from(test_target_client), None, + false, futures::future::pending(), )), Err(FailedClient::Target), @@ -825,12 +964,108 @@ mod tests { TestClient::from(TestClientData::minimal()), TestClient::from(TestClientData::with_exit_signal_sender(exit_signal_sender)), None, + false, exit_signal.into_future().map(|(_, _)| ()), )), Ok(()), ); } + #[async_std::test] + async fn free_headers_are_relayed() { + // prepare following case: + // 1) best source relay at target: 95 + // 2) best source parachain at target: 5 at relay 50 + // 3) free headers interval: 10 + // 4) at source relay chain block 90 source parachain block is 9 + // + + // 5) best finalized source relay chain block is 95 + // 6) at source relay chain block 95 source parachain block is 42 + // => + // parachain block 42 would have been relayed, because 95 - 50 > 10 + let (exit_signal_sender, exit_signal) = futures::channel::mpsc::unbounded(); + let clients_data = TestClientData { + source_sync_status: Ok(true), + source_head: vec![ + (90, Ok(AvailableHeader::Available(HeaderId(9, [9u8; 32].into())))), + (95, Ok(AvailableHeader::Available(HeaderId(42, [42u8; 32].into())))), + ] + .into_iter() + .collect(), + source_proof: Ok(()), + + target_free_source_relay_headers_interval: Ok(Some(10)), + target_best_block: Ok(HeaderId(200, [200u8; 32].into())), + target_best_finalized_source_block: Ok(HeaderId(95, [95u8; 32].into())), + target_head: Ok(Some((HeaderId(50, [50u8; 32].into()), HeaderId(5, [5u8; 32].into())))), + target_submit_result: Ok(()), + + submitted_proof_at_source_relay_block: 
None, + exit_signal_sender: Some(Box::new(exit_signal_sender)), + }; + + let source_client = TestClient::from(clients_data.clone()); + let target_client = TestClient::from(clients_data); + assert_eq!( + run_until_connection_lost( + source_client, + target_client.clone(), + None, + true, + exit_signal.into_future().map(|(_, _)| ()), + ) + .await, + Ok(()), + ); + + assert_eq!( + target_client + .data + .lock() + .await + .submitted_proof_at_source_relay_block + .map(|id| id.0), + Some(95) + ); + + // now source relay block chain 104 is mined with parachain head #84 + // => since 104 - 95 < 10, there are no free headers + // => nothing is submitted + let mut clients_data: TestClientData = target_client.data.lock().await.clone(); + clients_data + .source_head + .insert(104, Ok(AvailableHeader::Available(HeaderId(84, [84u8; 32].into())))); + clients_data.target_best_finalized_source_block = Ok(HeaderId(104, [104u8; 32].into())); + clients_data.target_head = + Ok(Some((HeaderId(95, [95u8; 32].into()), HeaderId(42, [42u8; 32].into())))); + clients_data.target_best_block = Ok(HeaderId(255, [255u8; 32].into())); + clients_data.exit_signal_sender = None; + + let source_client = TestClient::from(clients_data.clone()); + let target_client = TestClient::from(clients_data); + assert_eq!( + run_until_connection_lost( + source_client, + target_client.clone(), + None, + true, + async_std::task::sleep(std::time::Duration::from_millis(100)), + ) + .await, + Ok(()), + ); + + assert_eq!( + target_client + .data + .lock() + .await + .submitted_proof_at_source_relay_block + .map(|id| id.0), + Some(95) + ); + } + fn test_tx_tracker() -> SubmittedHeadsTracker { SubmittedHeadsTracker::new( AvailableHeader::Available(HeaderId(20, PARA_20_HASH)), diff --git a/prdoc/pr_4157.prdoc b/prdoc/pr_4157.prdoc new file mode 100644 index 00000000000..783eaa2dd42 --- /dev/null +++ b/prdoc/pr_4157.prdoc @@ -0,0 +1,29 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Bridge: added free headers submission support to the substrate-relay" + +doc: + - audience: Node Dev + description: | + Bridge finality and parachains relayer now supports mode, where it only submits some headers + for free. There's a setting in a runtime configuration, which introduces this "free header" + concept. Submitting such header is considered a common good deed, so it is free for relayers. 
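For illustration, the decision rule the relayer applies in this mode boils down to the `need_to_relay` check added in headers.rs above. A minimal, self-contained sketch follows; the enum mirrors `finality_relay::HeadersToRelay`, while the plain integer types and the concrete numbers are assumptions made only for this example:

/// Mirrors the `HeadersToRelay` enum introduced by this patch.
#[derive(Clone, Copy, Debug)]
enum HeadersToRelay {
    All,
    Mandatory,
    Free,
}

/// Simplified restatement of `need_to_relay`: mandatory headers are always
/// relayed; in `Free` mode, a non-mandatory header is relayed only if it is at
/// least `free_headers_interval` blocks ahead of the best header known to the
/// target chain.
fn need_to_relay(
    headers_to_relay: HeadersToRelay,
    is_mandatory: bool,
    header_number: u64,
    best_number_at_target: u64,
    free_headers_interval: Option<u64>,
) -> bool {
    match headers_to_relay {
        HeadersToRelay::All => true,
        HeadersToRelay::Mandatory => is_mandatory,
        HeadersToRelay::Free =>
            is_mandatory ||
                free_headers_interval
                    .map(|interval| {
                        header_number.saturating_sub(best_number_at_target) >= interval
                    })
                    .unwrap_or(false),
    }
}

fn main() {
    // With an interval of 10 and the target chain at header 95, header 104 is
    // not yet free (104 - 95 < 10)...
    assert!(!need_to_relay(HeadersToRelay::Free, false, 104, 95, Some(10)));
    // ...header 105 is free, and mandatory headers are relayed unconditionally.
    assert!(need_to_relay(HeadersToRelay::Free, false, 105, 95, Some(10)));
    assert!(need_to_relay(HeadersToRelay::Free, true, 96, 95, Some(10)));
}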
+ +crates: + - name: bp-bridge-hub-kusama + bump: major + - name: bp-bridge-hub-polkadot + bump: major + - name: bp-bridge-hub-rococo + bump: major + - name: bp-bridge-hub-westend + bump: major + - name: relay-substrate-client + bump: major + - name: finality-relay + bump: major + - name: substrate-relay-helper + bump: major + - name: parachains-relay + bump: major -- GitLab From b801d001e812778b1547352468d5f243b7070994 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Thu, 25 Apr 2024 10:47:46 +0200 Subject: [PATCH 070/107] Contracts: Stabilize XCM host fns (#4213) See https://github.com/paritytech/ink/pull/1912 https://github.com/paritytech/ink-docs/pull/338 --- prdoc/pr_4213.prdoc | 11 +++++++++++ substrate/frame/contracts/src/lib.rs | 4 ++-- substrate/frame/contracts/src/wasm/runtime.rs | 2 -- 3 files changed, 13 insertions(+), 4 deletions(-) create mode 100644 prdoc/pr_4213.prdoc diff --git a/prdoc/pr_4213.prdoc b/prdoc/pr_4213.prdoc new file mode 100644 index 00000000000..ce7eb65969b --- /dev/null +++ b/prdoc/pr_4213.prdoc @@ -0,0 +1,11 @@ +title: "[pallet-contracts] stabilize xcm_send and xcm_execute" + +doc: + - audience: Runtime Dev + description: | + `xcm_send` and `xcm_execute` are currently marked as unstable. This PR stabilizes them. +crates: +- name: pallet-contracts + bump: major + + diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index 20cf7d1651c..0045d72141c 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -223,14 +223,14 @@ pub struct Environment { pub struct ApiVersion(u16); impl Default for ApiVersion { fn default() -> Self { - Self(2) + Self(3) } } #[test] fn api_version_is_up_to_date() { assert_eq!( - 109, + 111, crate::wasm::STABLE_API_COUNT, "Stable API count has changed. Bump the returned value of ApiVersion::default() and update the test." ); diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 160dfa0d2f3..52ceda99edb 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -2104,7 +2104,6 @@ pub mod env { /// Execute an XCM program locally, using the contract's address as the origin. /// See [`pallet_contracts_uapi::HostFn::execute_xcm`]. - #[unstable] fn xcm_execute( ctx: _, memory: _, @@ -2143,7 +2142,6 @@ pub mod env { /// Send an XCM program from the contract to the specified destination. /// See [`pallet_contracts_uapi::HostFn::send_xcm`]. - #[unstable] fn xcm_send( ctx: _, memory: _, -- GitLab From 077041788070eddc6f3c1043b9cb6146585b1469 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Thu, 25 Apr 2024 12:01:21 +0300 Subject: [PATCH 071/107] [XCM] Treat recursion limit error as transient in the MQ (#4202) Changes: - Add new error variant `ProcessMessageError::StackLimitReached` and treat XCM error `ExceedsStackLimit` as such. 
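For clarity, the mapping this change introduces can be sketched as follows. Both enums are stubbed down to the variants relevant to the example; they are not the real `xcm`/`frame_support` types:

// Stub of the XCM executor error (illustrative subset).
#[derive(Debug, PartialEq)]
enum XcmError {
    ExceedsStackLimit,
    Unroutable,
}

// Stub of the message-queue processing error (illustrative subset).
#[derive(Debug, PartialEq)]
enum ProcessMessageError {
    Unsupported,
    StackLimitReached,
}

// Hitting the recursion/stack limit depends on how deep in the call stack the
// executor was invoked, so it is transient and must not be reported as a
// permanent `Unsupported` failure, which would prevent the message from ever
// being retried.
fn map_error(error: XcmError) -> ProcessMessageError {
    match error {
        XcmError::ExceedsStackLimit => ProcessMessageError::StackLimitReached,
        _ => ProcessMessageError::Unsupported,
    }
}

fn main() {
    assert_eq!(map_error(XcmError::ExceedsStackLimit), ProcessMessageError::StackLimitReached);
    assert_eq!(map_error(XcmError::Unroutable), ProcessMessageError::Unsupported);
}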
--------- Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Branislav Kontur --- .../xcm-builder/src/process_xcm_message.rs | 46 +++++++- polkadot/xcm/xcm-executor/src/lib.rs | 7 ++ prdoc/pr_4202.prdoc | 16 +++ substrate/frame/message-queue/src/lib.rs | 16 ++- substrate/frame/message-queue/src/mock.rs | 3 + substrate/frame/message-queue/src/tests.rs | 101 +++++++++++++++++- .../frame/support/src/traits/messages.rs | 4 + 7 files changed, 187 insertions(+), 6 deletions(-) create mode 100644 prdoc/pr_4202.prdoc diff --git a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs index bcf91d8e68c..7760274f6e2 100644 --- a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs +++ b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs @@ -102,7 +102,12 @@ impl< target: LOG_TARGET, "XCM message execution error: {error:?}", ); - (required, Err(ProcessMessageError::Unsupported)) + let error = match error { + xcm::latest::Error::ExceedsStackLimit => ProcessMessageError::StackLimitReached, + _ => ProcessMessageError::Unsupported, + }; + + (required, Err(error)) }, }; meter.consume(consumed); @@ -148,6 +153,45 @@ mod tests { } } + #[test] + fn process_message_exceeds_limits_fails() { + struct MockedExecutor; + impl ExecuteXcm<()> for MockedExecutor { + type Prepared = xcm_executor::WeighedMessage<()>; + fn prepare( + message: xcm::latest::Xcm<()>, + ) -> core::result::Result> { + Ok(xcm_executor::WeighedMessage::new(Weight::zero(), message)) + } + fn execute( + _: impl Into, + _: Self::Prepared, + _: &mut XcmHash, + _: Weight, + ) -> Outcome { + Outcome::Error { error: xcm::latest::Error::ExceedsStackLimit } + } + fn charge_fees(_location: impl Into, _fees: Assets) -> xcm::latest::Result { + unreachable!() + } + } + + type Processor = ProcessXcmMessage; + + let xcm = VersionedXcm::V4(xcm::latest::Xcm::<()>(vec![ + xcm::latest::Instruction::<()>::ClearOrigin, + ])); + assert_err!( + Processor::process_message( + &xcm.encode(), + ORIGIN, + &mut WeightMeter::new(), + &mut [0; 32] + ), + ProcessMessageError::StackLimitReached, + ); + } + #[test] fn process_message_overweight_fails() { for msg in [v3_xcm(true), v3_xcm(false), v3_xcm(false), v2_xcm(false)] { diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index e673a46c4ac..a7052328da0 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -182,6 +182,13 @@ impl PreparedMessage for WeighedMessage { } } +#[cfg(any(test, feature = "std"))] +impl WeighedMessage { + pub fn new(weight: Weight, message: Xcm) -> Self { + Self(weight, message) + } +} + impl ExecuteXcm for XcmExecutor { type Prepared = WeighedMessage; fn prepare( diff --git a/prdoc/pr_4202.prdoc b/prdoc/pr_4202.prdoc new file mode 100644 index 00000000000..6469c3c7840 --- /dev/null +++ b/prdoc/pr_4202.prdoc @@ -0,0 +1,16 @@ +title: "Treat XCM ExceedsStackLimit errors as transient in the MQ pallet" + +doc: + - audience: Runtime User + description: | + Fixes an issue where the MessageQueue can incorrectly assume that a message will permanently fail to process and disallow retrial of it. 
+ +crates: + - name: frame-support + bump: major + - name: pallet-message-queue + bump: patch + - name: staging-xcm-builder + bump: patch + - name: staging-xcm-executor + bump: patch diff --git a/substrate/frame/message-queue/src/lib.rs b/substrate/frame/message-queue/src/lib.rs index ec85c785f79..ef3420d21be 100644 --- a/substrate/frame/message-queue/src/lib.rs +++ b/substrate/frame/message-queue/src/lib.rs @@ -765,6 +765,13 @@ enum MessageExecutionStatus { Processed, /// The message was processed and resulted in a, possibly permanent, error. Unprocessable { permanent: bool }, + /// The stack depth limit was reached. + /// + /// We cannot just return `Unprocessable` in this case, because the processability of the + /// message depends on how the function was called. This may be a permanent error if it was + /// called by a top-level function, or a transient error if it was already called in a nested + /// function. + StackLimitReached, } impl Pallet { @@ -984,7 +991,8 @@ impl Pallet { // additional overweight event being deposited. ) { Overweight | InsufficientWeight => Err(Error::::InsufficientWeight), - Unprocessable { permanent: false } => Err(Error::::TemporarilyUnprocessable), + StackLimitReached | Unprocessable { permanent: false } => + Err(Error::::TemporarilyUnprocessable), Unprocessable { permanent: true } | Processed => { page.note_processed_at_pos(pos); book_state.message_count.saturating_dec(); @@ -1250,7 +1258,7 @@ impl Pallet { let is_processed = match res { InsufficientWeight => return ItemExecutionStatus::Bailed, Unprocessable { permanent: false } => return ItemExecutionStatus::NoProgress, - Processed | Unprocessable { permanent: true } => true, + Processed | Unprocessable { permanent: true } | StackLimitReached => true, Overweight => false, }; @@ -1461,6 +1469,10 @@ impl Pallet { Self::deposit_event(Event::::ProcessingFailed { id: id.into(), origin, error }); MessageExecutionStatus::Unprocessable { permanent: true } }, + Err(error @ StackLimitReached) => { + Self::deposit_event(Event::::ProcessingFailed { id: id.into(), origin, error }); + MessageExecutionStatus::StackLimitReached + }, Ok(success) => { // Success let weight_used = meter.consumed().saturating_sub(prev_consumed); diff --git a/substrate/frame/message-queue/src/mock.rs b/substrate/frame/message-queue/src/mock.rs index 1281de6b0a6..66a242d5a18 100644 --- a/substrate/frame/message-queue/src/mock.rs +++ b/substrate/frame/message-queue/src/mock.rs @@ -198,6 +198,7 @@ impl ProcessMessage for RecordingMessageProcessor { parameter_types! { pub static Callback: Box = Box::new(|_, _| {}); + pub static IgnoreStackOvError: bool = false; } /// Processed a mocked message. 
Messages that end with `badformat`, `corrupt`, `unsupported` or @@ -216,6 +217,8 @@ fn processing_message(msg: &[u8], origin: &MessageOrigin) -> Result<(), ProcessM Err(ProcessMessageError::Unsupported) } else if msg.ends_with("yield") { Err(ProcessMessageError::Yield) + } else if msg.ends_with("stacklimitreached") && !IgnoreStackOvError::get() { + Err(ProcessMessageError::StackLimitReached) } else { Ok(()) } diff --git a/substrate/frame/message-queue/src/tests.rs b/substrate/frame/message-queue/src/tests.rs index d6788847d57..e89fdb8b320 100644 --- a/substrate/frame/message-queue/src/tests.rs +++ b/substrate/frame/message-queue/src/tests.rs @@ -174,9 +174,10 @@ fn service_queues_failing_messages_works() { MessageQueue::enqueue_message(msg("badformat"), Here); MessageQueue::enqueue_message(msg("corrupt"), Here); MessageQueue::enqueue_message(msg("unsupported"), Here); + MessageQueue::enqueue_message(msg("stacklimitreached"), Here); MessageQueue::enqueue_message(msg("yield"), Here); // Starts with four pages. - assert_pages(&[0, 1, 2, 3]); + assert_pages(&[0, 1, 2, 3, 4]); assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); assert_last_event::( @@ -206,9 +207,9 @@ fn service_queues_failing_messages_works() { .into(), ); assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); - assert_eq!(System::events().len(), 3); + assert_eq!(System::events().len(), 4); // Last page with the `yield` stays in. - assert_pages(&[3]); + assert_pages(&[4]); }); } @@ -1880,3 +1881,97 @@ fn process_enqueued_on_idle_requires_enough_weight() { assert_eq!(MessagesProcessed::take(), vec![]); }) } + +/// A message that reports `StackLimitReached` will not be put into the overweight queue when +/// executed from the top level. +#[test] +fn process_discards_stack_ov_message() { + use MessageOrigin::*; + build_and_execute::(|| { + MessageQueue::enqueue_message(msg("stacklimitreached"), Here); + + MessageQueue::service_queues(10.into_weight()); + + assert_last_event::( + Event::ProcessingFailed { + id: blake2_256(b"stacklimitreached").into(), + origin: MessageOrigin::Here, + error: ProcessMessageError::StackLimitReached, + } + .into(), + ); + + assert!(MessagesProcessed::take().is_empty()); + // Message is gone and not overweight: + assert_pages(&[]); + }); +} + +/// A message that reports `StackLimitReached` will stay in the overweight queue when it is executed +/// by `execute_overweight`. +#[test] +fn execute_overweight_keeps_stack_ov_message() { + use MessageOrigin::*; + build_and_execute::(|| { + // We need to create a mocked message that first reports insufficient weight, and then + // `StackLimitReached`: + IgnoreStackOvError::set(true); + MessageQueue::enqueue_message(msg("stacklimitreached"), Here); + MessageQueue::service_queues(0.into_weight()); + + assert_last_event::( + Event::OverweightEnqueued { + id: blake2_256(b"stacklimitreached"), + origin: MessageOrigin::Here, + message_index: 0, + page_index: 0, + } + .into(), + ); + // Does not count as 'processed': + assert!(MessagesProcessed::take().is_empty()); + assert_pages(&[0]); + + // Now let it return `StackLimitReached`. Note that this case would normally not happen, + // since we assume that the top-level execution is the one with the most remaining stack + // depth. + IgnoreStackOvError::set(false); + // Ensure that trying to execute the message does not change any state (besides events). 
+ System::reset_events(); + let storage_noop = StorageNoopGuard::new(); + assert_eq!( + ::execute_overweight(3.into_weight(), (Here, 0, 0)), + Err(ExecuteOverweightError::Other) + ); + assert_last_event::( + Event::ProcessingFailed { + id: blake2_256(b"stacklimitreached").into(), + origin: MessageOrigin::Here, + error: ProcessMessageError::StackLimitReached, + } + .into(), + ); + System::reset_events(); + drop(storage_noop); + + // Now let's process it normally: + IgnoreStackOvError::set(true); + assert_eq!( + ::execute_overweight(1.into_weight(), (Here, 0, 0)) + .unwrap(), + 1.into_weight() + ); + + assert_last_event::( + Event::Processed { + id: blake2_256(b"stacklimitreached").into(), + origin: MessageOrigin::Here, + weight_used: 1.into_weight(), + success: true, + } + .into(), + ); + assert_pages(&[]); + System::reset_events(); + }); +} diff --git a/substrate/frame/support/src/traits/messages.rs b/substrate/frame/support/src/traits/messages.rs index f3d893bcc1d..2eec606b6d1 100644 --- a/substrate/frame/support/src/traits/messages.rs +++ b/substrate/frame/support/src/traits/messages.rs @@ -46,6 +46,8 @@ pub enum ProcessMessageError { /// the case that a queue is re-serviced within the same block after *yielding*. A queue is /// not required to *yield* again when it is being re-serviced withing the same block. Yield, + /// The message could not be processed for reaching the stack depth limit. + StackLimitReached, } /// Can process messages from a specific origin. @@ -96,6 +98,8 @@ pub trait ServiceQueues { /// - `weight_limit`: The maximum amount of dynamic weight that this call can use. /// /// Returns the dynamic weight used by this call; is never greater than `weight_limit`. + /// Should only be called in top-level runtime entry points like `on_initialize` or `on_idle`. + /// Otherwise, stack depth limit errors may be miss-handled. fn service_queues(weight_limit: Weight) -> Weight; /// Executes a message that could not be executed by [`Self::service_queues()`] because it was -- GitLab From 239a23d9cc712aab8c0a87eab7e558e5a149fd42 Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Thu, 25 Apr 2024 13:11:07 +0300 Subject: [PATCH 072/107] Fix polkadot parachains not producing blocks until next session (#4269) ... a few sessions too late :(, this already happened on polkadot, so as of now there are no known relay-chains without async backing enabled in runtime, but let's fix it in case someone else wants to repeat our steps. Fixes: https://github.com/paritytech/polkadot-sdk/issues/4226 --------- Signed-off-by: Alexandru Gheorghe --- .../node/network/statement-distribution/src/v2/mod.rs | 11 ++++++++++- .../statement-distribution/src/v2/tests/mod.rs | 6 ++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 68caa5f0e70..118e34e9206 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -826,7 +826,16 @@ pub(crate) fn handle_deactivate_leaves(state: &mut State, leaves: &[Hash]) { // clean up sessions based on everything remaining. 
 	let sessions: HashSet<_> = state.per_relay_parent.values().map(|r| r.session).collect();
 	state.per_session.retain(|s, _| sessions.contains(s));
-	state.unused_topologies.retain(|s, _| sessions.contains(s));
+
+	let last_session_index = state.unused_topologies.keys().max().copied();
+	// Do not clean up the last saved topology unless we moved to the next session.
+	// This is needed because `handle_deactivate_leaves` also gets called when
+	// prospective_parachains APIs are not present, so we would actually remove
+	// the topology without using it because `per_relay_parent` is empty until
+	// prospective_parachains gets enabled
+	state
+		.unused_topologies
+		.retain(|s, _| sessions.contains(s) || last_session_index == Some(*s));
 }
 
 #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs
index 8dda7219cd1..3d987d3fc43 100644
--- a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs
@@ -509,6 +509,12 @@ async fn setup_test_and_connect_peers(
 	// Send gossip topology and activate leaf.
 	if send_topology_before_leaf {
 		send_new_topology(overseer, state.make_dummy_topology()).await;
+		// Send cleaning up of a leaf to make sure it does not clear the saved topology as well.
+		overseer
+			.send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(
+				ActiveLeavesUpdate::stop_work(Hash::random()),
+			)))
+			.await;
 		activate_leaf(overseer, &test_leaf, &state, true, vec![]).await;
 	} else {
 		activate_leaf(overseer, &test_leaf, &state, true, vec![]).await;
-- 
GitLab

From c26cf3f6f2d2b7f7783703308ece440c338459f8 Mon Sep 17 00:00:00 2001
From: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com>
Date: Thu, 25 Apr 2024 12:16:12 +0200
Subject: [PATCH 073/107] Do not re-prepare PVFs if not needed (#4211)

Currently, PVFs are re-prepared if any execution environment parameter
changes. As we've recently seen on Kusama and Polkadot, that may lead to a
severe finality lag because every validator has to re-prepare every PVF.
That cannot be avoided altogether; however, we could cease re-preparing
PVFs when a change in the execution environment can't lead to a change in
the artifact itself. For example, it's clear that changing the execution
timeout cannot affect the artifact.

In this PR, I'm introducing a separate hash for the subset of execution
environment parameters that changes only if a preparation-related parameter
changes. It introduces some minor code duplication, but without that, the
scope of changes would be much bigger. A simplified sketch of the idea
follows the TODO list below.

TODO:

- [x] Add a test to ensure the artifact is not re-prepared if a
      non-preparation-related parameter is changed
- [x] Add a test to ensure the artifact is re-prepared if a
      preparation-related parameter is changed
- [x] Add comments, warnings, and, possibly, a test to ensure that any new
      parameter added to the executor environment parameters will be
      evaluated by the author of changes with respect to its artifact
      preparation impact and added to the new hash preimage if needed.
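For intuition, here is a minimal, self-contained sketch of the idea, using a
hypothetical `Param` enum and plain std hashing rather than the actual
`ExecutorParams`/`BlakeTwo256` code: only parameters that can influence the
compiled artifact feed the preparation hash, so two parameter sets that differ
only in an execution timeout map to the same artifact.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Hypothetical, simplified parameter set for illustration only.
#[derive(Hash)]
enum Param {
    ExecTimeoutMs(u64),   // cannot affect the compiled artifact
    PrepTimeoutMs(u64),   // affects preparation
    StackLogicalMax(u32), // affects preparation
}

// Hash only the preparation-affecting subset of the parameters.
fn prep_hash(params: &[Param]) -> u64 {
    let mut h = DefaultHasher::new();
    for p in params {
        match p {
            Param::ExecTimeoutMs(_) => {}, // excluded: execution-only
            other => other.hash(&mut h),
        }
    }
    h.finish()
}

fn main() {
    let a = [Param::PrepTimeoutMs(60_000), Param::ExecTimeoutMs(2_000)];
    let b = [Param::PrepTimeoutMs(60_000), Param::ExecTimeoutMs(2_500)];
    // Same preparation hash, so the cached artifact is reused.
    assert_eq!(prep_hash(&a), prep_hash(&b));
}
```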
Closes #4132 --- polkadot/node/core/pvf/src/artifacts.rs | 19 ++-- polkadot/node/core/pvf/tests/it/main.rs | 72 +++++++++++++- polkadot/primitives/src/lib.rs | 2 +- polkadot/primitives/src/v7/executor_params.rs | 99 +++++++++++++++++++ polkadot/primitives/src/v7/mod.rs | 4 +- prdoc/pr_4211.prdoc | 15 +++ 6 files changed, 201 insertions(+), 10 deletions(-) create mode 100644 prdoc/pr_4211.prdoc diff --git a/polkadot/node/core/pvf/src/artifacts.rs b/polkadot/node/core/pvf/src/artifacts.rs index 6288755526d..a3a48b61acb 100644 --- a/polkadot/node/core/pvf/src/artifacts.rs +++ b/polkadot/node/core/pvf/src/artifacts.rs @@ -58,7 +58,7 @@ use crate::{host::PrecheckResultSender, worker_interface::WORKER_DIR_PREFIX}; use always_assert::always; use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareStats, pvf::PvfPrepData}; use polkadot_parachain_primitives::primitives::ValidationCodeHash; -use polkadot_primitives::ExecutorParamsHash; +use polkadot_primitives::ExecutorParamsPrepHash; use std::{ collections::HashMap, fs, @@ -85,22 +85,27 @@ pub fn generate_artifact_path(cache_path: &Path) -> PathBuf { artifact_path } -/// Identifier of an artifact. Encodes a code hash of the PVF and a hash of executor parameter set. +/// Identifier of an artifact. Encodes a code hash of the PVF and a hash of preparation-related +/// executor parameter set. #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct ArtifactId { pub(crate) code_hash: ValidationCodeHash, - pub(crate) executor_params_hash: ExecutorParamsHash, + pub(crate) executor_params_prep_hash: ExecutorParamsPrepHash, } impl ArtifactId { /// Creates a new artifact ID with the given hash. - pub fn new(code_hash: ValidationCodeHash, executor_params_hash: ExecutorParamsHash) -> Self { - Self { code_hash, executor_params_hash } + pub fn new( + code_hash: ValidationCodeHash, + executor_params_prep_hash: ExecutorParamsPrepHash, + ) -> Self { + Self { code_hash, executor_params_prep_hash } } - /// Returns an artifact ID that corresponds to the PVF with given executor params. + /// Returns an artifact ID that corresponds to the PVF with given preparation-related + /// executor parameters. 
 	pub fn from_pvf_prep_data(pvf: &PvfPrepData) -> Self {
-		Self::new(pvf.code_hash(), pvf.executor_params().hash())
+		Self::new(pvf.code_hash(), pvf.executor_params().prep_hash())
 	}
 }
diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs
index 56cc681aff3..6961b93832a 100644
--- a/polkadot/node/core/pvf/tests/it/main.rs
+++ b/polkadot/node/core/pvf/tests/it/main.rs
@@ -26,7 +26,7 @@ use polkadot_node_core_pvf::{
 	ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR,
 };
 use polkadot_parachain_primitives::primitives::{BlockData, ValidationParams, ValidationResult};
-use polkadot_primitives::{ExecutorParam, ExecutorParams};
+use polkadot_primitives::{ExecutorParam, ExecutorParams, PvfExecKind, PvfPrepKind};
 use std::{io::Write, time::Duration};
 use tokio::sync::Mutex;
 
@@ -559,3 +559,73 @@ async fn nonexistent_cache_dir() {
 		.await
 		.unwrap();
 }
+
+// Checks that the artifact is not re-prepared when the executor environment parameters change
+// in a way that does not affect the preparation
+#[tokio::test]
+async fn artifact_does_not_reprepare_on_non_meaningful_exec_parameter_change() {
+	let host = TestHost::new_with_config(|cfg| {
+		cfg.prepare_workers_hard_max_num = 1;
+	})
+	.await;
+	let cache_dir = host.cache_dir.path();
+
+	let set1 = ExecutorParams::default();
+	let set2 =
+		ExecutorParams::from(&[ExecutorParam::PvfExecTimeout(PvfExecKind::Backing, 2500)][..]);
+
+	let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), set1).await.unwrap();
+
+	let md1 = {
+		let mut cache_dir: Vec<_> = std::fs::read_dir(cache_dir).unwrap().collect();
+		assert_eq!(cache_dir.len(), 2);
+		let mut artifact_path = cache_dir.pop().unwrap().unwrap();
+		if artifact_path.path().is_dir() {
+			artifact_path = cache_dir.pop().unwrap().unwrap();
+		}
+		std::fs::metadata(artifact_path.path()).unwrap()
+	};
+
+	// FS times are not monotonic, so we wait 2 secs here to be sure that the creation time of
+	// the second artifact will be different
+	tokio::time::sleep(Duration::from_secs(2)).await;
+
+	let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), set2).await.unwrap();
+
+	let md2 = {
+		let mut cache_dir: Vec<_> = std::fs::read_dir(cache_dir).unwrap().collect();
+		assert_eq!(cache_dir.len(), 2);
+		let mut artifact_path = cache_dir.pop().unwrap().unwrap();
+		if artifact_path.path().is_dir() {
+			artifact_path = cache_dir.pop().unwrap().unwrap();
+		}
+		std::fs::metadata(artifact_path.path()).unwrap()
+	};
+
+	assert_eq!(md1.created().unwrap(), md2.created().unwrap());
+}
+
+// Checks that the artifact is re-prepared when a change in the execution environment
+// parameters makes re-preparation necessary
+#[tokio::test]
+async fn artifact_does_reprepare_on_meaningful_exec_parameter_change() {
+	let host = TestHost::new_with_config(|cfg| {
+		cfg.prepare_workers_hard_max_num = 1;
+	})
+	.await;
+	let cache_dir = host.cache_dir.path();
+
+	let set1 = ExecutorParams::default();
+	let set2 =
+		ExecutorParams::from(&[ExecutorParam::PvfPrepTimeout(PvfPrepKind::Prepare, 60000)][..]);
+
+	let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), set1).await.unwrap();
+	let cache_dir_contents: Vec<_> = std::fs::read_dir(cache_dir).unwrap().collect();
+
+	assert_eq!(cache_dir_contents.len(), 2);
+
+	let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), set2).await.unwrap();
+	let cache_dir_contents: Vec<_> = std::fs::read_dir(cache_dir).unwrap().collect();
+
+	assert_eq!(cache_dir_contents.len(), 3); // new artifact has been added
+}
diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs
index d4eeb3cc3d2..01f393086a6 100644
--- a/polkadot/primitives/src/lib.rs
+++ b/polkadot/primitives/src/lib.rs
@@ -44,7 +44,7 @@ pub use v7::{
 	CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CollatorId,
 	CollatorSignature, CommittedCandidateReceipt, CompactStatement, ConsensusLog, CoreIndex,
 	CoreState, DisputeState, DisputeStatement, DisputeStatementSet, DownwardMessage, EncodeAs,
-	ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash,
+	ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash, ExecutorParamsPrepHash,
 	ExplicitDisputeStatement, GroupIndex, GroupRotationInfo, Hash, HashT, HeadData, Header,
 	HorizontalMessages, HrmpChannelId, Id, InboundDownwardMessage, InboundHrmpMessage, IndexedVec,
 	InherentData, InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, NodeFeatures,
diff --git a/polkadot/primitives/src/v7/executor_params.rs b/polkadot/primitives/src/v7/executor_params.rs
index 1e19f3b23fe..918a7f17a7e 100644
--- a/polkadot/primitives/src/v7/executor_params.rs
+++ b/polkadot/primitives/src/v7/executor_params.rs
@@ -152,13 +152,42 @@ impl sp_std::fmt::LowerHex for ExecutorParamsHash {
 	}
 }
 
+/// Unit type wrapper around [`type@Hash`] that represents a hash of preparation-related
+/// executor parameters.
+///
+/// This type is produced by [`ExecutorParams::prep_hash`].
+#[derive(Clone, Copy, Encode, Decode, Hash, Eq, PartialEq, PartialOrd, Ord, TypeInfo)]
+pub struct ExecutorParamsPrepHash(Hash);
+
+impl sp_std::fmt::Display for ExecutorParamsPrepHash {
+	fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result {
+		self.0.fmt(f)
+	}
+}
+
+impl sp_std::fmt::Debug for ExecutorParamsPrepHash {
+	fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result {
+		write!(f, "{:?}", self.0)
+	}
+}
+
+impl sp_std::fmt::LowerHex for ExecutorParamsPrepHash {
+	fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result {
+		sp_std::fmt::LowerHex::fmt(&self.0, f)
+	}
+}
+
 /// # Deterministically serialized execution environment semantics
 /// Represents an arbitrary semantics of an arbitrary execution environment, so should be kept as
 /// abstract as possible.
+//
 // ADR: For mandatory entries, mandatoriness should be enforced in code rather than separating them
 // into individual fields of the structure. Thus, complex migrations shall be avoided when adding
 // new entries and removing old ones. At the moment, there's no mandatory parameters defined. If
 // they show up, they must be clearly documented as mandatory ones.
+//
+// !!! Any new parameter that does not affect the prepared artifact must be added to the exclusion
+// !!! list in `prep_hash()` to avoid unnecessary artifact rebuilds.
 #[derive(
 	Clone, Debug, Default, Encode, Decode, PartialEq, Eq, TypeInfo, Serialize, Deserialize,
 )]
@@ -175,6 +204,28 @@ impl ExecutorParams {
 		ExecutorParamsHash(BlakeTwo256::hash(&self.encode()))
 	}
 
+	/// Returns hash of preparation-related executor parameters
+	pub fn prep_hash(&self) -> ExecutorParamsPrepHash {
+		use ExecutorParam::*;
+
+		let mut enc = b"prep".to_vec();
+
+		self.0
+			.iter()
+			.flat_map(|param| match param {
+				MaxMemoryPages(..) => None,
+				StackLogicalMax(..) => Some(param),
+				StackNativeMax(..) => None,
+				PrecheckingMaxMemory(..) => None,
+				PvfPrepTimeout(..) => Some(param),
+				PvfExecTimeout(..) => None,
+				WasmExtBulkMemory => Some(param),
+			})
+			.for_each(|p| enc.extend(p.encode()));
+
+		ExecutorParamsPrepHash(BlakeTwo256::hash(&enc))
+	}
+
 	/// Returns a PVF preparation timeout, if any
 	pub fn pvf_prep_timeout(&self, kind: PvfPrepKind) -> Option<Duration> {
 		for param in &self.0 {
@@ -336,3 +387,51 @@ impl From<&[ExecutorParam]> for ExecutorParams {
 		ExecutorParams(arr.to_vec())
 	}
 }
+
+// This test ensures the hash generated by `prep_hash()` changes if any preparation-related
+// executor parameter changes. If you're adding a new executor parameter, you must add it into
+// this test, and if changing that parameter may not affect the artifact produced on the
+// preparation step, it must be added to the list of exclusions in `prep_hash()` as well.
+// See also `prep_hash()` comments.
+#[test]
+fn ensure_prep_hash_changes() {
+	use ExecutorParam::*;
+	let ep = ExecutorParams::from(
+		&[
+			MaxMemoryPages(0),
+			StackLogicalMax(0),
+			StackNativeMax(0),
+			PrecheckingMaxMemory(0),
+			PvfPrepTimeout(PvfPrepKind::Precheck, 0),
+			PvfPrepTimeout(PvfPrepKind::Prepare, 0),
+			PvfExecTimeout(PvfExecKind::Backing, 0),
+			PvfExecTimeout(PvfExecKind::Approval, 0),
+			WasmExtBulkMemory,
+		][..],
+	);
+
+	for p in ep.iter() {
+		let (ep1, ep2) = match p {
+			MaxMemoryPages(_) => continue,
+			StackLogicalMax(_) => (
+				ExecutorParams::from(&[StackLogicalMax(1)][..]),
+				ExecutorParams::from(&[StackLogicalMax(2)][..]),
+			),
+			StackNativeMax(_) => continue,
+			PrecheckingMaxMemory(_) => continue,
+			PvfPrepTimeout(PvfPrepKind::Precheck, _) => (
+				ExecutorParams::from(&[PvfPrepTimeout(PvfPrepKind::Precheck, 1)][..]),
+				ExecutorParams::from(&[PvfPrepTimeout(PvfPrepKind::Precheck, 2)][..]),
+			),
+			PvfPrepTimeout(PvfPrepKind::Prepare, _) => (
+				ExecutorParams::from(&[PvfPrepTimeout(PvfPrepKind::Prepare, 1)][..]),
+				ExecutorParams::from(&[PvfPrepTimeout(PvfPrepKind::Prepare, 2)][..]),
+			),
+			PvfExecTimeout(_, _) => continue,
+			WasmExtBulkMemory =>
+				(ExecutorParams::default(), ExecutorParams::from(&[WasmExtBulkMemory][..])),
+		};
+
+		assert_ne!(ep1.prep_hash(), ep2.prep_hash());
+	}
+}
diff --git a/polkadot/primitives/src/v7/mod.rs b/polkadot/primitives/src/v7/mod.rs
index 5647bfe68d5..8a059408496 100644
--- a/polkadot/primitives/src/v7/mod.rs
+++ b/polkadot/primitives/src/v7/mod.rs
@@ -62,7 +62,9 @@ pub mod executor_params;
 pub mod slashing;
 
 pub use async_backing::AsyncBackingParams;
-pub use executor_params::{ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash};
+pub use executor_params::{
+	ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash, ExecutorParamsPrepHash,
+};
 
 mod metrics;
 pub use metrics::{
diff --git a/prdoc/pr_4211.prdoc b/prdoc/pr_4211.prdoc
new file mode 100644
index 00000000000..161dc8485e8
--- /dev/null
+++ b/prdoc/pr_4211.prdoc
@@ -0,0 +1,15 @@
+title: "Re-prepare PVF artifacts only if needed"
+
+doc:
+  - audience: Node Dev
+    description: |
+      When a change in the executor environment parameters cannot affect the prepared artifact,
+      it is preserved without recompilation and used for future executions. That mitigates
+      situations where every unrelated executor parameter change resulted in re-preparing every
+      artifact on every validator, causing a significant finality lag.
+ +crates: + - name: polkadot-node-core-pvf + bump: minor + - name: polkadot-primitives + bump: minor -- GitLab From ff2b178206f9952c3337638659450c67fd700e7e Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Thu, 25 Apr 2024 22:01:05 +1000 Subject: [PATCH 074/107] remote-externalities: retry get child keys query (#4280) --- .../frame/remote-externalities/src/lib.rs | 29 ++++++++++--------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index e429d39669f..58cb901470c 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -830,19 +830,22 @@ where child_prefix: StorageKey, at: B::Hash, ) -> Result, &'static str> { - // This is deprecated and will generate a warning which causes the CI to fail. - #[allow(warnings)] - let child_keys = substrate_rpc_client::ChildStateApi::storage_keys( - client, - PrefixedStorageKey::new(prefixed_top_key.as_ref().to_vec()), - child_prefix, - Some(at), - ) - .await - .map_err(|e| { - error!(target: LOG_TARGET, "Error = {:?}", e); - "rpc child_get_keys failed." - })?; + let retry_strategy = + FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL).take(Self::MAX_RETRIES); + let get_child_keys_closure = || { + #[allow(deprecated)] + substrate_rpc_client::ChildStateApi::storage_keys( + client, + PrefixedStorageKey::new(prefixed_top_key.as_ref().to_vec()), + child_prefix.clone(), + Some(at), + ) + }; + let child_keys = + Retry::spawn(retry_strategy, get_child_keys_closure).await.map_err(|e| { + error!(target: LOG_TARGET, "Error = {:?}", e); + "rpc child_get_keys failed." + })?; debug!( target: LOG_TARGET, -- GitLab From c9923cd7feb9e7c6337f0942abd3279468df5559 Mon Sep 17 00:00:00 2001 From: Alin Dima Date: Thu, 25 Apr 2024 16:52:24 +0300 Subject: [PATCH 075/107] rename fragment_tree folder to fragment_chain (#4294) Makes https://github.com/paritytech/polkadot-sdk/pull/4035 easier to review --- .../src/{fragment_tree => fragment_chain}/mod.rs | 0 .../src/{fragment_tree => fragment_chain}/tests.rs | 0 polkadot/node/core/prospective-parachains/src/lib.rs | 12 ++++++------ 3 files changed, 6 insertions(+), 6 deletions(-) rename polkadot/node/core/prospective-parachains/src/{fragment_tree => fragment_chain}/mod.rs (100%) rename polkadot/node/core/prospective-parachains/src/{fragment_tree => fragment_chain}/tests.rs (100%) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_tree/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs similarity index 100% rename from polkadot/node/core/prospective-parachains/src/fragment_tree/mod.rs rename to polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs diff --git a/polkadot/node/core/prospective-parachains/src/fragment_tree/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs similarity index 100% rename from polkadot/node/core/prospective-parachains/src/fragment_tree/tests.rs rename to polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index f5d50fb74fa..0b1a2e034a2 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -55,13 +55,13 @@ use polkadot_primitives::{ use crate::{ error::{FatalError, FatalResult, JfyiError, JfyiErrorResult, Result}, - 
fragment_tree::{ + fragment_chain::{ CandidateStorage, CandidateStorageInsertionError, FragmentTree, Scope as TreeScope, }, }; mod error; -mod fragment_tree; +mod fragment_chain; #[cfg(test)] mod tests; @@ -349,7 +349,7 @@ fn prune_view_candidate_storage(view: &mut View, metrics: &Metrics) { struct ImportablePendingAvailability { candidate: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, - compact: crate::fragment_tree::PendingAvailability, + compact: crate::fragment_chain::PendingAvailability, } #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] @@ -394,7 +394,7 @@ async fn preprocess_candidates_pending_availability( relay_parent_number: relay_parent.number, relay_parent_storage_root: relay_parent.storage_root, }, - compact: crate::fragment_tree::PendingAvailability { + compact: crate::fragment_chain::PendingAvailability { candidate_hash: pending.candidate_hash, relay_parent, }, @@ -675,7 +675,7 @@ fn answer_hypothetical_frontier_request( let candidate_hash = c.candidate_hash(); let hypothetical = match c { HypotheticalCandidate::Complete { receipt, persisted_validation_data, .. } => - fragment_tree::HypotheticalCandidate::Complete { + fragment_chain::HypotheticalCandidate::Complete { receipt: Cow::Borrowed(receipt), persisted_validation_data: Cow::Borrowed(persisted_validation_data), }, @@ -683,7 +683,7 @@ fn answer_hypothetical_frontier_request( parent_head_data_hash, candidate_relay_parent, .. - } => fragment_tree::HypotheticalCandidate::Incomplete { + } => fragment_chain::HypotheticalCandidate::Incomplete { relay_parent: *candidate_relay_parent, parent_head_data_hash: *parent_head_data_hash, }, -- GitLab From 8f5c8f735af9048b83957821db7fb363e89e919f Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Thu, 25 Apr 2024 17:04:20 +0200 Subject: [PATCH 076/107] Update approval-voting banchmarks base values (#4283) --- .../benches/approval-voting-regression-bench.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs index 7157362a79c..9a5f0d29dbd 100644 --- a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs +++ b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs @@ -77,12 +77,12 @@ fn main() -> Result<(), String> { // We expect no variance for received and sent // but use 0.001 because we operate with floats messages.extend(average_usage.check_network_usage(&[ - ("Received from peers", 52944.7000, 0.001), - ("Sent to peers", 63532.2000, 0.001), + ("Received from peers", 52942.4600, 0.001), + ("Sent to peers", 63547.0330, 0.001), ])); messages.extend(average_usage.check_cpu_usage(&[ - ("approval-distribution", 7.7883, 0.1), - ("approval-voting", 10.4655, 0.1), + ("approval-distribution", 7.0317, 0.1), + ("approval-voting", 9.5751, 0.1), ])); if messages.is_empty() { -- GitLab From dd5b06e622c6c5c301a1554286ec1f4995c7daca Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Thu, 25 Apr 2024 17:06:37 +0200 Subject: [PATCH 077/107] [subsystem-benchmarks] Log standart deviation for subsystem-benchmarks (#4285) Should help us to understand more what's happening between individual runs and possibly adjust the number of runs --- polkadot/node/subsystem-bench/src/lib/usage.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/polkadot/node/subsystem-bench/src/lib/usage.rs 
index 59296746ec3..bfaac3265a2 100644
--- a/polkadot/node/subsystem-bench/src/lib/usage.rs
+++ b/polkadot/node/subsystem-bench/src/lib/usage.rs
@@ -161,6 +161,13 @@ impl ResourceUsage {
 		for (resource_name, values) in by_name {
 			let total = values.iter().map(|v| v.total).sum::<f64>() / values.len() as f64;
 			let per_block = values.iter().map(|v| v.per_block).sum::<f64>() / values.len() as f64;
+			let per_block_sd =
+				standard_deviation(&values.iter().map(|v| v.per_block).collect::<Vec<f64>>());
+			println!(
+				"[{}] standard_deviation {:.2}%",
+				resource_name,
+				per_block_sd / per_block * 100.0
+			);
 			average.push(Self { resource_name, total, per_block });
 		}
 		average
@@ -179,3 +186,11 @@ pub struct ChartItem {
 	pub unit: String,
 	pub value: f64,
 }
+
+fn standard_deviation(values: &[f64]) -> f64 {
+	let n = values.len() as f64;
+	let mean = values.iter().sum::<f64>() / n;
+	let variance = values.iter().map(|v| (v - mean).powi(2)).sum::<f64>() / (n - 1.0);
+
+	variance.sqrt()
+}
-- 
GitLab

From 8f8c49deffe56567ba5cde0e1047de15b660bb0e Mon Sep 17 00:00:00 2001
From: Noah Jelich <12912633+njelich@users.noreply.github.com>
Date: Fri, 26 Apr 2024 09:03:53 +0200
Subject: [PATCH 078/107] Fix bad links (#4231)

The solochain template links to the parachain template instead of the
solochain one.

---
 templates/solochain/README.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/templates/solochain/README.md b/templates/solochain/README.md
index 6390c9524ce..37c65797dcb 100644
--- a/templates/solochain/README.md
+++ b/templates/solochain/README.md
@@ -4,10 +4,10 @@ A fresh [Substrate](https://substrate.io/) node, ready for hacking :rocket:
 
 A standalone version of this template is available for each release of Polkadot
 in the [Substrate Developer Hub Parachain
-Template](https://github.com/substrate-developer-hub/substrate-parachain-template/)
+Template](https://github.com/substrate-developer-hub/substrate-node-template/)
 repository. The parachain template is generated directly at each Polkadot
-release branch from the [Node Template in
-Substrate](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/bin/node-template)
+release branch from the [Solochain Template in
+Substrate](https://github.com/paritytech/polkadot-sdk/tree/master/templates/solochain)
 upstream
 
 It is usually best to use the stand-alone version to start a new project. All
-- 
GitLab

From e8f7c81db66abb40802c582c22041aa63c78ddff Mon Sep 17 00:00:00 2001
From: Oliver Tale-Yazdi
Date: Fri, 26 Apr 2024 11:16:03 +0300
Subject: [PATCH 079/107] [balances] Safeguard against consumer ref underflow
 (#3865)

There are some accounts that do not have a consumer ref while having a
reserve. This adds a fail-safe mechanism to trigger in the case that
`does_consume` is true, but the assumption of `consumers > 0` does not
hold.

This should prevent those accounts from losing balance and the TI from
getting messed up even more, but is not an "ideal" fix. TBH an ideal fix
is not possible, since on-chain data is in an invalid state.
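To make the failure mode concrete, here is a hedged, self-contained sketch of
the invariant in question, using a hypothetical plain-Rust account model
(`Account`, `ensure_consumer_ref`) rather than the actual pallet types: an
account with `reserved > 0` must hold at least one consumer reference, and the
failsafe defensively restores a missing reference instead of letting the
account be reaped.

```rust
// Hypothetical, simplified account model for illustration only.
struct Account {
    free: u64,
    reserved: u64,
    consumers: u32,
}

// An account with held (reserved) balance must keep a consumer reference.
fn does_consume(a: &Account) -> bool {
    a.reserved > 0
}

// Failsafe: defensively restore the missing consumer ref, mirroring the
// fix in the diff below, instead of allowing the account to be reaped.
fn ensure_consumer_ref(a: &mut Account) {
    if does_consume(a) && a.consumers == 0 {
        a.consumers += 1;
    }
}

fn main() {
    // A "historic" bad account: reserved funds but no consumer ref.
    let mut acc = Account { free: 90, reserved: 10, consumers: 0 };
    ensure_consumer_ref(&mut acc);
    assert_eq!(acc.consumers, 1);
    assert_eq!(acc.free + acc.reserved, 100); // no balance is lost
}
```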
---------

Signed-off-by: Oliver Tale-Yazdi
---
 prdoc/pr_3865.prdoc                           |  11 ++
 substrate/frame/balances/Cargo.toml           |   1 +
 substrate/frame/balances/src/lib.rs           |   7 ++
 .../frame/balances/src/tests/general_tests.rs | 111 ++++++++++++++++++
 substrate/frame/balances/src/tests/mod.rs     |  20 +++-
 substrate/frame/balances/src/types.rs         |   2 +-
 6 files changed, 150 insertions(+), 2 deletions(-)
 create mode 100644 prdoc/pr_3865.prdoc
 create mode 100644 substrate/frame/balances/src/tests/general_tests.rs

diff --git a/prdoc/pr_3865.prdoc b/prdoc/pr_3865.prdoc
new file mode 100644
index 00000000000..8e39c04825b
--- /dev/null
+++ b/prdoc/pr_3865.prdoc
@@ -0,0 +1,11 @@
+title: "Balances: add failsafe for consumer ref underflow"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Pallet balances now handles the case that historic accounts violate an invariant that they should have a consumer ref on `reserved > 0` balance.
+      This prevents such accounts from being reaped and should keep the TI from getting messed up even more.
+
+crates:
+  - name: pallet-balances
+    bump: patch
diff --git a/substrate/frame/balances/Cargo.toml b/substrate/frame/balances/Cargo.toml
index 28eabdaf506..1cc9ac5d8fd 100644
--- a/substrate/frame/balances/Cargo.toml
+++ b/substrate/frame/balances/Cargo.toml
@@ -28,6 +28,7 @@ docify = "0.2.8"
 
 [dev-dependencies]
 pallet-transaction-payment = { path = "../transaction-payment" }
+frame-support = { path = "../support", features = ["experimental"] }
 sp-core = { path = "../../primitives/core" }
 sp-io = { path = "../../primitives/io" }
 paste = "1.0.12"
diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs
index 685b12499ac..bd811955d63 100644
--- a/substrate/frame/balances/src/lib.rs
+++ b/substrate/frame/balances/src/lib.rs
@@ -954,6 +954,13 @@ pub mod pallet {
 			if !did_consume && does_consume {
 				frame_system::Pallet::<T>::inc_consumers(who)?;
 			}
+			if does_consume && frame_system::Pallet::<T>::consumers(who) == 0 {
+				// NOTE: This is a failsafe and should not happen for normal accounts. A normal
+				// account should have gotten a consumer ref in `!did_consume && does_consume`
+				// at some point.
+				log::error!(target: LOG_TARGET, "Defensively bumping a consumer ref.");
+				frame_system::Pallet::<T>::inc_consumers(who)?;
+			}
 			if did_provide && !does_provide {
 				// This could reap the account so must go last.
 				frame_system::Pallet::<T>::dec_providers(who).map_err(|r| {
diff --git a/substrate/frame/balances/src/tests/general_tests.rs b/substrate/frame/balances/src/tests/general_tests.rs
new file mode 100644
index 00000000000..0f3e015d0a8
--- /dev/null
+++ b/substrate/frame/balances/src/tests/general_tests.rs
@@ -0,0 +1,111 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#![cfg(test)] + +use crate::{ + system::AccountInfo, + tests::{ensure_ti_valid, Balances, ExtBuilder, System, Test, TestId, UseSystem}, + AccountData, ExtraFlags, TotalIssuance, +}; +use frame_support::{ + assert_noop, assert_ok, hypothetically, + traits::{ + fungible::{Mutate, MutateHold}, + tokens::Precision, + }, +}; +use sp_runtime::DispatchError; + +/// There are some accounts that have one consumer ref too few. These accounts are at risk of losing +/// their held (reserved) balance. They do not just lose it - it is also not accounted for in the +/// Total Issuance. Here we test the case that the account does not reap in such a case, but gets +/// one consumer ref for its reserved balance. +#[test] +fn regression_historic_acc_does_not_evaporate_reserve() { + ExtBuilder::default().build_and_execute_with(|| { + UseSystem::set(true); + let (alice, bob) = (0, 1); + // Alice is in a bad state with consumer == 0 && reserved > 0: + Balances::set_balance(&alice, 100); + TotalIssuance::::put(100); + ensure_ti_valid(); + + assert_ok!(Balances::hold(&TestId::Foo, &alice, 10)); + // This is the issue of the account: + System::dec_consumers(&alice); + + assert_eq!( + System::account(&alice), + AccountInfo { + data: AccountData { + free: 90, + reserved: 10, + frozen: 0, + flags: ExtraFlags(1u128 << 127), + }, + nonce: 0, + consumers: 0, // should be 1 on a good acc + providers: 1, + sufficients: 0, + } + ); + + ensure_ti_valid(); + + // Reaping the account is prevented by the new logic: + assert_noop!( + Balances::transfer_allow_death(Some(alice).into(), bob, 90), + DispatchError::ConsumerRemaining + ); + assert_noop!( + Balances::transfer_all(Some(alice).into(), bob, false), + DispatchError::ConsumerRemaining + ); + + // normal transfers still work: + hypothetically!({ + assert_ok!(Balances::transfer_keep_alive(Some(alice).into(), bob, 40)); + // Alice got back her consumer ref: + assert_eq!(System::consumers(&alice), 1); + ensure_ti_valid(); + }); + hypothetically!({ + assert_ok!(Balances::transfer_all(Some(alice).into(), bob, true)); + // Alice got back her consumer ref: + assert_eq!(System::consumers(&alice), 1); + ensure_ti_valid(); + }); + + // un-reserving all does not add a consumer ref: + hypothetically!({ + assert_ok!(Balances::release(&TestId::Foo, &alice, 10, Precision::Exact)); + assert_eq!(System::consumers(&alice), 0); + assert_ok!(Balances::transfer_keep_alive(Some(alice).into(), bob, 40)); + assert_eq!(System::consumers(&alice), 0); + ensure_ti_valid(); + }); + // un-reserving some does add a consumer ref: + hypothetically!({ + assert_ok!(Balances::release(&TestId::Foo, &alice, 5, Precision::Exact)); + assert_eq!(System::consumers(&alice), 1); + assert_ok!(Balances::transfer_keep_alive(Some(alice).into(), bob, 40)); + assert_eq!(System::consumers(&alice), 1); + ensure_ti_valid(); + }); + }); +} diff --git a/substrate/frame/balances/src/tests/mod.rs b/substrate/frame/balances/src/tests/mod.rs index 234fe6eaf2c..0abf2251290 100644 --- a/substrate/frame/balances/src/tests/mod.rs +++ b/substrate/frame/balances/src/tests/mod.rs @@ -19,7 +19,7 @@ #![cfg(test)] -use crate::{self as pallet_balances, AccountData, Config, CreditOf, Error, Pallet}; +use crate::{self as pallet_balances, AccountData, Config, CreditOf, Error, Pallet, TotalIssuance}; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ assert_err, assert_noop, assert_ok, assert_storage_noop, derive_impl, @@ -47,6 +47,7 @@ mod currency_tests; mod dispatchable_tests; mod fungible_conformance_tests; mod 
fungible_tests; +mod general_tests; mod reentrancy_tests; type Block = frame_system::mocking::MockBlock; @@ -278,6 +279,23 @@ pub fn info_from_weight(w: Weight) -> DispatchInfo { DispatchInfo { weight: w, ..Default::default() } } +/// Check that the total-issuance matches the sum of all accounts' total balances. +pub fn ensure_ti_valid() { + let mut sum = 0; + + for acc in frame_system::Account::::iter_keys() { + if UseSystem::get() { + let data = frame_system::Pallet::::account(acc); + sum += data.data.total(); + } else { + let data = crate::Account::::get(acc); + sum += data.total(); + } + } + + assert_eq!(TotalIssuance::::get(), sum, "Total Issuance wrong"); +} + #[test] fn weights_sane() { let info = crate::Call::::transfer_allow_death { dest: 10, value: 4 }.get_dispatch_info(); diff --git a/substrate/frame/balances/src/types.rs b/substrate/frame/balances/src/types.rs index 69d33bb023f..3e36a83575c 100644 --- a/substrate/frame/balances/src/types.rs +++ b/substrate/frame/balances/src/types.rs @@ -111,7 +111,7 @@ pub struct AccountData { const IS_NEW_LOGIC: u128 = 0x80000000_00000000_00000000_00000000u128; #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] -pub struct ExtraFlags(u128); +pub struct ExtraFlags(pub(crate) u128); impl Default for ExtraFlags { fn default() -> Self { Self(IS_NEW_LOGIC) -- GitLab From c66d8a84687f5d68c0192122aa513b4b340794ca Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 26 Apr 2024 12:24:42 +0300 Subject: [PATCH 080/107] Bump bridges relay version + uncomment bridges zombeinet tests (#4289) TODOs: - [x] wait and see if test `1` works; - [x] ~think of whether we need remaining tests.~ I think we should keep it - will try to revive and update it --- .gitlab/pipeline/zombienet.yml | 4 +--- .gitlab/pipeline/zombienet/bridges.yml | 4 ++-- ...hen-idle.js => multiple-headers-synced.js} | 22 +++++-------------- .../rococo-to-westend.zndsl | 20 +++++++++++++++++ .../run.sh | 2 +- .../westend-to-rococo.zndsl | 20 +++++++++++++++++ .../rococo-to-westend.zndsl | 8 ------- .../westend-to-rococo.zndsl | 7 ------ ...ridges_zombienet_tests_injected.Dockerfile | 2 +- 9 files changed, 51 insertions(+), 38 deletions(-) rename bridges/testing/framework/js-helpers/{only-mandatory-headers-synced-when-idle.js => multiple-headers-synced.js} (61%) create mode 100644 bridges/testing/tests/0002-free-headers-synced-while-idle/rococo-to-westend.zndsl rename bridges/testing/tests/{0002-mandatory-headers-synced-while-idle => 0002-free-headers-synced-while-idle}/run.sh (90%) create mode 100644 bridges/testing/tests/0002-free-headers-synced-while-idle/westend-to-rococo.zndsl delete mode 100644 bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl delete mode 100644 bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml index e306cb43c02..52948e1eb71 100644 --- a/.gitlab/pipeline/zombienet.yml +++ b/.gitlab/pipeline/zombienet.yml @@ -12,6 +12,4 @@ include: # polkadot tests - .gitlab/pipeline/zombienet/polkadot.yml # bridges tests - # TODO: https://github.com/paritytech/parity-bridges-common/pull/2884 - # commenting until we have a new relatye, compatible with updated fees scheme - # - .gitlab/pipeline/zombienet/bridges.yml + - .gitlab/pipeline/zombienet/bridges.yml diff --git a/.gitlab/pipeline/zombienet/bridges.yml b/.gitlab/pipeline/zombienet/bridges.yml index 4278f59b1e9..9d7a8b93119 100644 --- 
a/.gitlab/pipeline/zombienet/bridges.yml +++ b/.gitlab/pipeline/zombienet/bridges.yml @@ -55,9 +55,9 @@ zombienet-bridges-0001-asset-transfer-works: - /home/nonroot/bridges-polkadot-sdk/bridges/testing/run-new-test.sh 0001-asset-transfer --docker - echo "Done" -zombienet-bridges-0002-mandatory-headers-synced-while-idle: +zombienet-bridges-0002-free-headers-synced-while-idle: extends: - .zombienet-bridges-common script: - - /home/nonroot/bridges-polkadot-sdk/bridges/testing/run-new-test.sh 0002-mandatory-headers-synced-while-idle --docker + - /home/nonroot/bridges-polkadot-sdk/bridges/testing/run-new-test.sh 0002-free-headers-synced-while-idle --docker - echo "Done" diff --git a/bridges/testing/framework/js-helpers/only-mandatory-headers-synced-when-idle.js b/bridges/testing/framework/js-helpers/multiple-headers-synced.js similarity index 61% rename from bridges/testing/framework/js-helpers/only-mandatory-headers-synced-when-idle.js rename to bridges/testing/framework/js-helpers/multiple-headers-synced.js index 979179245eb..a30efc82165 100644 --- a/bridges/testing/framework/js-helpers/only-mandatory-headers-synced-when-idle.js +++ b/bridges/testing/framework/js-helpers/multiple-headers-synced.js @@ -10,33 +10,23 @@ async function run(nodeName, networkInfo, args) { // start listening to new blocks let totalGrandpaHeaders = 0; - let initialParachainHeaderImported = false; + let totalParachainHeaders = 0; api.rpc.chain.subscribeNewHeads(async function (header) { const apiAtParent = await api.at(header.parentHash); const apiAtCurrent = await api.at(header.hash); const currentEvents = await apiAtCurrent.query.system.events(); - totalGrandpaHeaders += await utils.ensureOnlyMandatoryGrandpaHeadersImported( - bridgedChain, - apiAtParent, - apiAtCurrent, - currentEvents, - ); - initialParachainHeaderImported = await utils.ensureOnlyInitialParachainHeaderImported( - bridgedChain, - apiAtParent, - apiAtCurrent, - currentEvents, - ); + totalGrandpaHeaders += await utils.countGrandpaHeaderImports(bridgedChain, currentEvents); + totalParachainHeaders += await utils.countParachainHeaderImports(bridgedChain, currentEvents); }); // wait given time await new Promise(resolve => setTimeout(resolve, exitAfterSeconds * 1000)); - // if we haven't seen any new GRANDPA or parachain headers => fail - if (totalGrandpaHeaders == 0) { + // if we haven't seen many (>1) new GRANDPA or parachain headers => fail + if (totalGrandpaHeaders <= 1) { throw new Error("No bridged relay chain headers imported"); } - if (!initialParachainHeaderImported) { + if (totalParachainHeaders <= 1) { throw new Error("No bridged parachain headers imported"); } } diff --git a/bridges/testing/tests/0002-free-headers-synced-while-idle/rococo-to-westend.zndsl b/bridges/testing/tests/0002-free-headers-synced-while-idle/rococo-to-westend.zndsl new file mode 100644 index 00000000000..0f779caa87c --- /dev/null +++ b/bridges/testing/tests/0002-free-headers-synced-while-idle/rococo-to-westend.zndsl @@ -0,0 +1,20 @@ +Description: While relayer is idle, we only sync free Rococo (and a single Rococo BH) headers to Westend BH. 
+Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml +Creds: config + +# local chain spec gives `1u64 << 60` tokens to every endowed account: if it'll ever +# change, it'd need to be fixed here as well + +# //Charlie only submits free and mandatory relay chain headers, so the balance should stay the same +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" return is 1152921504606846976 within 30 seconds +# //Dave only submits free parachain headers, so the balance should stay the same +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5DAAnrj7VHTznn2AWBemMuyBwZWs6FNFjdyVXUeYum3PTXFy" return is 1152921504606846976 within 30 seconds + +# ensure that we have synced multiple relay and parachain headers while idle. This includes both +# headers that were generated while relay was offline and those in the next 100 seconds while script is active. +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/multiple-headers-synced.js with "300,rococo-at-westend" within 600 seconds + +# //Charlie only submits free and mandatory relay chain headers, so the balance should stay the same +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" return is 1152921504606846976 within 30 seconds +# //Dave only submits free parachain headers, so the balance should stay the same +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5DAAnrj7VHTznn2AWBemMuyBwZWs6FNFjdyVXUeYum3PTXFy" return is 1152921504606846976 within 30 seconds diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh b/bridges/testing/tests/0002-free-headers-synced-while-idle/run.sh similarity index 90% rename from bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh rename to bridges/testing/tests/0002-free-headers-synced-while-idle/run.sh index 32419dc84f5..9d19a9688f9 100755 --- a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh +++ b/bridges/testing/tests/0002-free-headers-synced-while-idle/run.sh @@ -22,7 +22,7 @@ echo # which is expected to be 60 seconds for the test environment. echo -e "Sleeping 90s before starting relayer ...\n" sleep 90 -${BASH_SOURCE%/*}/../../environments/rococo-westend/start_relayer.sh $rococo_dir $westend_dir relayer_pid +${BASH_SOURCE%/*}/../../environments/rococo-westend/start_relayer.sh $rococo_dir $westend_dir finality_relayer_pid parachains_relayer_pid messages_relayer_pid run_zndsl ${BASH_SOURCE%/*}/rococo-to-westend.zndsl $westend_dir run_zndsl ${BASH_SOURCE%/*}/westend-to-rococo.zndsl $rococo_dir diff --git a/bridges/testing/tests/0002-free-headers-synced-while-idle/westend-to-rococo.zndsl b/bridges/testing/tests/0002-free-headers-synced-while-idle/westend-to-rococo.zndsl new file mode 100644 index 00000000000..7a6f1ec379d --- /dev/null +++ b/bridges/testing/tests/0002-free-headers-synced-while-idle/westend-to-rococo.zndsl @@ -0,0 +1,20 @@ +Description: While relayer is idle, we only sync free Westend (and a single Westend BH) headers to Rococo BH. 
+Network: {{ENV_PATH}}/bridge_hub_rococo_local_network.toml
+Creds: config
+
+# local chain spec gives `1u64 << 60` tokens to every endowed account: if it'll ever
+# change, it'd need to be fixed here as well
+
+# //Charlie has initial balance
+bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" return is 1152921504606846976 within 30 seconds
+# //Dave has initial balance
+bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5DAAnrj7VHTznn2AWBemMuyBwZWs6FNFjdyVXUeYum3PTXFy" return is 1152921504606846976 within 30 seconds
+
+# ensure that we have synced multiple relay and parachain headers while idle. This includes both
+# headers that were generated while relay was offline and those in the next 100 seconds while script is active.
+bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/multiple-headers-synced.js with "300,westend-at-rococo" within 600 seconds
+
+# //Charlie only submits free and mandatory relay chain headers, so the balance should stay the same
+bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" return is 1152921504606846976 within 30 seconds
+# //Dave only submits free parachain headers, so the balance should stay the same
+bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5DAAnrj7VHTznn2AWBemMuyBwZWs6FNFjdyVXUeYum3PTXFy" return is 1152921504606846976 within 30 seconds
diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl
deleted file mode 100644
index 6e381f53773..00000000000
--- a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl
+++ /dev/null
@@ -1,8 +0,0 @@
-Description: While relayer is idle, we only sync mandatory Rococo (and a single Rococo BH) headers to Westend BH.
-Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml
-Creds: config
-
-# ensure that relayer is only syncing mandatory headers while idle. This includes both headers that were
-# generated while relay was offline and those in the next 100 seconds while script is active.
-bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/only-mandatory-headers-synced-when-idle.js with "300,rococo-at-westend" within 600 seconds
-
diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl
deleted file mode 100644
index b4b3e436791..00000000000
--- a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl
+++ /dev/null
@@ -1,7 +0,0 @@
-Description: While relayer is idle, we only sync mandatory Westend (and a single Westend BH) headers to Rococo BH.
-Network: {{ENV_PATH}}/bridge_hub_rococo_local_network.toml
-Creds: config
-
-# ensure that relayer is only syncing mandatory headers while idle. This includes both headers that were
-# generated while relay was offline and those in the next 100 seconds while script is active.
-bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/only-mandatory-headers-synced-when-idle.js with "300,westend-at-rococo" within 600 seconds
diff --git a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile
index 938f5cc45a1..196ba861f50 100644
--- a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile
+++ b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile
@@ -1,7 +1,7 @@
 # this image is built on top of existing Zombienet image
 ARG ZOMBIENET_IMAGE
 # this image uses substrate-relay image built elsewhere
-ARG SUBSTRATE_RELAY_IMAGE=docker.io/paritytech/substrate-relay:v1.2.1
+ARG SUBSTRATE_RELAY_IMAGE=docker.io/paritytech/substrate-relay:v1.5.0
 
 # metadata
 ARG VCS_REF
-- 
GitLab

From d212fc7a41fc72299913737c5fea2f3fcfe0a253 Mon Sep 17 00:00:00 2001
From: Javier Bullrich
Date: Fri, 26 Apr 2024 13:24:03 +0200
Subject: [PATCH 081/107] review-bot: reverted #4271 and added
 `workflow_dispatch` (#4293)

This PR includes two changes:
- added `workflow_dispatch` to review bot
- reverted #4271

### Added `workflow_dispatch` to review bot

This allows us, in the case that review-bot fails for some fork-related
reason, to trigger it manually, ensuring that we can overcome the problem
with the multiple actions while we look for a solution.

[screenshot: manually triggering the review-bot workflow from the GitHub UI]

### Reverted #4271

Unfortunately, the changes added in #4271 do not work in forks. Here is a
lengthy discussion of many individuals facing the same problem as me:
- [GitHub Action `pull_request` attribute empty in `workflow_run` event
  object for PR from forked repo #25220](https://github.com/orgs/community/discussions/25220)

So I had to revert it (but I updated the dependencies to latest).

#### Miscellaneous changes

I added a debug log at the end of review bot in case it fails, so we can
easily debug it without having to make a lot of boilerplate and forks to
duplicate the environment.
--- .github/workflows/review-bot.yml | 19 ++++++++++++++++++- .github/workflows/review-trigger.yml | 13 +++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/.github/workflows/review-bot.yml b/.github/workflows/review-bot.yml index fb877357b23..f1401406ae4 100644 --- a/.github/workflows/review-bot.yml +++ b/.github/workflows/review-bot.yml @@ -5,6 +5,12 @@ on: - Review-Trigger types: - completed + workflow_dispatch: + inputs: + pr-number: + description: "Number of the PR to evaluate" + required: true + type: number jobs: review-approvals: @@ -17,6 +23,12 @@ jobs: with: app-id: ${{ secrets.REVIEW_APP_ID }} private-key: ${{ secrets.REVIEW_APP_KEY }} + - name: Extract content of artifact + if: ${{ !inputs.pr-number }} + id: number + uses: Bullrich/extract-text-from-artifact@v1.0.1 + with: + artifact-name: pr_number - name: "Evaluates PR reviews and assigns reviewers" uses: paritytech/review-bot@v2.4.0 with: @@ -24,5 +36,10 @@ jobs: team-token: ${{ steps.app_token.outputs.token }} checks-token: ${{ steps.app_token.outputs.token }} # This is extracted from the triggering event - pr-number: ${{ github.event.workflow_run.pull_requests[0].number }} + pr-number: ${{ inputs.pr-number || steps.number.outputs.content }} request-reviewers: true + - name: Log payload + if: ${{ failure() || runner.debug }} + run: echo "::debug::$payload" + env: + payload: ${{ toJson(github.event) }} diff --git a/.github/workflows/review-trigger.yml b/.github/workflows/review-trigger.yml index 6437be161d3..ec4a62afc0c 100644 --- a/.github/workflows/review-trigger.yml +++ b/.github/workflows/review-trigger.yml @@ -58,3 +58,16 @@ jobs: env: GH_TOKEN: ${{ github.token }} COMMENTS: ${{ steps.comments.outputs.users }} + - name: Get PR number + env: + PR_NUMBER: ${{ github.event.pull_request.number }} + run: | + echo "Saving PR number: $PR_NUMBER" + mkdir -p ./pr + echo $PR_NUMBER > ./pr/pr_number + - uses: actions/upload-artifact@v4 + name: Save PR number + with: + name: pr_number + path: pr/ + retention-days: 5 -- GitLab From 9a48cd707ed7f4034aadb8dc05065080ad102037 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 26 Apr 2024 14:26:05 +0300 Subject: [PATCH 082/107] Bridges: added helper function to relay single GRANDPA proof + header (#4307) related to https://github.com/paritytech/parity-bridges-common/issues/2962 silent, because the actual code for subcommand is added in the `parity-bridges-common` repo, where binary lives --------- Co-authored-by: Adrian Catangiu --- .../src/cli/relay_headers.rs | 39 ++++++++++++++++++- .../lib-substrate-relay/src/finality/mod.rs | 37 +++++++++++++++++- 2 files changed, 73 insertions(+), 3 deletions(-) diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs index cf1957c7323..093f98ef21e 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs @@ -19,7 +19,10 @@ use async_trait::async_trait; use structopt::StructOpt; -use relay_utils::metrics::{GlobalMetrics, StandaloneMetric}; +use relay_utils::{ + metrics::{GlobalMetrics, StandaloneMetric}, + UniqueSaturatedInto, +}; use crate::{ cli::{bridge::*, chain_schema::*, PrometheusParams}, @@ -48,6 +51,21 @@ pub struct RelayHeadersParams { prometheus_params: PrometheusParams, } +/// Single header relaying params. 
+#[derive(StructOpt)]
+pub struct RelayHeaderParams {
+	#[structopt(flatten)]
+	source: SourceConnectionParams,
+	#[structopt(flatten)]
+	target: TargetConnectionParams,
+	#[structopt(flatten)]
+	target_sign: TargetSigningParams,
+	/// Number of the source chain header that we want to relay. It must have a persistent
+	/// storage proof at the [`Self::source`] node, otherwise the command will fail.
+	#[structopt(long)]
+	number: u128,
+}
+
 impl RelayHeadersParams {
 	fn headers_to_relay(&self) -> HeadersToRelay {
 		match (self.only_mandatory_headers, self.only_free_headers) {
@@ -89,4 +107,23 @@ pub trait HeadersRelayer: RelayToRelayHeadersCliBridge {
 		)
 		.await
 	}
+
+	/// Relay single header. No checks are made to ensure that the transaction will succeed.
+	async fn relay_header(data: RelayHeaderParams) -> anyhow::Result<()> {
+		let source_client = data.source.into_client::<Self::Source>().await?;
+		let target_client = data.target.into_client::<Self::Target>().await?;
+		let target_transactions_mortality = data.target_sign.target_transactions_mortality;
+		let target_sign = data.target_sign.to_keypair::<Self::Target>()?;
+
+		crate::finality::relay_single_header::<Self::Finality>(
+			source_client,
+			target_client,
+			crate::TransactionParams {
+				signer: target_sign,
+				mortality: target_transactions_mortality,
+			},
+			data.number.unique_saturated_into(),
+		)
+		.await
+	}
 }
diff --git a/bridges/relays/lib-substrate-relay/src/finality/mod.rs b/bridges/relays/lib-substrate-relay/src/finality/mod.rs
index a06857ae1d9..0293e1da224 100644
--- a/bridges/relays/lib-substrate-relay/src/finality/mod.rs
+++ b/bridges/relays/lib-substrate-relay/src/finality/mod.rs
@@ -25,13 +25,15 @@ use crate::{
 use async_trait::async_trait;
 use bp_header_chain::justification::{GrandpaJustification, JustificationVerificationContext};
-use finality_relay::{FinalityPipeline, FinalitySyncPipeline, HeadersToRelay};
+use finality_relay::{
+	FinalityPipeline, FinalitySyncPipeline, HeadersToRelay, SourceClient, TargetClient,
+};
 use pallet_bridge_grandpa::{Call as BridgeGrandpaCall, Config as BridgeGrandpaConfig};
 use relay_substrate_client::{
 	transaction_stall_timeout, AccountIdOf, AccountKeyPairOf, BlockNumberOf, CallOf, Chain,
 	ChainWithTransactions, Client, HashOf, HeaderOf, SyncHeader,
 };
-use relay_utils::metrics::MetricsParams;
+use relay_utils::{metrics::MetricsParams, TrackedTransactionStatus, TransactionTracker};
 use sp_core::Pair;
 use std::{fmt::Debug, marker::PhantomData};
@@ -274,3 +276,34 @@ pub async fn run<P: SubstrateFinalitySyncPipeline>(
 	.await
 	.map_err(|e| anyhow::format_err!("{}", e))
 }
+
+/// Relay single header. No checks are made to ensure that the transaction will succeed.
+pub async fn relay_single_header<P: SubstrateFinalitySyncPipeline>(
+	source_client: Client<P::SourceChain>,
+	target_client: Client<P::TargetChain>,
+	transaction_params: TransactionParams<AccountKeyPairOf<P::TargetChain>>,
+	header_number: BlockNumberOf<P::SourceChain>,
+) -> anyhow::Result<()> {
+	let finality_source = SubstrateFinalitySource::<P>::new(source_client, None);
+	let (header, proof) = finality_source.header_and_finality_proof(header_number).await?;
+	let Some(proof) = proof else {
+		return Err(anyhow::format_err!(
+			"Unable to submit {} header #{} to {}: no finality proof",
+			P::SourceChain::NAME,
+			header_number,
+			P::TargetChain::NAME,
+		));
+	};
+
+	let finality_target = SubstrateFinalityTarget::<P>::new(target_client, transaction_params);
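+	// Submit the proof and wait for the tracker to resolve the transaction's status. The
+	// `false` argument is assumed to be the "is free execution expected" flag, i.e. this
+	// submission is paid for like a regular transaction.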
+	let tx_tracker = finality_target.submit_finality_proof(header, proof, false).await?;
+	match tx_tracker.wait().await {
+		TrackedTransactionStatus::Finalized(_) => Ok(()),
+		TrackedTransactionStatus::Lost => Err(anyhow::format_err!(
+			"Transaction with {} header #{} is considered lost at {}",
+			P::SourceChain::NAME,
+			header_number,
+			P::TargetChain::NAME,
+		)),
+	}
+}
-- 
GitLab


From 97f74253387ee43e30c25fd970b5ae4cc1a722d7 Mon Sep 17 00:00:00 2001
From: gui
Date: Fri, 26 Apr 2024 21:27:14 +0900
Subject: [PATCH 083/107] Try state: log errors instead of logging the number
 of errors and discarding them (#4265)

Currently we discard the errors' content.
We should at least log it.

Code now is more similar to what is written in try_on_runtime_upgrade.

label should be R0

---------

Co-authored-by: Oliver Tale-Yazdi
Co-authored-by: Liam Aharon
Co-authored-by: Javier Bullrich
---
 .../support/src/traits/try_runtime/mod.rs | 23 +++++++++++++------
 1 file changed, 16 insertions(+), 7 deletions(-)

diff --git a/substrate/frame/support/src/traits/try_runtime/mod.rs b/substrate/frame/support/src/traits/try_runtime/mod.rs
index bec2dbf549a..c1bf1feb19e 100644
--- a/substrate/frame/support/src/traits/try_runtime/mod.rs
+++ b/substrate/frame/support/src/traits/try_runtime/mod.rs
@@ -161,22 +161,31 @@ impl<BlockNumber: Clone + sp_std::fmt::Debug + AtLeast32BitUnsigned> TryState<BlockNumber>
 			Select::None => Ok(()),
 			Select::All => {
-				let mut error_count = 0;
+				let mut errors = Vec::<TryRuntimeError>::new();
+
 				for_tuples!(#(
-					if let Err(_) = Tuple::try_state(n.clone(), targets.clone()) {
-						error_count += 1;
+					if let Err(err) = Tuple::try_state(n.clone(), targets.clone()) {
+						errors.push(err);
 					}
 				)*);
 
-				if error_count > 0 {
+				if !errors.is_empty() {
 					log::error!(
 						target: "try-runtime",
-						"{} pallets exited with errors while executing try_state checks.",
-						error_count
+						"Detected errors while executing `try_state`:",
 					);
 
+					errors.iter().for_each(|err| {
+						log::error!(
+							target: "try-runtime",
+							"{:?}",
+							err
+						);
+					});
+
 					return Err(
-						"Detected errors while executing try_state checks. See logs for more info."
+						"Detected errors while executing `try_state` checks. See logs for more \
+						info."
 							.into(),
 					)
 				}
-- 
GitLab


From 988e30f102b155ab68d664d62ac5c73da171659a Mon Sep 17 00:00:00 2001
From: Tsvetomir Dimitrov
Date: Fri, 26 Apr 2024 16:28:08 +0300
Subject: [PATCH 084/107] Implementation of the new validator disabling
 strategy (#2226)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Closes https://github.com/paritytech/polkadot-sdk/issues/1966,
https://github.com/paritytech/polkadot-sdk/issues/1963 and
https://github.com/paritytech/polkadot-sdk/issues/1962.

Disabling strategy specification
[here](https://github.com/paritytech/polkadot-sdk/pull/2955). (Updated 13/02/2024)

Implements:
* validator disabling for a whole era instead of just a session
* no more than 1/3 of the validators in the active set are disabled

Removes:
* `DisableStrategy` enum - now each validator committing an offence is disabled.
* New era is not forced if too many validators are disabled.

Before this PR, not all offenders were disabled. A decision was made based on
[`enum DisableStrategy`](https://github.com/paritytech/polkadot-sdk/blob/bbb6631641f9adba30c0ee6f4d11023a424dd362/substrate/primitives/staking/src/offence.rs#L54).
Some offenders were disabled for a whole era, some just for a session, some were
not disabled at all.

This PR changes the disabling behaviour. Now a validator committing an offence
is disabled immediately until the end of the current era.
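For illustration, the new hook is small enough that a runtime wanting to opt out of
runtime-side disabling entirely could plug a no-op strategy into
`pallet_staking::Config::DisablingStrategy` (a sketch only; `NeverDisable` is a made-up
name, and it assumes `pallet_staking::{Config, DisablingStrategy}` and
`sp_staking::EraIndex` are in scope):

    pub struct NeverDisable;

    impl<T: Config> DisablingStrategy<T> for NeverDisable {
        fn decision(
            _offender_stash: &T::AccountId,
            _slash_era: EraIndex,
            _currently_disabled: &Vec<u32>,
        ) -> Option<u32> {
            // Offences are still reported and slashed as usual; we simply never
            // pick a validator index to disable.
            None
        }
    }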
Some implementation notes:
* `OffendingValidators` in the staking pallet keeps all offenders (this is not changed).
  However, its type is changed from `Vec<(u32, bool)>` to `Vec<u32>`. The reason is simple -
  each offender is getting disabled so the bool doesn't make sense anymore.
* When a validator is disabled it is first added to `OffendingValidators` and then to
  `DisabledValidators`. This is done in
  [`add_offending_validator`](https://github.com/paritytech/polkadot-sdk/blob/bbb6631641f9adba30c0ee6f4d11023a424dd362/substrate/frame/staking/src/slashing.rs#L325)
  from the staking pallet.
* In [`rotate_session`](https://github.com/paritytech/polkadot-sdk/blob/bdbe98297032e21a553bf191c530690b1d591405/substrate/frame/session/src/lib.rs#L623)
  the `end_session` also calls
  [`end_era`](https://github.com/paritytech/polkadot-sdk/blob/bbb6631641f9adba30c0ee6f4d11023a424dd362/substrate/frame/staking/src/pallet/impls.rs#L490)
  when an era ends. In this case `OffendingValidators` are cleared **(1)**.
* Then in [`rotate_session`](https://github.com/paritytech/polkadot-sdk/blob/bdbe98297032e21a553bf191c530690b1d591405/substrate/frame/session/src/lib.rs#L623)
  `DisabledValidators` are cleared **(2)**.
* And finally (still in `rotate_session`) a call to
  [`start_session`](https://github.com/paritytech/polkadot-sdk/blob/bbb6631641f9adba30c0ee6f4d11023a424dd362/substrate/frame/staking/src/pallet/impls.rs#L430)
  repopulates the disabled validators **(3)**.
* The reason for this complication is that the session pallet knows nothing about eras. To
  overcome this, on each new session the disabled list is repopulated (points 2 and 3). The
  staking pallet knows when a new era starts, so with point 1 it ensures that the offenders
  list is cleared.

---------

Co-authored-by: ordian
Co-authored-by: ordian
Co-authored-by: Maciej
Co-authored-by: Gonçalo Pestana
Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com>
Co-authored-by: command-bot <>
Co-authored-by: Ankan <10196091+Ank4n@users.noreply.github.com>
---
 .../parachains/src/disputes/slashing.rs       |  11 +-
 polkadot/runtime/test-runtime/src/lib.rs      |   3 +-
 polkadot/runtime/westend/src/lib.rs           |   5 +-
 .../functional/0010-validator-disabling.toml  |   2 +-
 prdoc/pr_2226.prdoc                           |  28 +
 substrate/bin/node/runtime/src/lib.rs         |   3 +-
 substrate/frame/babe/src/mock.rs              |   3 +-
 substrate/frame/beefy/src/mock.rs             |   3 +-
 .../test-staking-e2e/src/lib.rs               | 162 +---
 .../test-staking-e2e/src/mock.rs              |  27 +-
 substrate/frame/fast-unstake/src/mock.rs      |   2 +-
 substrate/frame/grandpa/src/mock.rs           |   3 +-
 substrate/frame/im-online/src/lib.rs          |   6 +-
 substrate/frame/im-online/src/tests.rs        |   3 -
 .../nomination-pools/benchmarking/src/mock.rs |   2 +-
 .../nomination-pools/test-staking/src/mock.rs |   2 +-
 .../frame/offences/benchmarking/src/mock.rs   |   2 +-
 substrate/frame/offences/src/lib.rs           |   1 -
 substrate/frame/offences/src/migration.rs     |   9 +-
 substrate/frame/offences/src/mock.rs          |   3 +-
 substrate/frame/root-offences/src/lib.rs      |   4 +-
 substrate/frame/root-offences/src/mock.rs     |   3 +-
 .../frame/session/benchmarking/src/mock.rs    |   2 +-
 substrate/frame/session/src/lib.rs            |   2 +-
 substrate/frame/staking/CHANGELOG.md          |  19 +
 substrate/frame/staking/src/lib.rs            |  76 ++
 substrate/frame/staking/src/migrations.rs     |  57 +-
 substrate/frame/staking/src/mock.rs           |  22 +-
 substrate/frame/staking/src/pallet/impls.rs   |  33 +-
 substrate/frame/staking/src/pallet/mod.rs     |  35 +-
 substrate/frame/staking/src/slashing.rs       |  80 +-
 substrate/frame/staking/src/tests.rs          | 834 ++++++++++--------
 substrate/primitives/staking/src/offence.rs   |  32 -
 33 files changed,
777 insertions(+), 702 deletions(-) create mode 100644 prdoc/pr_2226.prdoc diff --git a/polkadot/runtime/parachains/src/disputes/slashing.rs b/polkadot/runtime/parachains/src/disputes/slashing.rs index d0c74e4bc95..a61d0c89983 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing.rs +++ b/polkadot/runtime/parachains/src/disputes/slashing.rs @@ -64,7 +64,7 @@ use sp_runtime::{ KeyTypeId, Perbill, }; use sp_session::{GetSessionNumber, GetValidatorCount}; -use sp_staking::offence::{DisableStrategy, Kind, Offence, OffenceError, ReportOffence}; +use sp_staking::offence::{Kind, Offence, OffenceError, ReportOffence}; use sp_std::{ collections::{btree_map::Entry, btree_set::BTreeSet}, prelude::*, @@ -134,15 +134,6 @@ where self.time_slot.clone() } - fn disable_strategy(&self) -> DisableStrategy { - match self.kind { - SlashingOffenceKind::ForInvalid => DisableStrategy::Always, - // in the future we might change it based on number of disputes initiated: - // - SlashingOffenceKind::AgainstValid => DisableStrategy::Never, - } - } - fn slash_fraction(&self, _offenders: u32) -> Perbill { self.slash_fraction } diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 514643c0a20..d0f1ff0035f 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -313,7 +313,6 @@ parameter_types! { pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const MaxExposurePageSize: u32 = 64; pub const MaxNominators: u32 = 256; - pub storage OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub const MaxAuthorities: u32 = 100_000; pub const OnChainMaxWinners: u32 = u32::MAX; // Unbounded number of election targets and voters. @@ -349,7 +348,6 @@ impl pallet_staking::Config for Runtime { type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type MaxExposurePageSize = MaxExposurePageSize; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = onchain::OnChainExecution; @@ -364,6 +362,7 @@ impl pallet_staking::Config for Runtime { type BenchmarkingConfig = runtime_common::StakingBenchmarkingConfig; type EventListeners = (); type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! { diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 7924939c79b..03ecd5c070b 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -613,7 +613,6 @@ parameter_types! { // this is an unbounded number. We just set it to a reasonably high value, 1 full page // of nominators. 
pub const MaxNominators: u32 = 64; - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub const MaxNominations: u32 = ::LIMIT as u32; pub const MaxControllersInDeprecationBatch: u32 = 751; } @@ -634,7 +633,6 @@ impl pallet_staking::Config for Runtime { type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type MaxExposurePageSize = MaxExposurePageSize; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::OnChainExecution; @@ -647,6 +645,7 @@ impl pallet_staking::Config for Runtime { type BenchmarkingConfig = runtime_common::StakingBenchmarkingConfig; type EventListeners = NominationPools; type WeightInfo = weights::pallet_staking::WeightInfo; + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_fast_unstake::Config for Runtime { @@ -1649,7 +1648,7 @@ pub mod migrations { } /// Unreleased migrations. Add new ones here: - pub type Unreleased = (); + pub type Unreleased = (pallet_staking::migrations::v15::MigrateV14ToV15,); } /// Unchecked extrinsic type as expected by this runtime. diff --git a/polkadot/zombienet_tests/functional/0010-validator-disabling.toml b/polkadot/zombienet_tests/functional/0010-validator-disabling.toml index c9d79c5f8f2..806f34d7f76 100644 --- a/polkadot/zombienet_tests/functional/0010-validator-disabling.toml +++ b/polkadot/zombienet_tests/functional/0010-validator-disabling.toml @@ -21,7 +21,7 @@ requests = { memory = "2G", cpu = "1" } [[relaychain.node_groups]] name = "honest-validator" count = 3 - args = ["-lparachain=debug"] + args = ["-lparachain=debug,runtime::staking=debug"] [[relaychain.node_groups]] image = "{{MALUS_IMAGE}}" diff --git a/prdoc/pr_2226.prdoc b/prdoc/pr_2226.prdoc new file mode 100644 index 00000000000..f03540a50f6 --- /dev/null +++ b/prdoc/pr_2226.prdoc @@ -0,0 +1,28 @@ +title: Validator disabling strategy in runtime + +doc: + - audience: Node Operator + description: | + On each committed offence (no matter slashable or not) the offending validator will be + disabled for a whole era. + - audience: Runtime Dev + description: | + The disabling strategy in staking pallet is no longer hardcoded but abstracted away via + `DisablingStrategy` trait. The trait contains a single function (make_disabling_decision) which + is called for each offence. The function makes a decision if (and which) validators should be + disabled. A default implementation is provided - `UpToLimitDisablingStrategy`. It + will be used on Kusama and Polkadot. In nutshell `UpToLimitDisablingStrategy` + disables offenders up to the configured threshold. Offending validators are not disabled for + offences in previous eras. The threshold is controlled via `DISABLING_LIMIT_FACTOR` (a generic + parameter of `UpToLimitDisablingStrategy`). + +migrations: + db: [] + runtime: + - reference: pallet-staking + description: | + Renames `OffendingValidators` storage item to `DisabledValidators` and changes its type from + `Vec<(u32, bool)>` to `Vec`. + +crates: + - name: pallet-staking \ No newline at end of file diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 43c617023bc..0caaa8c7322 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -654,7 +654,6 @@ parameter_types! { pub const SlashDeferDuration: sp_staking::EraIndex = 24 * 7; // 1/4 the bonding duration. 
pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const MaxNominators: u32 = 64; - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub const MaxControllersInDeprecationBatch: u32 = 5900; pub OffchainRepeat: BlockNumber = 5; pub HistoryDepth: u32 = 84; @@ -690,7 +689,6 @@ impl pallet_staking::Config for Runtime { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; type MaxExposurePageSize = ConstU32<256>; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::OnChainExecution; type VoterList = VoterList; @@ -703,6 +701,7 @@ impl pallet_staking::Config for Runtime { type EventListeners = NominationPools; type WeightInfo = pallet_staking::weights::SubstrateWeight; type BenchmarkingConfig = StakingBenchmarkingConfig; + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_fast_unstake::Config for Runtime { diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index ec54275278e..395a86e6528 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -144,7 +144,6 @@ parameter_types! { pub const BondingDuration: EraIndex = 3; pub const SlashDeferDuration: EraIndex = 0; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(16); pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); } @@ -174,7 +173,6 @@ impl pallet_staking::Config for Test { type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; @@ -187,6 +185,7 @@ impl pallet_staking::Config for Test { type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_offences::Config for Test { diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index 1c55adc8de4..0b87de6bf5d 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -158,7 +158,6 @@ parameter_types! 
{ pub const SessionsPerEra: SessionIndex = 3; pub const BondingDuration: EraIndex = 3; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub static ElectionsBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default().build(); } @@ -188,7 +187,6 @@ impl pallet_staking::Config for Test { type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; @@ -201,6 +199,7 @@ impl pallet_staking::Config for Test { type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_offences::Config for Test { diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs index 83083c91209..c00bb66ea13 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs @@ -23,7 +23,6 @@ pub(crate) const LOG_TARGET: &str = "tests::e2e-epm"; use frame_support::{assert_err, assert_noop, assert_ok}; use mock::*; use sp_core::Get; -use sp_npos_elections::{to_supports, StakedAssignment}; use sp_runtime::Perbill; use crate::mock::RuntimeOrigin; @@ -127,75 +126,48 @@ fn offchainify_works() { } #[test] -/// Replicates the Kusama incident of 8th Dec 2022 and its resolution through the governance +/// Inspired by the Kusama incident of 8th Dec 2022 and its resolution through the governance /// fallback. /// -/// After enough slashes exceeded the `Staking::OffendingValidatorsThreshold`, the staking pallet -/// set `Forcing::ForceNew`. When a new session starts, staking will start to force a new era and -/// calls ::elect(). If at this point EPM and the staking miners did not -/// have enough time to queue a new solution (snapshot + solution submission), the election request -/// fails. If there is no election fallback mechanism in place, EPM enters in emergency mode. -/// Recovery: Once EPM is in emergency mode, subsequent calls to `elect()` will fail until a new -/// solution is added to EPM's `QueuedSolution` queue. This can be achieved through -/// `Call::set_emergency_election_result` or `Call::governance_fallback` dispatchables. Once a new -/// solution is added to the queue, EPM phase transitions to `Phase::Off` and the election flow -/// restarts. Note that in this test case, the emergency throttling is disabled. -fn enters_emergency_phase_after_forcing_before_elect() { +/// Mass slash of validators shouldn't disable more than 1/3 of them (the byzantine threshold). Also +/// no new era should be forced which could lead to EPM entering emergency mode. 
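+/// The number of validators that can end up disabled is bounded by
+/// `UpToLimitDisablingStrategy::disable_limit`, i.e. at most `(active_set_len - 1) / 3`
+/// with the byzantine-threshold factor used by this mock.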
+fn mass_slash_doesnt_enter_emergency_phase() { let epm_builder = EpmExtBuilder::default().disable_emergency_throttling(); - let (ext, pool_state, _) = ExtBuilder::default().epm(epm_builder).build_offchainify(); - - execute_with(ext, || { - log!( - trace, - "current validators (staking): {:?}", - >::validators() - ); - let session_validators_before = Session::validators(); - - roll_to_epm_off(); - assert!(ElectionProviderMultiPhase::current_phase().is_off()); + let staking_builder = StakingExtBuilder::default().validator_count(7); + let (mut ext, _, _) = ExtBuilder::default() + .epm(epm_builder) + .staking(staking_builder) + .build_offchainify(); + ext.execute_with(|| { assert_eq!(pallet_staking::ForceEra::::get(), pallet_staking::Forcing::NotForcing); - // slashes so that staking goes into `Forcing::ForceNew`. - slash_through_offending_threshold(); - assert_eq!(pallet_staking::ForceEra::::get(), pallet_staking::Forcing::ForceNew); + let active_set_size_before_slash = Session::validators().len(); - advance_session_delayed_solution(pool_state.clone()); - assert!(ElectionProviderMultiPhase::current_phase().is_emergency()); - log_current_time(); + // Slash more than 1/3 of the active validators + let mut slashed = slash_half_the_active_set(); - let era_before_delayed_next = Staking::current_era(); - // try to advance 2 eras. - assert!(start_next_active_era_delayed_solution(pool_state.clone()).is_ok()); - assert_eq!(Staking::current_era(), era_before_delayed_next); - assert!(start_next_active_era(pool_state).is_err()); - assert_eq!(Staking::current_era(), era_before_delayed_next); + let active_set_size_after_slash = Session::validators().len(); - // EPM is still in emergency phase. - assert!(ElectionProviderMultiPhase::current_phase().is_emergency()); + // active set should stay the same before and after the slash + assert_eq!(active_set_size_before_slash, active_set_size_after_slash); - // session validator set remains the same. - assert_eq!(Session::validators(), session_validators_before); - - // performs recovery through the set emergency result. - let supports = to_supports(&vec![ - StakedAssignment { who: 21, distribution: vec![(21, 10)] }, - StakedAssignment { who: 31, distribution: vec![(21, 10), (31, 10)] }, - StakedAssignment { who: 41, distribution: vec![(41, 10)] }, - ]); - assert!(ElectionProviderMultiPhase::set_emergency_election_result( - RuntimeOrigin::root(), - supports - ) - .is_ok()); + // Slashed validators are disabled up to a limit + slashed.truncate( + pallet_staking::UpToLimitDisablingStrategy::::disable_limit( + active_set_size_after_slash, + ), + ); - // EPM can now roll to signed phase to proceed with elections. The validator set is the - // expected (ie. set through `set_emergency_election_result`). - roll_to_epm_signed(); - //assert!(ElectionProviderMultiPhase::current_phase().is_signed()); - assert_eq!(Session::validators(), vec![21, 31, 41]); - assert_eq!(Staking::current_era(), era_before_delayed_next.map(|e| e + 1)); + // Find the indices of the disabled validators + let active_set = Session::validators(); + let expected_disabled = slashed + .into_iter() + .map(|d| active_set.iter().position(|a| *a == d).unwrap() as u32) + .collect::>(); + + assert_eq!(pallet_staking::ForceEra::::get(), pallet_staking::Forcing::NotForcing); + assert_eq!(Session::disabled_validators(), expected_disabled); }); } @@ -253,77 +225,7 @@ fn continuous_slashes_below_offending_threshold() { } #[test] -/// Slashed validator sets intentions in the same era of slashing. 
-/// -/// When validators are slashed, they are chilled and removed from the current `VoterList`. Thus, -/// the slashed validator should not be considered in the next validator set. However, if the -/// slashed validator sets its intention to validate again in the same era when it was slashed and -/// chilled, the validator may not be removed from the active validator set across eras, provided -/// it would selected in the subsequent era if there was no slash. Nominators of the slashed -/// validator will also be slashed and chilled, as expected, but the nomination intentions will -/// remain after the validator re-set the intention to be validating again. -/// -/// This behaviour is due to removing implicit chill upon slash -/// . -/// -/// Related to . -fn set_validation_intention_after_chilled() { - use frame_election_provider_support::SortedListProvider; - use pallet_staking::{Event, Forcing, Nominators}; - - let (ext, pool_state, _) = ExtBuilder::default() - .epm(EpmExtBuilder::default()) - .staking(StakingExtBuilder::default()) - .build_offchainify(); - - execute_with(ext, || { - assert_eq!(active_era(), 0); - // validator is part of the validator set. - assert!(Session::validators().contains(&41)); - assert!(::VoterList::contains(&41)); - - // nominate validator 81. - assert_ok!(Staking::nominate(RuntimeOrigin::signed(21), vec![41])); - assert_eq!(Nominators::::get(21).unwrap().targets, vec![41]); - - // validator is slashed. it is removed from the `VoterList` through chilling but in the - // current era, the validator is still part of the active validator set. - add_slash(&41); - assert!(Session::validators().contains(&41)); - assert!(!::VoterList::contains(&41)); - assert_eq!( - staking_events(), - [ - Event::Chilled { stash: 41 }, - Event::ForceEra { mode: Forcing::ForceNew }, - Event::SlashReported { - validator: 41, - slash_era: 0, - fraction: Perbill::from_percent(10) - } - ], - ); - - // after the nominator is slashed and chilled, the nominations remain. - assert_eq!(Nominators::::get(21).unwrap().targets, vec![41]); - - // validator sets intention to stake again in the same era it was chilled. - assert_ok!(Staking::validate(RuntimeOrigin::signed(41), Default::default())); - - // progress era and check that the slashed validator is still part of the validator - // set. - assert!(start_next_active_era(pool_state).is_ok()); - assert_eq!(active_era(), 1); - assert!(Session::validators().contains(&41)); - assert!(::VoterList::contains(&41)); - - // nominations are still active as before the slash. - assert_eq!(Nominators::::get(21).unwrap().targets, vec![41]); - }) -} - -#[test] -/// Active ledger balance may fall below ED if account chills before unbonding. +/// Active ledger balance may fall below ED if account chills before unbounding. /// /// Unbonding call fails if the remaining ledger's stash balance falls below the existential /// deposit. 
However, if the stash is chilled before unbonding, the ledger's active balance may diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index a727e3bf816..8f1775a7e59 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -35,7 +35,7 @@ use sp_runtime::{ transaction_validity, BuildStorage, PerU16, Perbill, Percent, }; use sp_staking::{ - offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, + offence::{OffenceDetails, OnOffenceHandler}, EraIndex, SessionIndex, }; use sp_std::prelude::*; @@ -236,7 +236,6 @@ parameter_types! { pub const SessionsPerEra: sp_staking::SessionIndex = 2; pub static BondingDuration: sp_staking::EraIndex = 28; pub const SlashDeferDuration: sp_staking::EraIndex = 7; // 1/4 the bonding duration. - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(40); pub HistoryDepth: u32 = 84; } @@ -290,6 +289,8 @@ parameter_types! { /// Upper limit on the number of NPOS nominations. const MAX_QUOTA_NOMINATIONS: u32 = 16; +/// Disabling factor set explicitly to byzantine threshold +pub(crate) const SLASHING_DISABLING_FACTOR: usize = 3; impl pallet_staking::Config for Runtime { type Currency = Balances; @@ -308,7 +309,6 @@ impl pallet_staking::Config for Runtime { type EraPayout = (); type NextNewSession = Session; type MaxExposurePageSize = ConstU32<256>; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::OnChainExecution; type VoterList = BagsList; @@ -320,6 +320,7 @@ impl pallet_staking::Config for Runtime { type EventListeners = Pools; type WeightInfo = pallet_staking::weights::SubstrateWeight; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl frame_system::offchain::SendTransactionTypes for Runtime @@ -871,7 +872,6 @@ pub(crate) fn on_offence_now( offenders, slash_fraction, Staking::eras_start_session_index(now).unwrap(), - DisableStrategy::WhenSlashed, ); } @@ -886,19 +886,16 @@ pub(crate) fn add_slash(who: &AccountId) { ); } -// Slashes enough validators to cross the `Staking::OffendingValidatorsThreshold`. -pub(crate) fn slash_through_offending_threshold() { - let validators = Session::validators(); - let mut remaining_slashes = - ::OffendingValidatorsThreshold::get() * - validators.len() as u32; +// Slashes 1/2 of the active set. Returns the `AccountId`s of the slashed validators. 
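+// Note that the integer division rounds down, so with an odd active set exactly
+// floor(len / 2) validators are slashed.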
+pub(crate) fn slash_half_the_active_set() -> Vec { + let mut slashed = Session::validators(); + slashed.truncate(slashed.len() / 2); - for v in validators.into_iter() { - if remaining_slashes != 0 { - add_slash(&v); - remaining_slashes -= 1; - } + for v in slashed.iter() { + add_slash(v); } + + slashed } // Slashes a percentage of the active nominators that haven't been slashed yet, with diff --git a/substrate/frame/fast-unstake/src/mock.rs b/substrate/frame/fast-unstake/src/mock.rs index b731cb822f3..d876f9f6171 100644 --- a/substrate/frame/fast-unstake/src/mock.rs +++ b/substrate/frame/fast-unstake/src/mock.rs @@ -134,7 +134,6 @@ impl pallet_staking::Config for Runtime { type NextNewSession = (); type HistoryDepth = ConstU32<84>; type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = (); type ElectionProvider = MockElection; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; @@ -145,6 +144,7 @@ impl pallet_staking::Config for Runtime { type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } pub struct BalanceToU256; diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index 4a21da655e5..2d54f525b1f 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -146,7 +146,6 @@ parameter_types! { pub const SessionsPerEra: SessionIndex = 3; pub const BondingDuration: EraIndex = 3; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub static ElectionsBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default().build(); } @@ -176,7 +175,6 @@ impl pallet_staking::Config for Test { type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; @@ -189,6 +187,7 @@ impl pallet_staking::Config for Test { type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_offences::Config for Test { diff --git a/substrate/frame/im-online/src/lib.rs b/substrate/frame/im-online/src/lib.rs index 239b47834d1..f91a473e53d 100644 --- a/substrate/frame/im-online/src/lib.rs +++ b/substrate/frame/im-online/src/lib.rs @@ -104,7 +104,7 @@ use sp_runtime::{ PerThing, Perbill, Permill, RuntimeDebug, SaturatedConversion, }; use sp_staking::{ - offence::{DisableStrategy, Kind, Offence, ReportOffence}, + offence::{Kind, Offence, ReportOffence}, SessionIndex, }; use sp_std::prelude::*; @@ -847,10 +847,6 @@ impl Offence for UnresponsivenessOffence { self.session_index } - fn disable_strategy(&self) -> DisableStrategy { - DisableStrategy::Never - } - fn slash_fraction(&self, offenders: u32) -> Perbill { // the formula is min((3 * (k - (n / 10 + 1))) / n, 1) * 0.07 // basically, 10% can be offline with no slash, but after that, it linearly climbs up to 7% diff --git a/substrate/frame/im-online/src/tests.rs b/substrate/frame/im-online/src/tests.rs index f9959593494..12333d59ef8 100644 --- a/substrate/frame/im-online/src/tests.rs +++ 
b/substrate/frame/im-online/src/tests.rs @@ -50,9 +50,6 @@ fn test_unresponsiveness_slash_fraction() { dummy_offence.slash_fraction(17), Perbill::from_parts(46200000), // 4.62% ); - - // Offline offences should never lead to being disabled. - assert_eq!(dummy_offence.disable_strategy(), DisableStrategy::Never); } #[test] diff --git a/substrate/frame/nomination-pools/benchmarking/src/mock.rs b/substrate/frame/nomination-pools/benchmarking/src/mock.rs index a59f8f3f40e..2752d53a6b9 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/mock.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/mock.rs @@ -111,7 +111,6 @@ impl pallet_staking::Config for Runtime { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = (); type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = (); type ElectionProvider = frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; type GenesisElectionProvider = Self::ElectionProvider; @@ -124,6 +123,7 @@ impl pallet_staking::Config for Runtime { type EventListeners = Pools; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! { diff --git a/substrate/frame/nomination-pools/test-staking/src/mock.rs b/substrate/frame/nomination-pools/test-staking/src/mock.rs index 2ec47e0d164..93a05ddfae9 100644 --- a/substrate/frame/nomination-pools/test-staking/src/mock.rs +++ b/substrate/frame/nomination-pools/test-staking/src/mock.rs @@ -125,7 +125,6 @@ impl pallet_staking::Config for Runtime { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = (); type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = (); type ElectionProvider = frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; type GenesisElectionProvider = Self::ElectionProvider; @@ -138,6 +137,7 @@ impl pallet_staking::Config for Runtime { type EventListeners = Pools; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! { diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index 27129e73c71..eeaa1364504 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -174,7 +174,6 @@ impl pallet_staking::Config for Test { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = (); type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; @@ -186,6 +185,7 @@ impl pallet_staking::Config for Test { type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_im_online::Config for Test { diff --git a/substrate/frame/offences/src/lib.rs b/substrate/frame/offences/src/lib.rs index 1c7ffeca719..a328b2fee4e 100644 --- a/substrate/frame/offences/src/lib.rs +++ b/substrate/frame/offences/src/lib.rs @@ -132,7 +132,6 @@ where &concurrent_offenders, &slash_perbill, offence.session_index(), - offence.disable_strategy(), ); // Deposit the event. 
diff --git a/substrate/frame/offences/src/migration.rs b/substrate/frame/offences/src/migration.rs index 3b5cf3ce926..199f4749136 100644 --- a/substrate/frame/offences/src/migration.rs +++ b/substrate/frame/offences/src/migration.rs @@ -23,7 +23,7 @@ use frame_support::{ weights::Weight, Twox64Concat, }; -use sp_staking::offence::{DisableStrategy, OnOffenceHandler}; +use sp_staking::offence::OnOffenceHandler; use sp_std::vec::Vec; #[cfg(feature = "try-runtime")] @@ -106,12 +106,7 @@ pub fn remove_deferred_storage() -> Weight { let deferred = >::take(); log::info!(target: LOG_TARGET, "have {} deferred offences, applying.", deferred.len()); for (offences, perbill, session) in deferred.iter() { - let consumed = T::OnOffenceHandler::on_offence( - offences, - perbill, - *session, - DisableStrategy::WhenSlashed, - ); + let consumed = T::OnOffenceHandler::on_offence(offences, perbill, *session); weight = weight.saturating_add(consumed); } diff --git a/substrate/frame/offences/src/mock.rs b/substrate/frame/offences/src/mock.rs index 31d5f805f3e..9a3120e41ea 100644 --- a/substrate/frame/offences/src/mock.rs +++ b/substrate/frame/offences/src/mock.rs @@ -33,7 +33,7 @@ use sp_runtime::{ BuildStorage, Perbill, }; use sp_staking::{ - offence::{self, DisableStrategy, Kind, OffenceDetails}, + offence::{self, Kind, OffenceDetails}, SessionIndex, }; @@ -51,7 +51,6 @@ impl offence::OnOffenceHandler _offenders: &[OffenceDetails], slash_fraction: &[Perbill], _offence_session: SessionIndex, - _disable_strategy: DisableStrategy, ) -> Weight { OnOffencePerbill::mutate(|f| { *f = slash_fraction.to_vec(); diff --git a/substrate/frame/root-offences/src/lib.rs b/substrate/frame/root-offences/src/lib.rs index 24d259ed1d4..6531080b8d1 100644 --- a/substrate/frame/root-offences/src/lib.rs +++ b/substrate/frame/root-offences/src/lib.rs @@ -33,7 +33,7 @@ use alloc::vec::Vec; use pallet_session::historical::IdentificationTuple; use pallet_staking::{BalanceOf, Exposure, ExposureOf, Pallet as Staking}; use sp_runtime::Perbill; -use sp_staking::offence::{DisableStrategy, OnOffenceHandler}; +use sp_staking::offence::OnOffenceHandler; pub use pallet::*; @@ -128,7 +128,7 @@ pub mod pallet { T::AccountId, IdentificationTuple, Weight, - >>::on_offence(&offenders, &slash_fraction, session_index, DisableStrategy::WhenSlashed); + >>::on_offence(&offenders, &slash_fraction, session_index); } } } diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs index 626db138c2b..7e7332c3f7e 100644 --- a/substrate/frame/root-offences/src/mock.rs +++ b/substrate/frame/root-offences/src/mock.rs @@ -133,7 +133,6 @@ parameter_types! 
{ pub static SlashDeferDuration: EraIndex = 0; pub const BondingDuration: EraIndex = 3; pub static LedgerSlashPerEra: (BalanceOf, BTreeMap>) = (Zero::zero(), BTreeMap::new()); - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(75); } impl pallet_staking::Config for Test { @@ -153,7 +152,6 @@ impl pallet_staking::Config for Test { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type TargetList = pallet_staking::UseValidatorsMap; @@ -165,6 +163,7 @@ impl pallet_staking::Config for Test { type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_session::historical::Config for Test { diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index 81052141fd8..6cefa8f39a8 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -174,7 +174,6 @@ impl pallet_staking::Config for Test { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = (); type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type MaxUnlockingChunks = ConstU32<32>; @@ -186,6 +185,7 @@ impl pallet_staking::Config for Test { type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl crate::Config for Test {} diff --git a/substrate/frame/session/src/lib.rs b/substrate/frame/session/src/lib.rs index 17b6aa7a464..9506e98adf7 100644 --- a/substrate/frame/session/src/lib.rs +++ b/substrate/frame/session/src/lib.rs @@ -627,7 +627,7 @@ impl Pallet { Validators::::put(&validators); if changed { - // reset disabled validators + // reset disabled validators if active set was changed >::take(); } diff --git a/substrate/frame/staking/CHANGELOG.md b/substrate/frame/staking/CHANGELOG.md index 719aa388755..113b7a6200b 100644 --- a/substrate/frame/staking/CHANGELOG.md +++ b/substrate/frame/staking/CHANGELOG.md @@ -7,6 +7,25 @@ on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). We maintain a single integer version number for staking pallet to keep track of all storage migrations. +## [v15] + +### Added + +- New trait `DisablingStrategy` which is responsible for making a decision which offenders should be + disabled on new offence. +- Default implementation of `DisablingStrategy` - `UpToLimitDisablingStrategy`. It + disables each new offender up to a threshold (1/3 by default). Offenders are not runtime disabled for + offences in previous era(s). But they will be low-priority node-side disabled for dispute initiation. +- `OffendingValidators` storage item is replaced with `DisabledValidators`. The former keeps all + offenders and if they are disabled or not. The latter just keeps a list of all offenders as they + are disabled by default. + +### Deprecated + +- `enum DisableStrategy` is no longer needed because disabling is not related to the type of the + offence anymore. 
A decision if an offender is disabled or not is made by a `DisablingStrategy`
+  implementation.
+
 ## [v14]
 
 ### Added
diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs
index f5b7e3eca3d..047ad6b87cc 100644
--- a/substrate/frame/staking/src/lib.rs
+++ b/substrate/frame/staking/src/lib.rs
@@ -1239,3 +1239,79 @@ impl BenchmarkingConfig for TestBenchmarkingConfig {
 	type MaxValidators = frame_support::traits::ConstU32<100>;
 	type MaxNominators = frame_support::traits::ConstU32<100>;
 }
+
+/// Controls validator disabling
+pub trait DisablingStrategy<T: Config> {
+	/// Make a disabling decision. Returns the index of the validator to disable or `None` if no
+	/// new validator should be disabled.
+	fn decision(
+		offender_stash: &T::AccountId,
+		slash_era: EraIndex,
+		currently_disabled: &Vec<u32>,
+	) -> Option<u32>;
+}
+
+/// Implementation of [`DisablingStrategy`] which disables validators from the active set up to a
+/// threshold. `DISABLING_LIMIT_FACTOR` is the factor of the maximum disabled validators in the
+/// active set. E.g. setting this value to `3` means no more than 1/3 of the validators in the
+/// active set can be disabled in an era.
+/// By default a factor of 3 is used which is the byzantine threshold.
+pub struct UpToLimitDisablingStrategy<const DISABLING_LIMIT_FACTOR: usize = 3>;
+
+impl<const DISABLING_LIMIT_FACTOR: usize> UpToLimitDisablingStrategy<DISABLING_LIMIT_FACTOR> {
+	/// Disabling limit calculated from the total number of validators in the active set. When
+	/// reached no more validators will be disabled.
+	pub fn disable_limit(validators_len: usize) -> usize {
+		validators_len
+			.saturating_sub(1)
+			.checked_div(DISABLING_LIMIT_FACTOR)
+			.unwrap_or_else(|| {
+				defensive!("DISABLING_LIMIT_FACTOR should not be 0");
+				0
+			})
+	}
+}
+
+impl<T: Config, const DISABLING_LIMIT_FACTOR: usize> DisablingStrategy<T>
+	for UpToLimitDisablingStrategy<DISABLING_LIMIT_FACTOR>
+{
+	fn decision(
+		offender_stash: &T::AccountId,
+		slash_era: EraIndex,
+		currently_disabled: &Vec<u32>,
+	) -> Option<u32> {
+		let active_set = T::SessionInterface::validators();
+
+		// We don't disable more than the limit
+		if currently_disabled.len() >= Self::disable_limit(active_set.len()) {
+			log!(
+				debug,
+				"Won't disable: reached disabling limit {:?}",
+				Self::disable_limit(active_set.len())
+			);
+			return None
+		}
+
+		// We don't disable for offences in previous eras
+		if ActiveEra::<T>::get().map(|e| e.index).unwrap_or_default() > slash_era {
+			log!(
+				debug,
+				"Won't disable: current_era {:?} > slash_era {:?}",
+				Pallet::<T>::current_era().unwrap_or_default(),
+				slash_era
+			);
+			return None
+		}
+
+		let offender_idx = if let Some(idx) = active_set.iter().position(|i| i == offender_stash) {
+			idx as u32
+		} else {
+			log!(debug, "Won't disable: offender not in active set",);
+			return None
+		};
+
+		log!(debug, "Will disable {:?}", offender_idx);
+
+		Some(offender_idx)
+	}
+}
diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs
index d5b18421d5b..510252be26c 100644
--- a/substrate/frame/staking/src/migrations.rs
+++ b/substrate/frame/staking/src/migrations.rs
@@ -20,9 +20,10 @@ use super::*;
 use frame_election_provider_support::SortedListProvider;
 use frame_support::{
+	migrations::VersionedMigration,
 	pallet_prelude::ValueQuery,
 	storage_alias,
-	traits::{GetStorageVersion, OnRuntimeUpgrade},
+	traits::{GetStorageVersion, OnRuntimeUpgrade, UncheckedOnRuntimeUpgrade},
 };
 
 #[cfg(feature = "try-runtime")]
@@ -59,11 +60,61 @@ impl Default for ObsoleteReleases {
 #[storage_alias]
 type StorageVersion = StorageValue<Pallet<T>, ObsoleteReleases, ValueQuery>;
 
+/// Migrating `OffendingValidators` from `Vec<(u32, bool)>` to `Vec<u32>`
+pub mod v15 {
+	use
super::*; + + // The disabling strategy used by staking pallet + type DefaultDisablingStrategy = UpToLimitDisablingStrategy; + + pub struct VersionUncheckedMigrateV14ToV15(sp_std::marker::PhantomData); + impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV14ToV15 { + fn on_runtime_upgrade() -> Weight { + let mut migrated = v14::OffendingValidators::::take() + .into_iter() + .filter(|p| p.1) // take only disabled validators + .map(|p| p.0) + .collect::>(); + + // Respect disabling limit + migrated.truncate(DefaultDisablingStrategy::disable_limit( + T::SessionInterface::validators().len(), + )); + + DisabledValidators::::set(migrated); + + log!(info, "v15 applied successfully."); + T::DbWeight::get().reads_writes(1, 1) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), TryRuntimeError> { + frame_support::ensure!( + v14::OffendingValidators::::decode_len().is_none(), + "OffendingValidators is not empty after the migration" + ); + Ok(()) + } + } + + pub type MigrateV14ToV15 = VersionedMigration< + 14, + 15, + VersionUncheckedMigrateV14ToV15, + Pallet, + ::DbWeight, + >; +} + /// Migration of era exposure storage items to paged exposures. /// Changelog: [v14.](https://github.com/paritytech/substrate/blob/ankan/paged-rewards-rebased2/frame/staking/CHANGELOG.md#14) pub mod v14 { use super::*; + #[frame_support::storage_alias] + pub(crate) type OffendingValidators = + StorageValue, Vec<(u32, bool)>, ValueQuery>; + pub struct MigrateToV14(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV14 { fn on_runtime_upgrade() -> Weight { @@ -73,10 +124,10 @@ pub mod v14 { if in_code == 14 && on_chain == 13 { in_code.put::>(); - log!(info, "v14 applied successfully."); + log!(info, "staking v14 applied successfully."); T::DbWeight::get().reads_writes(1, 1) } else { - log!(warn, "v14 not applied."); + log!(warn, "staking v14 not applied."); T::DbWeight::get().reads(1) } } diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index b46b863c016..8c60dec65a8 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -34,7 +34,7 @@ use frame_system::{EnsureRoot, EnsureSignedBy}; use sp_io; use sp_runtime::{curve::PiecewiseLinear, testing::UintAuthorityId, traits::Zero, BuildStorage}; use sp_staking::{ - offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, + offence::{OffenceDetails, OnOffenceHandler}, OnStakingUpdate, }; @@ -186,7 +186,6 @@ pallet_staking_reward_curve::build! { parameter_types! { pub const BondingDuration: EraIndex = 3; pub const RewardCurve: &'static PiecewiseLinear<'static> = &I_NPOS; - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(75); } parameter_types! { @@ -267,6 +266,9 @@ impl OnStakingUpdate for EventListenerMock { } } +// Disabling threshold for `UpToLimitDisablingStrategy` +pub(crate) const DISABLING_LIMIT_FACTOR: usize = 3; + impl crate::pallet::pallet::Config for Test { type Currency = Balances; type CurrencyBalance = ::Balance; @@ -284,7 +286,6 @@ impl crate::pallet::pallet::Config for Test { type EraPayout = ConvertCurve; type NextNewSession = Session; type MaxExposurePageSize = MaxExposurePageSize; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; // NOTE: consider a macro and use `UseNominatorsAndValidatorsMap` as well. 
@@ -297,6 +298,7 @@ impl crate::pallet::pallet::Config for Test { type EventListeners = EventListenerMock; type BenchmarkingConfig = TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } pub struct WeightedNominationsQuota; @@ -461,6 +463,8 @@ impl ExtBuilder { (31, self.balance_factor * 2000), (41, self.balance_factor * 2000), (51, self.balance_factor * 2000), + (201, self.balance_factor * 2000), + (202, self.balance_factor * 2000), // optional nominator (100, self.balance_factor * 2000), (101, self.balance_factor * 2000), @@ -488,8 +492,10 @@ impl ExtBuilder { (31, 31, self.balance_factor * 500, StakerStatus::::Validator), // an idle validator (41, 41, self.balance_factor * 1000, StakerStatus::::Idle), - ]; - // optionally add a nominator + (51, 51, self.balance_factor * 1000, StakerStatus::::Idle), + (201, 201, self.balance_factor * 1000, StakerStatus::::Idle), + (202, 202, self.balance_factor * 1000, StakerStatus::::Idle), + ]; // optionally add a nominator if self.nominate { stakers.push(( 101, @@ -728,12 +734,11 @@ pub(crate) fn on_offence_in_era( >], slash_fraction: &[Perbill], era: EraIndex, - disable_strategy: DisableStrategy, ) { let bonded_eras = crate::BondedEras::::get(); for &(bonded_era, start_session) in bonded_eras.iter() { if bonded_era == era { - let _ = Staking::on_offence(offenders, slash_fraction, start_session, disable_strategy); + let _ = Staking::on_offence(offenders, slash_fraction, start_session); return } else if bonded_era > era { break @@ -745,7 +750,6 @@ pub(crate) fn on_offence_in_era( offenders, slash_fraction, Staking::eras_start_session_index(era).unwrap(), - disable_strategy, ); } else { panic!("cannot slash in era {}", era); @@ -760,7 +764,7 @@ pub(crate) fn on_offence_now( slash_fraction: &[Perbill], ) { let now = Staking::active_era().unwrap().index; - on_offence_in_era(offenders, slash_fraction, now, DisableStrategy::WhenSlashed) + on_offence_in_era(offenders, slash_fraction, now) } pub(crate) fn add_slash(who: &AccountId) { diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index 0c0ef0dbf46..f4d4a7133dd 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -43,7 +43,7 @@ use sp_runtime::{ }; use sp_staking::{ currency_to_vote::CurrencyToVote, - offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, + offence::{OffenceDetails, OnOffenceHandler}, EraIndex, OnStakingUpdate, Page, SessionIndex, Stake, StakingAccount::{self, Controller, Stash}, StakingInterface, @@ -505,10 +505,8 @@ impl Pallet { } // disable all offending validators that have been disabled for the whole era - for (index, disabled) in >::get() { - if disabled { - T::SessionInterface::disable_validator(index); - } + for index in >::get() { + T::SessionInterface::disable_validator(index); } } @@ -598,8 +596,8 @@ impl Pallet { >::insert(&active_era.index, validator_payout); T::RewardRemainder::on_unbalanced(T::Currency::issue(remainder)); - // Clear offending validators. - >::kill(); + // Clear disabled validators. + >::kill(); } } @@ -868,14 +866,6 @@ impl Pallet { Self::deposit_event(Event::::ForceEra { mode }); } - /// Ensures that at the end of the current session there will be a new era. 
- pub(crate) fn ensure_new_era() { - match ForceEra::::get() { - Forcing::ForceAlways | Forcing::ForceNew => (), - _ => Self::set_force_era(Forcing::ForceNew), - } - } - #[cfg(feature = "runtime-benchmarks")] pub fn add_era_stakers( current_era: EraIndex, @@ -1447,7 +1437,6 @@ where >], slash_fraction: &[Perbill], slash_session: SessionIndex, - disable_strategy: DisableStrategy, ) -> Weight { let reward_proportion = SlashRewardFraction::::get(); let mut consumed_weight = Weight::from_parts(0, 0); @@ -1512,7 +1501,6 @@ where window_start, now: active_era, reward_proportion, - disable_strategy, }); Self::deposit_event(Event::::SlashReported { @@ -1986,7 +1974,8 @@ impl Pallet { Self::check_nominators()?; Self::check_exposures()?; Self::check_paged_exposures()?; - Self::check_count() + Self::check_count()?; + Self::ensure_disabled_validators_sorted() } /// Invariants: @@ -2300,4 +2289,12 @@ impl Pallet { Ok(()) } + + fn ensure_disabled_validators_sorted() -> Result<(), TryRuntimeError> { + ensure!( + DisabledValidators::::get().windows(2).all(|pair| pair[0] <= pair[1]), + "DisabledValidators is not sorted" + ); + Ok(()) + } } diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index 76ddad6f135..9c968d88344 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -47,10 +47,11 @@ mod impls; pub use impls::*; use crate::{ - slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, EraPayout, - EraRewardPoints, Exposure, ExposurePage, Forcing, LedgerIntegrityState, MaxNominationsOf, - NegativeImbalanceOf, Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, - SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk, ValidatorPrefs, + slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, DisablingStrategy, + EraPayout, EraRewardPoints, Exposure, ExposurePage, Forcing, LedgerIntegrityState, + MaxNominationsOf, NegativeImbalanceOf, Nominations, NominationsQuota, PositiveImbalanceOf, + RewardDestination, SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk, + ValidatorPrefs, }; // The speculative number of spans are used as an input of the weight annotation of @@ -67,7 +68,7 @@ pub mod pallet { use super::*; /// The in-code storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(14); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(15); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -217,10 +218,6 @@ pub mod pallet { #[pallet::constant] type MaxExposurePageSize: Get; - /// The fraction of the validator set that is safe to be offending. - /// After the threshold is reached a new era will be forced. - type OffendingValidatorsThreshold: Get; - /// Something that provides a best-effort sorted list of voters aka electing nominators, /// used for NPoS election. /// @@ -278,6 +275,9 @@ pub mod pallet { /// WARNING: this only reports slashing and withdraw events for the time being. type EventListeners: sp_staking::OnStakingUpdate>; + // `DisablingStragegy` controls how validators are disabled + type DisablingStrategy: DisablingStrategy; + /// Some parameters of the benchmarking. type BenchmarkingConfig: BenchmarkingConfig; @@ -654,19 +654,16 @@ pub mod pallet { #[pallet::getter(fn current_planned_session)] pub type CurrentPlannedSession = StorageValue<_, SessionIndex, ValueQuery>; - /// Indices of validators that have offended in the active era and whether they are currently - /// disabled. 
+ /// Indices of validators that have offended in the active era. The offenders are disabled for a + /// whole era. For this reason they are kept here - only staking pallet knows about eras. The + /// implementor of [`DisablingStrategy`] defines if a validator should be disabled which + /// implicitly means that the implementor also controls the max number of disabled validators. /// - /// This value should be a superset of disabled validators since not all offences lead to the - /// validator being disabled (if there was no slash). This is needed to track the percentage of - /// validators that have offended in the current era, ensuring a new era is forced if - /// `OffendingValidatorsThreshold` is reached. The vec is always kept sorted so that we can find - /// whether a given validator has previously offended using binary search. It gets cleared when - /// the era ends. + /// The vec is always kept sorted so that we can find whether a given validator has previously + /// offended using binary search. #[pallet::storage] #[pallet::unbounded] - #[pallet::getter(fn offending_validators)] - pub type OffendingValidators = StorageValue<_, Vec<(u32, bool)>, ValueQuery>; + pub type DisabledValidators = StorageValue<_, Vec, ValueQuery>; /// The threshold for when users can start calling `chill_other` for other validators / /// nominators. The threshold is compared to the actual number of validators / nominators diff --git a/substrate/frame/staking/src/slashing.rs b/substrate/frame/staking/src/slashing.rs index 2011e9eb830..f831f625957 100644 --- a/substrate/frame/staking/src/slashing.rs +++ b/substrate/frame/staking/src/slashing.rs @@ -50,21 +50,21 @@ //! Based on research at use crate::{ - BalanceOf, Config, Error, Exposure, NegativeImbalanceOf, NominatorSlashInEra, - OffendingValidators, Pallet, Perbill, SessionInterface, SpanSlash, UnappliedSlash, + BalanceOf, Config, DisabledValidators, DisablingStrategy, Error, Exposure, NegativeImbalanceOf, + NominatorSlashInEra, Pallet, Perbill, SessionInterface, SpanSlash, UnappliedSlash, ValidatorSlashInEra, }; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ ensure, - traits::{Currency, Defensive, DefensiveSaturating, Get, Imbalance, OnUnbalanced}, + traits::{Currency, Defensive, DefensiveSaturating, Imbalance, OnUnbalanced}, }; use scale_info::TypeInfo; use sp_runtime::{ traits::{Saturating, Zero}, DispatchResult, RuntimeDebug, }; -use sp_staking::{offence::DisableStrategy, EraIndex}; +use sp_staking::EraIndex; use sp_std::vec::Vec; /// The proportion of the slashing reward to be paid out on the first slashing detection. @@ -220,8 +220,6 @@ pub(crate) struct SlashParams<'a, T: 'a + Config> { /// The maximum percentage of a slash that ever gets paid out. /// This is f_inf in the paper. pub(crate) reward_proportion: Perbill, - /// When to disable offenders. - pub(crate) disable_strategy: DisableStrategy, } /// Computes a slash of a validator and nominators. It returns an unapplied @@ -280,18 +278,13 @@ pub(crate) fn compute_slash( let target_span = spans.compare_and_update_span_slash(params.slash_era, own_slash); if target_span == Some(spans.span_index()) { - // misbehavior occurred within the current slashing span - take appropriate - // actions. - - // chill the validator - it misbehaved in the current span and should - // not continue in the next election. also end the slashing span. + // misbehavior occurred within the current slashing span - end current span. + // Check for details. 
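+			// (The same span-ending rule is applied in `kick_out_if_recent` below, which
+			// points at https://github.com/paritytech/polkadot-sdk/issues/2650.)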
 spans.end_span(params.now);
- <Pallet<T>>::chill_stash(params.stash);
 }
 }

- let disable_when_slashed = params.disable_strategy != DisableStrategy::Never;
- add_offending_validator::<T>(params.stash, disable_when_slashed);
+ add_offending_validator::<T>(&params);

 let mut nominators_slashed = Vec::new();
 reward_payout += slash_nominators::<T>(params.clone(), prior_slash_p, &mut nominators_slashed);
@@ -320,54 +313,31 @@ fn kick_out_if_recent<T: Config>(params: SlashParams<T>) {
 );

 if spans.era_span(params.slash_era).map(|s| s.index) == Some(spans.span_index()) {
+ // Check https://github.com/paritytech/polkadot-sdk/issues/2650 for details
 spans.end_span(params.now);
- <Pallet<T>>::chill_stash(params.stash);
 }

- let disable_without_slash = params.disable_strategy == DisableStrategy::Always;
- add_offending_validator::<T>(params.stash, disable_without_slash);
+ add_offending_validator::<T>(&params);
 }

-/// Add the given validator to the offenders list and optionally disable it.
-/// If after adding the validator `OffendingValidatorsThreshold` is reached
-/// a new era will be forced.
-fn add_offending_validator<T: Config>(stash: &T::AccountId, disable: bool) {
- OffendingValidators::<T>::mutate(|offending| {
- let validators = T::SessionInterface::validators();
- let validator_index = match validators.iter().position(|i| i == stash) {
- Some(index) => index,
- None => return,
- };
-
- let validator_index_u32 = validator_index as u32;
-
- match offending.binary_search_by_key(&validator_index_u32, |(index, _)| *index) {
- // this is a new offending validator
- Err(index) => {
- offending.insert(index, (validator_index_u32, disable));
-
- let offending_threshold =
- T::OffendingValidatorsThreshold::get() * validators.len() as u32;
-
- if offending.len() >= offending_threshold as usize {
- // force a new era, to select a new validator set
- <Pallet<T>>::ensure_new_era()
- }
-
- if disable {
- T::SessionInterface::disable_validator(validator_index_u32);
- }
- },
- Ok(index) => {
- if disable && !offending[index].1 {
- // the validator had previously offended without being disabled,
- // let's make sure we disable it now
- offending[index].1 = true;
- T::SessionInterface::disable_validator(validator_index_u32);
- }
- },
+/// Inform the [`DisablingStrategy`] implementation about the new offender and disable the
+/// validator (if any) returned by [`DisablingStrategy::decision`].
+fn add_offending_validator<T: Config>(params: &SlashParams<T>) {
+ DisabledValidators::<T>::mutate(|disabled| {
+ if let Some(offender) =
+ T::DisablingStrategy::decision(params.stash, params.slash_era, &disabled)
+ {
+ // Add the validator to `DisabledValidators` and disable it. Do nothing if it is
+ // already disabled.
+ if let Err(index) = disabled.binary_search_by_key(&offender, |index| *index) {
+ disabled.insert(index, offender);
+ T::SessionInterface::disable_validator(offender);
+ }
+ }
 });
+
+ // `DisabledValidators` should be kept sorted
+ debug_assert!(DisabledValidators::<T>::get().windows(2).all(|pair| pair[0] < pair[1]));
 }

 /// Slash nominators. Accepts general parameters and the prior slash percentage of the validator.
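For orientation, the `decision` hook called by `add_offending_validator` above is now the single place where disabling policy lives. A minimal sketch of the up-to-limit strategy exercised by the tests further down could look as follows; the shape is inferred from this patch and its tests, and the real `UpToLimitDisablingStrategy` may differ in its generics, constants and defensive checks:

// Sketch only: disable offenders from the current era, up to a byzantine
// limit of the active set.
pub struct UpToLimitDisablingStrategy;

impl<T: Config> DisablingStrategy<T> for UpToLimitDisablingStrategy {
    fn decision(
        offender_stash: &T::AccountId,
        slash_era: EraIndex,
        currently_disabled: &Vec<u32>,
    ) -> Option<u32> {
        let active_set = T::SessionInterface::validators();

        // Disabling more than a third of the active set would endanger
        // liveness, so cap at `(n - 1) / 3`: 1 of 4 validators, 2 of 7,
        // matching the thresholds asserted in the tests below.
        let disable_limit = active_set.len().saturating_sub(1) / 3;
        if currently_disabled.len() >= disable_limit {
            return None
        }

        // An offence committed in a previous era does not disable anyone now.
        if ActiveEra::<T>::get().map(|e| e.index).unwrap_or_default() > slash_era {
            return None
        }

        // Disable the offender only if it is part of the current active set.
        active_set.iter().position(|v| v == offender_stash).map(|idx| idx as u32)
    }
}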
diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index 87f6fd424bd..6cf5a56e5a6 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -38,7 +38,7 @@ use sp_runtime::{ Perbill, Percent, Perquintill, Rounding, TokenError, }; use sp_staking::{ - offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, + offence::{OffenceDetails, OnOffenceHandler}, SessionIndex, }; use sp_std::prelude::*; @@ -716,56 +716,65 @@ fn nominating_and_rewards_should_work() { #[test] fn nominators_also_get_slashed_pro_rata() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); - let slash_percent = Perbill::from_percent(5); - let initial_exposure = Staking::eras_stakers(active_era(), &11); - // 101 is a nominator for 11 - assert_eq!(initial_exposure.others.first().unwrap().who, 101); - - // staked values; - let nominator_stake = Staking::ledger(101.into()).unwrap().active; - let nominator_balance = balances(&101).0; - let validator_stake = Staking::ledger(11.into()).unwrap().active; - let validator_balance = balances(&11).0; - let exposed_stake = initial_exposure.total; - let exposed_validator = initial_exposure.own; - let exposed_nominator = initial_exposure.others.first().unwrap().value; - - // 11 goes offline - on_offence_now( - &[OffenceDetails { offender: (11, initial_exposure.clone()), reporters: vec![] }], - &[slash_percent], - ); + ExtBuilder::default() + .validator_count(4) + .set_status(41, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); + let slash_percent = Perbill::from_percent(5); + let initial_exposure = Staking::eras_stakers(active_era(), &11); + // 101 is a nominator for 11 + assert_eq!(initial_exposure.others.first().unwrap().who, 101); + + // staked values; + let nominator_stake = Staking::ledger(101.into()).unwrap().active; + let nominator_balance = balances(&101).0; + let validator_stake = Staking::ledger(11.into()).unwrap().active; + let validator_balance = balances(&11).0; + let exposed_stake = initial_exposure.total; + let exposed_validator = initial_exposure.own; + let exposed_nominator = initial_exposure.others.first().unwrap().value; + + // 11 goes offline + on_offence_now( + &[OffenceDetails { offender: (11, initial_exposure.clone()), reporters: vec![] }], + &[slash_percent], + ); - // both stakes must have been decreased. - assert!(Staking::ledger(101.into()).unwrap().active < nominator_stake); - assert!(Staking::ledger(11.into()).unwrap().active < validator_stake); + // both stakes must have been decreased. + assert!(Staking::ledger(101.into()).unwrap().active < nominator_stake); + assert!(Staking::ledger(11.into()).unwrap().active < validator_stake); - let slash_amount = slash_percent * exposed_stake; - let validator_share = - Perbill::from_rational(exposed_validator, exposed_stake) * slash_amount; - let nominator_share = - Perbill::from_rational(exposed_nominator, exposed_stake) * slash_amount; + let slash_amount = slash_percent * exposed_stake; + let validator_share = + Perbill::from_rational(exposed_validator, exposed_stake) * slash_amount; + let nominator_share = + Perbill::from_rational(exposed_nominator, exposed_stake) * slash_amount; - // both slash amounts need to be positive for the test to make sense. - assert!(validator_share > 0); - assert!(nominator_share > 0); + // both slash amounts need to be positive for the test to make sense. 
+ assert!(validator_share > 0); + assert!(nominator_share > 0); - // both stakes must have been decreased pro-rata. - assert_eq!(Staking::ledger(101.into()).unwrap().active, nominator_stake - nominator_share); - assert_eq!(Staking::ledger(11.into()).unwrap().active, validator_stake - validator_share); - assert_eq!( - balances(&101).0, // free balance - nominator_balance - nominator_share, - ); - assert_eq!( - balances(&11).0, // free balance - validator_balance - validator_share, - ); - // Because slashing happened. - assert!(is_disabled(11)); - }); + // both stakes must have been decreased pro-rata. + assert_eq!( + Staking::ledger(101.into()).unwrap().active, + nominator_stake - nominator_share + ); + assert_eq!( + Staking::ledger(11.into()).unwrap().active, + validator_stake - validator_share + ); + assert_eq!( + balances(&101).0, // free balance + nominator_balance - nominator_share, + ); + assert_eq!( + balances(&11).0, // free balance + validator_balance - validator_share, + ); + // Because slashing happened. + assert!(is_disabled(11)); + }); } #[test] @@ -2401,7 +2410,7 @@ fn era_is_always_same_length() { } #[test] -fn offence_forces_new_era() { +fn offence_doesnt_force_new_era() { ExtBuilder::default().build_and_execute(|| { on_offence_now( &[OffenceDetails { @@ -2411,7 +2420,7 @@ fn offence_forces_new_era() { &[Perbill::from_percent(5)], ); - assert_eq!(Staking::force_era(), Forcing::ForceNew); + assert_eq!(Staking::force_era(), Forcing::NotForcing); }); } @@ -2435,26 +2444,32 @@ fn offence_ensures_new_era_without_clobbering() { #[test] fn offence_deselects_validator_even_when_slash_is_zero() { - ExtBuilder::default().build_and_execute(|| { - assert!(Session::validators().contains(&11)); - assert!(>::contains_key(11)); + ExtBuilder::default() + .validator_count(7) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) + .build_and_execute(|| { + assert!(Session::validators().contains(&11)); + assert!(>::contains_key(11)); - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(0)], - ); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(0)], + ); - assert_eq!(Staking::force_era(), Forcing::ForceNew); - assert!(!>::contains_key(11)); + assert_eq!(Staking::force_era(), Forcing::NotForcing); + assert!(is_disabled(11)); - mock::start_active_era(1); + mock::start_active_era(1); - assert!(!Session::validators().contains(&11)); - assert!(!>::contains_key(11)); - }); + // The validator should be reenabled in the new era + assert!(!is_disabled(11)); + }); } #[test] @@ -2479,71 +2494,70 @@ fn slashing_performed_according_exposure() { } #[test] -fn slash_in_old_span_does_not_deselect() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); - - assert!(>::contains_key(11)); - assert!(Session::validators().contains(&11)); - - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(0)], - ); +fn validator_is_not_disabled_for_an_offence_in_previous_era() { + ExtBuilder::default() + .validator_count(4) + .set_status(41, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); - assert_eq!(Staking::force_era(), Forcing::ForceNew); - 
assert!(!>::contains_key(11)); + assert!(>::contains_key(11)); + assert!(Session::validators().contains(&11)); - mock::start_active_era(2); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(0)], + ); - Staking::validate(RuntimeOrigin::signed(11), Default::default()).unwrap(); - assert_eq!(Staking::force_era(), Forcing::NotForcing); - assert!(>::contains_key(11)); - assert!(!Session::validators().contains(&11)); + assert_eq!(Staking::force_era(), Forcing::NotForcing); + assert!(is_disabled(11)); - mock::start_active_era(3); + mock::start_active_era(2); - // this staker is in a new slashing span now, having re-registered after - // their prior slash. + // the validator is not disabled in the new era + Staking::validate(RuntimeOrigin::signed(11), Default::default()).unwrap(); + assert_eq!(Staking::force_era(), Forcing::NotForcing); + assert!(>::contains_key(11)); + assert!(Session::validators().contains(&11)); - on_offence_in_era( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(0)], - 1, - DisableStrategy::WhenSlashed, - ); + mock::start_active_era(3); - // the validator doesn't get chilled again - assert!(Validators::::iter().any(|(stash, _)| stash == 11)); + // an offence committed in era 1 is reported in era 3 + on_offence_in_era( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(0)], + 1, + ); - // but we are still forcing a new era - assert_eq!(Staking::force_era(), Forcing::ForceNew); + // the validator doesn't get disabled for an old offence + assert!(Validators::::iter().any(|(stash, _)| stash == 11)); + assert!(!is_disabled(11)); - on_offence_in_era( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - // NOTE: A 100% slash here would clean up the account, causing de-registration. - &[Perbill::from_percent(95)], - 1, - DisableStrategy::WhenSlashed, - ); + // and we are not forcing a new era + assert_eq!(Staking::force_era(), Forcing::NotForcing); - // the validator doesn't get chilled again - assert!(Validators::::iter().any(|(stash, _)| stash == 11)); + on_offence_in_era( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + // NOTE: A 100% slash here would clean up the account, causing de-registration. + &[Perbill::from_percent(95)], + 1, + ); - // but it's disabled - assert!(is_disabled(11)); - // and we are still forcing a new era - assert_eq!(Staking::force_era(), Forcing::ForceNew); - }); + // the validator doesn't get disabled again + assert!(Validators::::iter().any(|(stash, _)| stash == 11)); + assert!(!is_disabled(11)); + // and we are still not forcing a new era + assert_eq!(Staking::force_era(), Forcing::NotForcing); + }); } #[test] @@ -2671,7 +2685,7 @@ fn dont_slash_if_fraction_is_zero() { // The validator hasn't been slashed. The new era is not forced. assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Staking::force_era(), Forcing::ForceNew); + assert_eq!(Staking::force_era(), Forcing::NotForcing); }); } @@ -2692,7 +2706,7 @@ fn only_slash_for_max_in_era() { // The validator has been slashed and has been force-chilled. 
assert_eq!(Balances::free_balance(11), 500); - assert_eq!(Staking::force_era(), Forcing::ForceNew); + assert_eq!(Staking::force_era(), Forcing::NotForcing); on_offence_now( &[OffenceDetails { @@ -2833,7 +2847,6 @@ fn slashing_nominators_by_span_max() { }], &[Perbill::from_percent(10)], 2, - DisableStrategy::WhenSlashed, ); assert_eq!(Balances::free_balance(11), 900); @@ -2860,7 +2873,6 @@ fn slashing_nominators_by_span_max() { }], &[Perbill::from_percent(30)], 3, - DisableStrategy::WhenSlashed, ); // 11 was not further slashed, but 21 and 101 were. @@ -2882,7 +2894,6 @@ fn slashing_nominators_by_span_max() { }], &[Perbill::from_percent(20)], 2, - DisableStrategy::WhenSlashed, ); // 11 was further slashed, but 21 and 101 were not. @@ -2999,11 +3010,8 @@ fn deferred_slashes_are_deferred() { assert!(matches!( staking_events_since_last_call().as_slice(), &[ - Event::Chilled { stash: 11 }, - Event::ForceEra { mode: Forcing::ForceNew }, Event::SlashReported { validator: 11, slash_era: 1, .. }, Event::StakersElected, - Event::ForceEra { mode: Forcing::NotForcing }, .., Event::Slashed { staker: 11, amount: 100 }, Event::Slashed { staker: 101, amount: 12 } @@ -3029,7 +3037,6 @@ fn retroactive_deferred_slashes_two_eras_before() { &[OffenceDetails { offender: (11, exposure_11_at_era1), reporters: vec![] }], &[Perbill::from_percent(10)], 1, // should be deferred for two full eras, and applied at the beginning of era 4. - DisableStrategy::Never, ); mock::start_active_era(4); @@ -3037,8 +3044,6 @@ fn retroactive_deferred_slashes_two_eras_before() { assert!(matches!( staking_events_since_last_call().as_slice(), &[ - Event::Chilled { stash: 11 }, - Event::ForceEra { mode: Forcing::ForceNew }, Event::SlashReported { validator: 11, slash_era: 1, .. }, .., Event::Slashed { staker: 11, amount: 100 }, @@ -3067,7 +3072,6 @@ fn retroactive_deferred_slashes_one_before() { &[OffenceDetails { offender: (11, exposure_11_at_era1), reporters: vec![] }], &[Perbill::from_percent(10)], 2, // should be deferred for two full eras, and applied at the beginning of era 5. 
- DisableStrategy::Never, ); mock::start_active_era(4); @@ -3197,7 +3201,6 @@ fn remove_deferred() { &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], &[Perbill::from_percent(15)], 1, - DisableStrategy::WhenSlashed, ); // fails if empty @@ -3312,192 +3315,198 @@ fn remove_multi_deferred() { #[test] fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_validator() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); - assert_eq_uvec!(Session::validators(), vec![11, 21]); - - // pre-slash balance - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + ExtBuilder::default() + .validator_count(7) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); + assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); - // 100 has approval for 11 as of now - assert!(Staking::nominators(101).unwrap().targets.contains(&11)); + // pre-slash balance + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); - // 11 and 21 both have the support of 100 - let exposure_11 = Staking::eras_stakers(active_era(), &11); - let exposure_21 = Staking::eras_stakers(active_era(), &21); + // 100 has approval for 11 as of now + assert!(Staking::nominators(101).unwrap().targets.contains(&11)); - assert_eq!(exposure_11.total, 1000 + 125); - assert_eq!(exposure_21.total, 1000 + 375); + // 11 and 21 both have the support of 100 + let exposure_11 = Staking::eras_stakers(active_era(), &11); + let exposure_21 = Staking::eras_stakers(active_era(), &21); - on_offence_now( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], - &[Perbill::from_percent(10)], - ); + assert_eq!(exposure_11.total, 1000 + 125); + assert_eq!(exposure_21.total, 1000 + 375); - assert_eq!( - staking_events_since_last_call(), - vec![ - Event::StakersElected, - Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, - Event::Chilled { stash: 11 }, - Event::ForceEra { mode: Forcing::ForceNew }, - Event::SlashReported { - validator: 11, - fraction: Perbill::from_percent(10), - slash_era: 1 - }, - Event::Slashed { staker: 11, amount: 100 }, - Event::Slashed { staker: 101, amount: 12 }, - ] - ); + on_offence_now( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::from_percent(10)], + ); - // post-slash balance - let nominator_slash_amount_11 = 125 / 10; - assert_eq!(Balances::free_balance(11), 900); - assert_eq!(Balances::free_balance(101), 2000 - nominator_slash_amount_11); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::StakersElected, + Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, + Event::SlashReported { + validator: 11, + fraction: Perbill::from_percent(10), + slash_era: 1 + }, + Event::Slashed { staker: 11, amount: 100 }, + Event::Slashed { staker: 101, amount: 12 }, + ] + ); - // check that validator was chilled. 
- assert!(Validators::::iter().all(|(stash, _)| stash != 11)); + // post-slash balance + let nominator_slash_amount_11 = 125 / 10; + assert_eq!(Balances::free_balance(11), 900); + assert_eq!(Balances::free_balance(101), 2000 - nominator_slash_amount_11); - // actually re-bond the slashed validator - assert_ok!(Staking::validate(RuntimeOrigin::signed(11), Default::default())); + // check that validator was disabled. + assert!(is_disabled(11)); - mock::start_active_era(2); - let exposure_11 = Staking::eras_stakers(active_era(), &11); - let exposure_21 = Staking::eras_stakers(active_era(), &21); + // actually re-bond the slashed validator + assert_ok!(Staking::validate(RuntimeOrigin::signed(11), Default::default())); - // 11's own expo is reduced. sum of support from 11 is less (448), which is 500 - // 900 + 146 - assert!(matches!(exposure_11, Exposure { own: 900, total: 1046, .. })); - // 1000 + 342 - assert!(matches!(exposure_21, Exposure { own: 1000, total: 1342, .. })); - assert_eq!(500 - 146 - 342, nominator_slash_amount_11); - }); + mock::start_active_era(2); + let exposure_11 = Staking::eras_stakers(active_era(), &11); + let exposure_21 = Staking::eras_stakers(active_era(), &21); + + // 11's own expo is reduced. sum of support from 11 is less (448), which is 500 + // 900 + 146 + assert!(matches!(exposure_11, Exposure { own: 900, total: 1046, .. })); + // 1000 + 342 + assert!(matches!(exposure_21, Exposure { own: 1000, total: 1342, .. })); + assert_eq!(500 - 146 - 342, nominator_slash_amount_11); + }); } #[test] -fn non_slashable_offence_doesnt_disable_validator() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); - assert_eq_uvec!(Session::validators(), vec![11, 21]); +fn non_slashable_offence_disables_validator() { + ExtBuilder::default() + .validator_count(7) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); + assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); - let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); + let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); - // offence with no slash associated - on_offence_now( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], - &[Perbill::zero()], - ); + // offence with no slash associated + on_offence_now( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::zero()], + ); - // it does NOT affect the nominator. - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + // it does NOT affect the nominator. + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - // offence that slashes 25% of the bond - on_offence_now( - &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], - &[Perbill::from_percent(25)], - ); + // offence that slashes 25% of the bond + on_offence_now( + &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[Perbill::from_percent(25)], + ); - // it DOES NOT affect the nominator. - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + // it DOES NOT affect the nominator. 
+ assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - assert_eq!( - staking_events_since_last_call(), - vec![ - Event::StakersElected, - Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, - Event::Chilled { stash: 11 }, - Event::ForceEra { mode: Forcing::ForceNew }, - Event::SlashReported { - validator: 11, - fraction: Perbill::from_percent(0), - slash_era: 1 - }, - Event::Chilled { stash: 21 }, - Event::SlashReported { - validator: 21, - fraction: Perbill::from_percent(25), - slash_era: 1 - }, - Event::Slashed { staker: 21, amount: 250 }, - Event::Slashed { staker: 101, amount: 94 } - ] - ); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::StakersElected, + Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, + Event::SlashReported { + validator: 11, + fraction: Perbill::from_percent(0), + slash_era: 1 + }, + Event::SlashReported { + validator: 21, + fraction: Perbill::from_percent(25), + slash_era: 1 + }, + Event::Slashed { staker: 21, amount: 250 }, + Event::Slashed { staker: 101, amount: 94 } + ] + ); - // the offence for validator 10 wasn't slashable so it wasn't disabled - assert!(!is_disabled(11)); - // whereas validator 20 gets disabled - assert!(is_disabled(21)); - }); + // the offence for validator 11 wasn't slashable but it is disabled + assert!(is_disabled(11)); + // validator 21 gets disabled too + assert!(is_disabled(21)); + }); } #[test] fn slashing_independent_of_disabling_validator() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); - assert_eq_uvec!(Session::validators(), vec![11, 21]); + ExtBuilder::default() + .validator_count(5) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); + assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51]); - let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); + let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); - let now = Staking::active_era().unwrap().index; + let now = Staking::active_era().unwrap().index; - // offence with no slash associated, BUT disabling - on_offence_in_era( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], - &[Perbill::zero()], - now, - DisableStrategy::Always, - ); + // offence with no slash associated + on_offence_in_era( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::zero()], + now, + ); - // nomination remains untouched. - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + // nomination remains untouched. + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - // offence that slashes 25% of the bond, BUT not disabling - on_offence_in_era( - &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], - &[Perbill::from_percent(25)], - now, - DisableStrategy::Never, - ); + // offence that slashes 25% of the bond + on_offence_in_era( + &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[Perbill::from_percent(25)], + now, + ); - // nomination remains untouched. - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + // nomination remains untouched. 
+ assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - assert_eq!( - staking_events_since_last_call(), - vec![ - Event::StakersElected, - Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, - Event::Chilled { stash: 11 }, - Event::ForceEra { mode: Forcing::ForceNew }, - Event::SlashReported { - validator: 11, - fraction: Perbill::from_percent(0), - slash_era: 1 - }, - Event::Chilled { stash: 21 }, - Event::SlashReported { - validator: 21, - fraction: Perbill::from_percent(25), - slash_era: 1 - }, - Event::Slashed { staker: 21, amount: 250 }, - Event::Slashed { staker: 101, amount: 94 } - ] - ); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::StakersElected, + Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, + Event::SlashReported { + validator: 11, + fraction: Perbill::from_percent(0), + slash_era: 1 + }, + Event::SlashReported { + validator: 21, + fraction: Perbill::from_percent(25), + slash_era: 1 + }, + Event::Slashed { staker: 21, amount: 250 }, + Event::Slashed { staker: 101, amount: 94 } + ] + ); - // the offence for validator 10 was explicitly disabled - assert!(is_disabled(11)); - // whereas validator 21 is explicitly not disabled - assert!(!is_disabled(21)); - }); + // first validator is disabled but not slashed + assert!(is_disabled(11)); + // second validator is slashed but not disabled + assert!(!is_disabled(21)); + }); } #[test] -fn offence_threshold_triggers_new_era() { +fn offence_threshold_doesnt_trigger_new_era() { ExtBuilder::default() .validator_count(4) .set_status(41, StakerStatus::Validator) @@ -3506,12 +3515,14 @@ fn offence_threshold_triggers_new_era() { assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41]); assert_eq!( - ::OffendingValidatorsThreshold::get(), - Perbill::from_percent(75), + UpToLimitDisablingStrategy::::disable_limit( + Session::validators().len() + ), + 1 ); - // we have 4 validators and an offending validator threshold of 75%, - // once the third validator commits an offence a new era should be forced + // we have 4 validators and an offending validator threshold of 1/3, + // even if the third validator commits an offence a new era should not be forced let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); @@ -3522,6 +3533,9 @@ fn offence_threshold_triggers_new_era() { &[Perbill::zero()], ); + // 11 should be disabled because the byzantine threshold is 1 + assert!(is_disabled(11)); + assert_eq!(ForceEra::::get(), Forcing::NotForcing); on_offence_now( @@ -3529,6 +3543,10 @@ fn offence_threshold_triggers_new_era() { &[Perbill::zero()], ); + // 21 should not be disabled because the number of disabled validators will be above the + // byzantine threshold + assert!(!is_disabled(21)); + assert_eq!(ForceEra::::get(), Forcing::NotForcing); on_offence_now( @@ -3536,28 +3554,29 @@ fn offence_threshold_triggers_new_era() { &[Perbill::zero()], ); - assert_eq!(ForceEra::::get(), Forcing::ForceNew); + // same for 31 + assert!(!is_disabled(31)); + + assert_eq!(ForceEra::::get(), Forcing::NotForcing); }); } #[test] fn disabled_validators_are_kept_disabled_for_whole_era() { ExtBuilder::default() - .validator_count(4) + .validator_count(7) .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) .build_and_execute(|| { mock::start_active_era(1); - 
assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41]); + assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); assert_eq!(::SessionsPerEra::get(), 3); let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); - on_offence_now( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], - &[Perbill::zero()], - ); - on_offence_now( &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], &[Perbill::from_percent(25)], @@ -3566,18 +3585,15 @@ fn disabled_validators_are_kept_disabled_for_whole_era() { // nominations are not updated. assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - // validator 11 should not be disabled since the offence wasn't slashable - assert!(!is_disabled(11)); // validator 21 gets disabled since it got slashed assert!(is_disabled(21)); advance_session(); // disabled validators should carry-on through all sessions in the era - assert!(!is_disabled(11)); assert!(is_disabled(21)); - // validator 11 should now get disabled + // validator 11 commits an offence on_offence_now( &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], &[Perbill::from_percent(25)], @@ -3687,27 +3703,34 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { #[test] fn zero_slash_keeps_nominators() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); + ExtBuilder::default() + .validator_count(7) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); - assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::eras_stakers(active_era(), &11); - assert_eq!(Balances::free_balance(101), 2000); + let exposure = Staking::eras_stakers(active_era(), &11); + assert_eq!(Balances::free_balance(101), 2000); - on_offence_now( - &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], - &[Perbill::from_percent(0)], - ); + on_offence_now( + &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], + &[Perbill::from_percent(0)], + ); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); - // 11 is still removed.. - assert!(Validators::::iter().all(|(stash, _)| stash != 11)); - // but their nominations are kept. - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - }); + // 11 is not removed but disabled + assert!(Validators::::iter().any(|(stash, _)| stash == 11)); + assert!(is_disabled(11)); + // and their nominations are kept. 
+ assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + }); } #[test] @@ -4710,7 +4733,7 @@ fn offences_weight_calculated_correctly() { let zero_offence_weight = ::DbWeight::get().reads_writes(4, 1); assert_eq!( - Staking::on_offence(&[], &[Perbill::from_percent(50)], 0, DisableStrategy::WhenSlashed), + Staking::on_offence(&[], &[Perbill::from_percent(50)], 0), zero_offence_weight ); @@ -4735,7 +4758,6 @@ fn offences_weight_calculated_correctly() { &offenders, &[Perbill::from_percent(50)], 0, - DisableStrategy::WhenSlashed ), n_offence_unapplied_weight ); @@ -4765,7 +4787,6 @@ fn offences_weight_calculated_correctly() { &one_offender, &[Perbill::from_percent(50)], 0, - DisableStrategy::WhenSlashed{} ), one_offence_unapplied_weight ); @@ -7011,62 +7032,71 @@ mod staking_unchecked { #[test] fn virtual_nominators_are_lazily_slashed() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); - let slash_percent = Perbill::from_percent(5); - let initial_exposure = Staking::eras_stakers(active_era(), &11); - // 101 is a nominator for 11 - assert_eq!(initial_exposure.others.first().unwrap().who, 101); - // make 101 a virtual nominator - ::migrate_to_virtual_staker(&101); - // set payee different to self. - assert_ok!(::update_payee(&101, &102)); - - // cache values - let nominator_stake = Staking::ledger(101.into()).unwrap().active; - let nominator_balance = balances(&101).0; - let validator_stake = Staking::ledger(11.into()).unwrap().active; - let validator_balance = balances(&11).0; - let exposed_stake = initial_exposure.total; - let exposed_validator = initial_exposure.own; - let exposed_nominator = initial_exposure.others.first().unwrap().value; - - // 11 goes offline - on_offence_now( - &[OffenceDetails { offender: (11, initial_exposure.clone()), reporters: vec![] }], - &[slash_percent], - ); + ExtBuilder::default() + .validator_count(7) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); + let slash_percent = Perbill::from_percent(5); + let initial_exposure = Staking::eras_stakers(active_era(), &11); + // 101 is a nominator for 11 + assert_eq!(initial_exposure.others.first().unwrap().who, 101); + // make 101 a virtual nominator + ::migrate_to_virtual_staker(&101); + // set payee different to self. 
+ assert_ok!(::update_payee(&101, &102)); + + // cache values + let nominator_stake = Staking::ledger(101.into()).unwrap().active; + let nominator_balance = balances(&101).0; + let validator_stake = Staking::ledger(11.into()).unwrap().active; + let validator_balance = balances(&11).0; + let exposed_stake = initial_exposure.total; + let exposed_validator = initial_exposure.own; + let exposed_nominator = initial_exposure.others.first().unwrap().value; + + // 11 goes offline + on_offence_now( + &[OffenceDetails { + offender: (11, initial_exposure.clone()), + reporters: vec![], + }], + &[slash_percent], + ); - let slash_amount = slash_percent * exposed_stake; - let validator_share = - Perbill::from_rational(exposed_validator, exposed_stake) * slash_amount; - let nominator_share = - Perbill::from_rational(exposed_nominator, exposed_stake) * slash_amount; + let slash_amount = slash_percent * exposed_stake; + let validator_share = + Perbill::from_rational(exposed_validator, exposed_stake) * slash_amount; + let nominator_share = + Perbill::from_rational(exposed_nominator, exposed_stake) * slash_amount; - // both slash amounts need to be positive for the test to make sense. - assert!(validator_share > 0); - assert!(nominator_share > 0); + // both slash amounts need to be positive for the test to make sense. + assert!(validator_share > 0); + assert!(nominator_share > 0); - // both stakes must have been decreased pro-rata. - assert_eq!( - Staking::ledger(101.into()).unwrap().active, - nominator_stake - nominator_share - ); - assert_eq!( - Staking::ledger(11.into()).unwrap().active, - validator_stake - validator_share - ); + // both stakes must have been decreased pro-rata. + assert_eq!( + Staking::ledger(101.into()).unwrap().active, + nominator_stake - nominator_share + ); + assert_eq!( + Staking::ledger(11.into()).unwrap().active, + validator_stake - validator_share + ); - // validator balance is slashed as usual - assert_eq!(balances(&11).0, validator_balance - validator_share); - // Because slashing happened. - assert!(is_disabled(11)); + // validator balance is slashed as usual + assert_eq!(balances(&11).0, validator_balance - validator_share); + // Because slashing happened. + assert!(is_disabled(11)); - // but virtual nominator's balance is not slashed. - assert_eq!(Balances::free_balance(&101), nominator_balance); - // but slash is broadcasted to slash observers. - assert_eq!(SlashObserver::get().get(&101).unwrap(), &nominator_share); - }) + // but virtual nominator's balance is not slashed. + assert_eq!(Balances::free_balance(&101), nominator_balance); + // but slash is broadcasted to slash observers. 
+ assert_eq!(SlashObserver::get().get(&101).unwrap(), &nominator_share); + }) } } mod ledger { @@ -7926,3 +7956,69 @@ mod ledger_recovery { }) } } + +mod byzantine_threshold_disabling_strategy { + use crate::{ + tests::Test, ActiveEra, ActiveEraInfo, DisablingStrategy, UpToLimitDisablingStrategy, + }; + use sp_staking::EraIndex; + + // Common test data - the stash of the offending validator, the era of the offence and the + // active set + const OFFENDER_ID: ::AccountId = 7; + const SLASH_ERA: EraIndex = 1; + const ACTIVE_SET: [::ValidatorId; 7] = [1, 2, 3, 4, 5, 6, 7]; + const OFFENDER_VALIDATOR_IDX: u32 = 6; // the offender is with index 6 in the active set + + #[test] + fn dont_disable_for_ancient_offence() { + sp_io::TestExternalities::default().execute_with(|| { + let initially_disabled = vec![]; + pallet_session::Validators::::put(ACTIVE_SET.to_vec()); + ActiveEra::::put(ActiveEraInfo { index: 2, start: None }); + + let disable_offender = + >::decision( + &OFFENDER_ID, + SLASH_ERA, + &initially_disabled, + ); + + assert!(disable_offender.is_none()); + }); + } + + #[test] + fn dont_disable_beyond_byzantine_threshold() { + sp_io::TestExternalities::default().execute_with(|| { + let initially_disabled = vec![1, 2]; + pallet_session::Validators::::put(ACTIVE_SET.to_vec()); + + let disable_offender = + >::decision( + &OFFENDER_ID, + SLASH_ERA, + &initially_disabled, + ); + + assert!(disable_offender.is_none()); + }); + } + + #[test] + fn disable_when_below_byzantine_threshold() { + sp_io::TestExternalities::default().execute_with(|| { + let initially_disabled = vec![1]; + pallet_session::Validators::::put(ACTIVE_SET.to_vec()); + + let disable_offender = + >::decision( + &OFFENDER_ID, + SLASH_ERA, + &initially_disabled, + ); + + assert_eq!(disable_offender, Some(OFFENDER_VALIDATOR_IDX)); + }); + } +} diff --git a/substrate/primitives/staking/src/offence.rs b/substrate/primitives/staking/src/offence.rs index 30d96d0cbaf..2c2ebc1fc97 100644 --- a/substrate/primitives/staking/src/offence.rs +++ b/substrate/primitives/staking/src/offence.rs @@ -37,29 +37,6 @@ pub type Kind = [u8; 16]; /// so that we can slash it accordingly. pub type OffenceCount = u32; -/// In case of an offence, which conditions get an offending validator disabled. -#[derive( - Clone, - Copy, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - Encode, - Decode, - sp_runtime::RuntimeDebug, - scale_info::TypeInfo, -)] -pub enum DisableStrategy { - /// Independently of slashing, this offence will not disable the offender. - Never, - /// Only disable the offender if it is also slashed. - WhenSlashed, - /// Independently of slashing, this offence will always disable the offender. - Always, -} - /// A trait implemented by an offence report. /// /// This trait assumes that the offence is legitimate and was validated already. @@ -102,11 +79,6 @@ pub trait Offence { /// number. Note that for GRANDPA the round number is reset each epoch. fn time_slot(&self) -> Self::TimeSlot; - /// In which cases this offence needs to disable offenders until the next era starts. - fn disable_strategy(&self) -> DisableStrategy { - DisableStrategy::WhenSlashed - } - /// A slash fraction of the total exposure that should be slashed for this /// particular offence for the `offenders_count` that happened at a singular `TimeSlot`. /// @@ -177,15 +149,12 @@ pub trait OnOffenceHandler { /// /// The `session` parameter is the session index of the offence. /// - /// The `disable_strategy` parameter decides if the offenders need to be disabled immediately. 
- ///
 /// The receiver might decide to not accept this offence. In this case, the call site is
 /// responsible for queuing the report and re-submitting again.
 fn on_offence(
 offenders: &[OffenceDetails<Reporter, Offender>],
 slash_fraction: &[Perbill],
 session: SessionIndex,
- disable_strategy: DisableStrategy,
 ) -> Res;
 }

@@ -194,7 +163,6 @@ impl<Reporter, Offender, Res: Default> OnOffenceHandler<Reporter, Offender, Res>
 _offenders: &[OffenceDetails<Reporter, Offender>],
 _slash_fraction: &[Perbill],
 _session: SessionIndex,
- _disable_strategy: DisableStrategy,
 ) -> Res {
 Default::default()
 }
-- 
GitLab


From d893cde2cfd1992a3e589614ae09088d92f28a59 Mon Sep 17 00:00:00 2001
From: Ron 
Date: Fri, 26 Apr 2024 23:51:58 +0800
Subject: [PATCH 085/107] Snowbridge: deposit extra fee to beneficiary on Asset
 Hub (#4175)

Just the upstream of https://github.com/Snowfork/polkadot-sdk/pull/137; see
that PR for more context.

---------

Co-authored-by: Clara van Staden 
Co-authored-by: Adrian Catangiu 
---
 .../primitives/router/src/inbound/mod.rs      |   6 +-
 .../bridge-hub-rococo/src/tests/snowbridge.rs | 133 +++++++++++++++++-
 prdoc/pr_4175.prdoc                           |  13 ++
 3 files changed, 149 insertions(+), 3 deletions(-)
 create mode 100644 prdoc/pr_4175.prdoc

diff --git a/bridges/snowbridge/primitives/router/src/inbound/mod.rs b/bridges/snowbridge/primitives/router/src/inbound/mod.rs
index c20554c6d18..54e47a7a8b6 100644
--- a/bridges/snowbridge/primitives/router/src/inbound/mod.rs
+++ b/bridges/snowbridge/primitives/router/src/inbound/mod.rs
@@ -273,8 +273,10 @@ where
 },
 None => {
 instructions.extend(vec![
- // Deposit asset to beneficiary.
- DepositAsset { assets: Definite(asset.into()), beneficiary },
+ // Deposit both asset and fees to beneficiary so the fees will not get
+ // trapped. Another benefit is that fees left over above the ED on AssetHub can
+ // be used to create the beneficiary account in case it does not exist.
+ DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, ]); }, } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index d0c02e61134..1c1c51404aa 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -27,7 +27,7 @@ use snowbridge_pallet_inbound_queue_fixtures::{ }; use snowbridge_pallet_system; use snowbridge_router_primitives::inbound::{ - Command, GlobalConsensusEthereumConvertsFor, MessageV1, VersionedMessage, + Command, Destination, GlobalConsensusEthereumConvertsFor, MessageV1, VersionedMessage, }; use sp_core::H256; use sp_runtime::{DispatchError::Token, TokenError::FundsUnavailable}; @@ -40,6 +40,7 @@ const TREASURY_ACCOUNT: [u8; 32] = const WETH: [u8; 20] = hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"); const ETHEREUM_DESTINATION_ADDRESS: [u8; 20] = hex!("44a57ee2f2FCcb85FDa2B0B18EBD0D8D2333700e"); const INSUFFICIENT_XCM_FEE: u128 = 1000; +const XCM_FEE: u128 = 4_000_000_000; #[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] pub enum ControlCall { @@ -555,3 +556,133 @@ fn register_weth_token_in_asset_hub_fail_for_insufficient_fee() { ); }); } + +fn send_token_from_ethereum_to_asset_hub_with_fee(account_id: [u8; 32], fee: u128) { + let weth_asset_location: Location = Location::new( + 2, + [EthereumNetwork::get().into(), AccountKey20 { network: None, key: WETH }], + ); + // (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }) + // Fund asset hub sovereign on bridge hub + let asset_hub_sovereign = BridgeHubRococo::sovereign_account_id_of(Location::new( + 1, + [Parachain(AssetHubRococo::para_id().into())], + )); + BridgeHubRococo::fund_accounts(vec![(asset_hub_sovereign.clone(), INITIAL_FUND)]); + + // Register WETH + AssetHubRococo::execute_with(|| { + type RuntimeOrigin = ::RuntimeOrigin; + + assert_ok!(::ForeignAssets::force_create( + RuntimeOrigin::root(), + weth_asset_location.clone().try_into().unwrap(), + asset_hub_sovereign.into(), + false, + 1, + )); + + assert!(::ForeignAssets::asset_exists( + weth_asset_location.clone().try_into().unwrap(), + )); + }); + + // Send WETH to an existent account on asset hub + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + type EthereumInboundQueue = + ::EthereumInboundQueue; + let message_id: H256 = [0; 32].into(); + let message = VersionedMessage::V1(MessageV1 { + chain_id: CHAIN_ID, + command: Command::SendToken { + token: WETH.into(), + destination: Destination::AccountId32 { id: account_id }, + amount: 1_000_000, + fee, + }, + }); + let (xcm, _) = EthereumInboundQueue::do_convert(message_id, message).unwrap(); + assert_ok!(EthereumInboundQueue::send_xcm(xcm, AssetHubRococo::para_id().into())); + + // Check that the message was sent + assert_expected_events!( + BridgeHubRococo, + vec![ + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. 
}) => {}, + ] + ); + }); +} + +#[test] +fn send_token_from_ethereum_to_existent_account_on_asset_hub() { + send_token_from_ethereum_to_asset_hub_with_fee(AssetHubRococoSender::get().into(), XCM_FEE); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + // Check that the token was received and issued as a foreign asset on AssetHub + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {}, + ] + ); + }); +} + +#[test] +fn send_token_from_ethereum_to_non_existent_account_on_asset_hub() { + send_token_from_ethereum_to_asset_hub_with_fee([1; 32], XCM_FEE); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + // Check that the token was received and issued as a foreign asset on AssetHub + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {}, + ] + ); + }); +} + +#[test] +fn send_token_from_ethereum_to_non_existent_account_on_asset_hub_with_insufficient_fee() { + send_token_from_ethereum_to_asset_hub_with_fee([1; 32], INSUFFICIENT_XCM_FEE); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + // Check that the message was not processed successfully due to insufficient fee + + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success:false, .. }) => {}, + ] + ); + }); +} + +#[test] +fn send_token_from_ethereum_to_non_existent_account_on_asset_hub_with_sufficient_fee_but_do_not_satisfy_ed( +) { + // On AH the xcm fee is 33_873_024 and the ED is 3_300_000 + send_token_from_ethereum_to_asset_hub_with_fee([1; 32], 36_000_000); + + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + // Check that the message was not processed successfully due to insufficient ED + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success:false, .. }) => {}, + ] + ); + }); +} diff --git a/prdoc/pr_4175.prdoc b/prdoc/pr_4175.prdoc new file mode 100644 index 00000000000..7fc2fb68b38 --- /dev/null +++ b/prdoc/pr_4175.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Snowbridge: deposit extra fee to beneficiary on Asset Hub" + +doc: + - audience: Runtime Dev + description: | + Snowbridge transfers arriving on Asset Hub will deposit both asset and fees to beneficiary so the fees will not get trapped. + Another benefit is when fees left more than ED, could be used to create the beneficiary account in case it does not exist on asset hub. 
+ +crates: + - name: snowbridge-router-primitives -- GitLab From 2a497d297575947b613fe0f3bbac9273a48fd6b0 Mon Sep 17 00:00:00 2001 From: antiyro <74653697+antiyro@users.noreply.github.com> Date: Fri, 26 Apr 2024 18:23:58 +0200 Subject: [PATCH 086/107] fix(seal): shameless fix on sealing typo (#4304) --- substrate/client/consensus/manual-seal/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/substrate/client/consensus/manual-seal/src/lib.rs b/substrate/client/consensus/manual-seal/src/lib.rs index c3d360f0719..8fc7e7ecab2 100644 --- a/substrate/client/consensus/manual-seal/src/lib.rs +++ b/substrate/client/consensus/manual-seal/src/lib.rs @@ -86,7 +86,7 @@ where BasicQueue::new(ManualSealVerifier, block_import, None, spawner, registry) } -/// Params required to start the instant sealing authorship task. +/// Params required to start the manual sealing authorship task. pub struct ManualSealParams, TP, SC, CS, CIDP, P> { /// Block import instance. pub block_import: BI, @@ -114,7 +114,7 @@ pub struct ManualSealParams, TP, SC, C pub create_inherent_data_providers: CIDP, } -/// Params required to start the manual sealing authorship task. +/// Params required to start the instant sealing authorship task. pub struct InstantSealParams, TP, SC, CIDP, P> { /// Block import instance for well. importing blocks. pub block_import: BI, -- GitLab From 73b9a8391fa0b18308fa35f905e31cec77f5618f Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Sun, 28 Apr 2024 14:35:51 +0200 Subject: [PATCH 087/107] [Staking] Runtime api if era rewards are pending to be claimed (#4301) closes https://github.com/paritytech/polkadot-sdk/issues/426. related to https://github.com/paritytech/polkadot-sdk/pull/1189. Would help offchain programs to query if there are unclaimed pages of rewards for a given era. The logic could look like below ```js // loop as long as all era pages are claimed. while (api.call.stakingApi.pendingRewards(era, validator_stash)) { api.tx.staking.payout_stakers(validator_stash, era) } ``` --- polkadot/runtime/westend/src/lib.rs | 4 + prdoc/pr_4301.prdoc | 13 +++ substrate/bin/node/runtime/src/lib.rs | 4 + .../frame/staking/runtime-api/src/lib.rs | 5 +- substrate/frame/staking/src/lib.rs | 28 ++++- substrate/frame/staking/src/pallet/impls.rs | 4 + substrate/frame/staking/src/tests.rs | 107 ++++++++++++++++++ 7 files changed, 163 insertions(+), 2 deletions(-) create mode 100644 prdoc/pr_4301.prdoc diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 03ecd5c070b..de961bb4c39 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -2250,6 +2250,10 @@ sp_api::impl_runtime_apis! { fn eras_stakers_page_count(era: sp_staking::EraIndex, account: AccountId) -> sp_staking::Page { Staking::api_eras_stakers_page_count(era, account) } + + fn pending_rewards(era: sp_staking::EraIndex, account: AccountId) -> bool { + Staking::api_pending_rewards(era, account) + } } #[cfg(feature = "try-runtime")] diff --git a/prdoc/pr_4301.prdoc b/prdoc/pr_4301.prdoc new file mode 100644 index 00000000000..2ca2534243a --- /dev/null +++ b/prdoc/pr_4301.prdoc @@ -0,0 +1,13 @@ +title: New runtime api to check if a validator has pending pages of rewards for an era. + +doc: + - audience: + - Node Dev + - Runtime User + description: | + Creates a new runtime api to check if reward for an era is pending for a validator. 
Era rewards are paged and this + api will return true as long as there is one or more pages of era reward which are not claimed. + +crates: +- name: pallet-staking +- name: pallet-staking-runtime-api diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 0caaa8c7322..5d8016532a5 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -2791,6 +2791,10 @@ impl_runtime_apis! { fn eras_stakers_page_count(era: sp_staking::EraIndex, account: AccountId) -> sp_staking::Page { Staking::api_eras_stakers_page_count(era, account) } + + fn pending_rewards(era: sp_staking::EraIndex, account: AccountId) -> bool { + Staking::api_pending_rewards(era, account) + } } impl sp_consensus_babe::BabeApi for Runtime { diff --git a/substrate/frame/staking/runtime-api/src/lib.rs b/substrate/frame/staking/runtime-api/src/lib.rs index b04c383a077..7955f4184a4 100644 --- a/substrate/frame/staking/runtime-api/src/lib.rs +++ b/substrate/frame/staking/runtime-api/src/lib.rs @@ -30,7 +30,10 @@ sp_api::decl_runtime_apis! { /// Returns the nominations quota for a nominator with a given balance. fn nominations_quota(balance: Balance) -> u32; - /// Returns the page count of exposures for a validator in a given era. + /// Returns the page count of exposures for a validator `account` in a given era. fn eras_stakers_page_count(era: sp_staking::EraIndex, account: AccountId) -> sp_staking::Page; + + /// Returns true if validator `account` has pages to be claimed for the given era. + fn pending_rewards(era: sp_staking::EraIndex, account: AccountId) -> bool; } } diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs index 047ad6b87cc..692e62acfdf 100644 --- a/substrate/frame/staking/src/lib.rs +++ b/substrate/frame/staking/src/lib.rs @@ -1035,11 +1035,37 @@ where /// can and add more functions to it as needed. pub struct EraInfo(sp_std::marker::PhantomData); impl EraInfo { + /// Returns true if validator has one or more page of era rewards not claimed yet. + // Also looks at legacy storage that can be cleaned up after #433. + pub fn pending_rewards(era: EraIndex, validator: &T::AccountId) -> bool { + let page_count = if let Some(overview) = >::get(&era, validator) { + overview.page_count + } else { + if >::contains_key(era, validator) { + // this means non paged exposure, and we treat them as single paged. + 1 + } else { + // if no exposure, then no rewards to claim. + return false + } + }; + + // check if era is marked claimed in legacy storage. + if >::get(validator) + .map(|l| l.legacy_claimed_rewards.contains(&era)) + .unwrap_or_default() + { + return false + } + + ClaimedRewards::::get(era, validator).len() < page_count as usize + } + /// Temporary function which looks at both (1) passed param `T::StakingLedger` for legacy /// non-paged rewards, and (2) `T::ClaimedRewards` for paged rewards. This function can be /// removed once `T::HistoryDepth` eras have passed and none of the older non-paged rewards /// are relevant/claimable. 
- // Refer tracker issue for cleanup: #13034 + // Refer tracker issue for cleanup: https://github.com/paritytech/polkadot-sdk/issues/433 pub(crate) fn is_rewards_claimed_with_legacy_fallback( era: EraIndex, ledger: &StakingLedger, diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index f4d4a7133dd..4eb24311ab3 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -1183,6 +1183,10 @@ impl Pallet { pub fn api_eras_stakers_page_count(era: EraIndex, account: T::AccountId) -> Page { EraInfo::::get_page_count(era, &account) } + + pub fn api_pending_rewards(era: EraIndex, account: T::AccountId) -> bool { + EraInfo::::pending_rewards(era, &account) + } } impl ElectionDataProvider for Pallet { diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index 6cf5a56e5a6..d05752f54be 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -6796,6 +6796,113 @@ fn test_validator_exposure_is_backward_compatible_with_non_paged_rewards_payout( }); } +#[test] +fn test_runtime_api_pending_rewards() { + ExtBuilder::default().build_and_execute(|| { + // GIVEN + let err_weight = ::WeightInfo::payout_stakers_alive_staked(0); + let stake = 100; + + // validator with non-paged exposure, rewards marked in legacy claimed rewards. + let validator_one = 301; + // validator with non-paged exposure, rewards marked in paged claimed rewards. + let validator_two = 302; + // validator with paged exposure. + let validator_three = 303; + + // Set staker + for v in validator_one..=validator_three { + let _ = Balances::make_free_balance_be(&v, stake); + assert_ok!(Staking::bond(RuntimeOrigin::signed(v), stake, RewardDestination::Staked)); + } + + // Add reward points + let reward = EraRewardPoints:: { + total: 1, + individual: vec![(validator_one, 1), (validator_two, 1), (validator_three, 1)] + .into_iter() + .collect(), + }; + ErasRewardPoints::::insert(0, reward); + + // build exposure + let mut individual_exposures: Vec> = vec![]; + for i in 0..=MaxExposurePageSize::get() { + individual_exposures.push(IndividualExposure { who: i.into(), value: stake }); + } + let exposure = Exposure:: { + total: stake * (MaxExposurePageSize::get() as Balance + 2), + own: stake, + others: individual_exposures, + }; + + // add non-paged exposure for one and two. + >::insert(0, validator_one, exposure.clone()); + >::insert(0, validator_two, exposure.clone()); + // add paged exposure for third validator + EraInfo::::set_exposure(0, &validator_three, exposure); + + // add some reward to be distributed + ErasValidatorReward::::insert(0, 1000); + + // mark rewards claimed for validator_one in legacy claimed rewards + >::insert( + validator_one, + StakingLedgerInspect { + stash: validator_one, + total: stake, + active: stake, + unlocking: Default::default(), + legacy_claimed_rewards: bounded_vec![0], + }, + ); + + // SCENARIO ONE: rewards already marked claimed in legacy storage. + // runtime api should return false for pending rewards for validator_one. + assert!(!EraInfo::::pending_rewards(0, &validator_one)); + // and if we try to pay, we get an error. + assert_noop!( + Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_one, 0), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + // SCENARIO TWO: non-paged exposure + // validator two has not claimed rewards, so pending rewards is true. 
+ assert!(EraInfo::<Test>::pending_rewards(0, &validator_two)); + // and payout works + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_two, 0)); + // now pending rewards is false. + assert!(!EraInfo::<Test>::pending_rewards(0, &validator_two)); + // and payout fails + assert_noop!( + Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_two, 0), + Error::<Test>::AlreadyClaimed.with_weight(err_weight) + ); + + // SCENARIO THREE: validator with paged exposure (two pages). + // validator three has not claimed rewards, so pending rewards is true. + assert!(EraInfo::<Test>::pending_rewards(0, &validator_three)); + // and payout works + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_three, 0)); + // validator three has two pages of exposure, so pending rewards is still true. + assert!(EraInfo::<Test>::pending_rewards(0, &validator_three)); + // payout again + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_three, 0)); + // now pending rewards is false. + assert!(!EraInfo::<Test>::pending_rewards(0, &validator_three)); + // and payout fails + assert_noop!( + Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_three, 0), + Error::<Test>::AlreadyClaimed.with_weight(err_weight) + ); + + // for eras with no exposure, pending rewards is false. + assert!(!EraInfo::<Test>::pending_rewards(0, &validator_one)); + assert!(!EraInfo::<Test>::pending_rewards(0, &validator_two)); + assert!(!EraInfo::<Test>::pending_rewards(0, &validator_three)); + }); +} + mod staking_interface { use frame_support::storage::with_storage_layer; use sp_staking::StakingInterface; -- GitLab From 954150f3b5fdb7d07d1ed01b138e2025245bb227 Mon Sep 17 00:00:00 2001 From: Squirrel Date: Sun, 28 Apr 2024 16:29:21 +0100 Subject: [PATCH 088/107] remove unnecessary use statements due to 2021 core prelude (#4183) Some traits are already included in the Rust 2021 prelude, so they no longer need to be imported explicitly: uses of `convert::TryFrom`, `convert::TryInto`, and `iter::FromIterator` are removed. ( https://doc.rust-lang.org/core/prelude/rust_2021/ ) No breaking changes or change of functionality, so I think no PR doc is needed in this case. 
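For context, here is a tiny self-contained example (ours, not part of the patch) showing why the imports are redundant; it compiles on edition 2021 with none of the removed `use` statements:

```rs
// Edition 2021 only: `TryFrom`, `TryInto` and `FromIterator` are re-exported
// by the `rust_2021` prelude, so no explicit imports are needed.

fn main() {
    // `TryFrom`/`TryInto` methods resolve through the prelude.
    let small: u8 = u8::try_from(200u32).unwrap();
    let also_small: u8 = 200u32.try_into().unwrap();
    assert_eq!(small, also_small);

    // `FromIterator` is in scope too, so `Vec::from_iter` works directly.
    let squares = Vec::from_iter((1u32..=4).map(|n| n * n));
    assert_eq!(squares, vec![1, 4, 9, 16]);
}
```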
(Motivation: Removes some references to `sp-std`) --- bridges/bin/runtime-common/src/messages.rs | 2 +- bridges/modules/grandpa/src/lib.rs | 2 +- bridges/primitives/runtime/src/chain.rs | 2 +- bridges/primitives/runtime/src/lib.rs | 2 +- bridges/relays/lib-substrate-relay/src/messages_lane.rs | 2 +- bridges/relays/lib-substrate-relay/src/messages_metrics.rs | 2 +- bridges/relays/lib-substrate-relay/src/messages_target.rs | 2 +- bridges/snowbridge/pallets/inbound-queue/src/envelope.rs | 2 +- bridges/snowbridge/pallets/inbound-queue/src/lib.rs | 2 +- bridges/snowbridge/primitives/beacon/src/bits.rs | 2 +- bridges/snowbridge/primitives/beacon/src/serde_utils.rs | 2 +- bridges/snowbridge/primitives/ethereum/src/header.rs | 2 +- bridges/snowbridge/primitives/ethereum/src/mpt.rs | 2 +- bridges/snowbridge/runtime/test-common/Cargo.toml | 2 +- cumulus/client/consensus/aura/src/collator.rs | 2 +- cumulus/client/consensus/aura/src/collators/basic.rs | 2 +- cumulus/client/consensus/aura/src/collators/lookahead.rs | 2 +- cumulus/client/network/src/lib.rs | 2 +- cumulus/parachains/pallets/collective-content/Cargo.toml | 2 +- polkadot/node/core/bitfield-signing/src/lib.rs | 2 +- polkadot/node/network/bitfield-distribution/src/tests.rs | 2 +- .../node/network/collator-protocol/src/collator_side/mod.rs | 1 - .../network/collator-protocol/src/validator_side/mod.rs | 2 -- .../network/statement-distribution/src/legacy_v1/tests.rs | 2 +- .../subsystem-types/src/messages/network_bridge_event.rs | 2 +- polkadot/xcm/src/v3/junction.rs | 1 - polkadot/xcm/src/v3/junctions.rs | 2 +- polkadot/xcm/src/v3/mod.rs | 6 +----- polkadot/xcm/src/v3/multiasset.rs | 5 +---- polkadot/xcm/src/v3/multilocation.rs | 6 +----- polkadot/xcm/src/v4/asset.rs | 5 +---- polkadot/xcm/src/v4/junction.rs | 1 - polkadot/xcm/src/v4/junctions.rs | 2 +- polkadot/xcm/src/v4/location.rs | 6 +----- polkadot/xcm/src/v4/mod.rs | 6 +----- polkadot/xcm/xcm-builder/src/tests/mod.rs | 1 - substrate/client/consensus/grandpa/rpc/src/lib.rs | 2 +- substrate/client/consensus/grandpa/src/environment.rs | 1 - substrate/client/mixnet/Cargo.toml | 2 +- .../network/src/protocol/notifications/upgrade/collec.rs | 1 - substrate/frame/Cargo.toml | 2 +- substrate/frame/alliance/src/benchmarking.rs | 6 +----- substrate/frame/alliance/src/lib.rs | 2 +- substrate/frame/alliance/src/mock.rs | 1 - substrate/frame/alliance/src/types.rs | 2 +- substrate/frame/examples/frame-crate/Cargo.toml | 2 +- substrate/frame/mixnet/Cargo.toml | 2 +- substrate/frame/node-authorization/src/lib.rs | 2 +- substrate/frame/safe-mode/src/lib.rs | 1 - substrate/frame/sassafras/Cargo.toml | 2 +- substrate/frame/transaction-payment/rpc/src/lib.rs | 2 +- substrate/frame/tx-pause/src/lib.rs | 2 +- substrate/primitives/consensus/sassafras/Cargo.toml | 2 +- substrate/primitives/core/fuzz/Cargo.toml | 1 + substrate/primitives/mixnet/Cargo.toml | 2 +- substrate/primitives/state-machine/src/basic.rs | 1 - 56 files changed, 46 insertions(+), 82 deletions(-) diff --git a/bridges/bin/runtime-common/src/messages.rs b/bridges/bin/runtime-common/src/messages.rs index 4aca53f3b98..0fe9935dbdb 100644 --- a/bridges/bin/runtime-common/src/messages.rs +++ b/bridges/bin/runtime-common/src/messages.rs @@ -35,7 +35,7 @@ use frame_support::{traits::Get, weights::Weight}; use hash_db::Hasher; use scale_info::TypeInfo; use sp_runtime::RuntimeDebug; -use sp_std::{convert::TryFrom, marker::PhantomData, vec::Vec}; +use sp_std::{marker::PhantomData, vec::Vec}; /// Bidirectional message bridge. 
pub trait MessageBridge { diff --git a/bridges/modules/grandpa/src/lib.rs b/bridges/modules/grandpa/src/lib.rs index cb536eb07ff..efcbfb1654b 100644 --- a/bridges/modules/grandpa/src/lib.rs +++ b/bridges/modules/grandpa/src/lib.rs @@ -49,7 +49,7 @@ use sp_runtime::{ traits::{Header as HeaderT, Zero}, SaturatedConversion, }; -use sp_std::{boxed::Box, convert::TryInto, prelude::*}; +use sp_std::{boxed::Box, prelude::*}; mod call_ext; #[cfg(test)] diff --git a/bridges/primitives/runtime/src/chain.rs b/bridges/primitives/runtime/src/chain.rs index 1b1c623104f..369386e41b0 100644 --- a/bridges/primitives/runtime/src/chain.rs +++ b/bridges/primitives/runtime/src/chain.rs @@ -26,7 +26,7 @@ use sp_runtime::{ }, FixedPointOperand, }; -use sp_std::{convert::TryFrom, fmt::Debug, hash::Hash, str::FromStr, vec, vec::Vec}; +use sp_std::{fmt::Debug, hash::Hash, str::FromStr, vec, vec::Vec}; /// Chain call, that is either SCALE-encoded, or decoded. #[derive(Debug, Clone, PartialEq)] diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs index c9c5c941291..5daba0351ad 100644 --- a/bridges/primitives/runtime/src/lib.rs +++ b/bridges/primitives/runtime/src/lib.rs @@ -31,7 +31,7 @@ use sp_runtime::{ traits::{BadOrigin, Header as HeaderT, UniqueSaturatedInto}, RuntimeDebug, }; -use sp_std::{convert::TryFrom, fmt::Debug, ops::RangeInclusive, vec, vec::Vec}; +use sp_std::{fmt::Debug, ops::RangeInclusive, vec, vec::Vec}; pub use chain::{ AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, Chain, EncodedOrDecodedCall, HashOf, diff --git a/bridges/relays/lib-substrate-relay/src/messages_lane.rs b/bridges/relays/lib-substrate-relay/src/messages_lane.rs index abeab8c1402..58e9ded312d 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_lane.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_lane.rs @@ -46,7 +46,7 @@ use relay_utils::{ }; use sp_core::Pair; use sp_runtime::traits::Zero; -use std::{convert::TryFrom, fmt::Debug, marker::PhantomData}; +use std::{fmt::Debug, marker::PhantomData}; /// Substrate -> Substrate messages synchronization pipeline. pub trait SubstrateMessageLane: 'static + Clone + Debug + Send + Sync { diff --git a/bridges/relays/lib-substrate-relay/src/messages_metrics.rs b/bridges/relays/lib-substrate-relay/src/messages_metrics.rs index 27bf6186c3b..b30e75bd8ba 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_metrics.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_metrics.rs @@ -32,7 +32,7 @@ use relay_substrate_client::{ use relay_utils::metrics::{MetricsParams, StandaloneMetric}; use sp_core::storage::StorageData; use sp_runtime::{FixedPointNumber, FixedU128}; -use std::{convert::TryFrom, fmt::Debug, marker::PhantomData}; +use std::{fmt::Debug, marker::PhantomData}; /// Add relay accounts balance metrics. pub async fn add_relay_balances_metrics( diff --git a/bridges/relays/lib-substrate-relay/src/messages_target.rs b/bridges/relays/lib-substrate-relay/src/messages_target.rs index 9396e785530..633b11f0b80 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_target.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_target.rs @@ -45,7 +45,7 @@ use relay_substrate_client::{ }; use relay_utils::relay_loop::Client as RelayClient; use sp_core::Pair; -use std::{convert::TryFrom, ops::RangeInclusive}; +use std::ops::RangeInclusive; /// Message receiving proof returned by the target Substrate node. 
pub type SubstrateMessagesDeliveryProof = diff --git a/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs b/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs index 826d535c2cb..31a8992442d 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs @@ -3,7 +3,7 @@ use snowbridge_core::{inbound::Log, ChannelId}; use sp_core::{RuntimeDebug, H160, H256}; -use sp_std::{convert::TryFrom, prelude::*}; +use sp_std::prelude::*; use alloy_primitives::B256; use alloy_sol_types::{sol, SolEvent}; diff --git a/bridges/snowbridge/pallets/inbound-queue/src/lib.rs b/bridges/snowbridge/pallets/inbound-queue/src/lib.rs index 8acbb0c2916..4a1486204eb 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/lib.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/lib.rs @@ -50,7 +50,7 @@ use frame_system::ensure_signed; use scale_info::TypeInfo; use sp_core::{H160, H256}; use sp_runtime::traits::Zero; -use sp_std::{convert::TryFrom, vec}; +use sp_std::vec; use xcm::prelude::{ send_xcm, Instruction::SetTopic, Junction::*, Location, SendError as XcmpSendError, SendXcm, Xcm, XcmContext, XcmHash, diff --git a/bridges/snowbridge/primitives/beacon/src/bits.rs b/bridges/snowbridge/primitives/beacon/src/bits.rs index 72b7135ee29..fb03588cf8b 100644 --- a/bridges/snowbridge/primitives/beacon/src/bits.rs +++ b/bridges/snowbridge/primitives/beacon/src/bits.rs @@ -1,6 +1,6 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2023 Snowfork -use sp_std::{convert::TryInto, prelude::*}; +use sp_std::prelude::*; use ssz_rs::{Bitvector, Deserialize}; pub fn decompress_sync_committee_bits< diff --git a/bridges/snowbridge/primitives/beacon/src/serde_utils.rs b/bridges/snowbridge/primitives/beacon/src/serde_utils.rs index 07f5cbe724e..5e39ff91225 100644 --- a/bridges/snowbridge/primitives/beacon/src/serde_utils.rs +++ b/bridges/snowbridge/primitives/beacon/src/serde_utils.rs @@ -7,7 +7,7 @@ use serde::{Deserialize, Deserializer}; // helper to deserialize arbitrary arrays like [T; N] pub mod arrays { - use std::{convert::TryInto, marker::PhantomData}; + use std::marker::PhantomData; use serde::{ de::{SeqAccess, Visitor}, diff --git a/bridges/snowbridge/primitives/ethereum/src/header.rs b/bridges/snowbridge/primitives/ethereum/src/header.rs index f0b51f8c79d..48fa179fe4f 100644 --- a/bridges/snowbridge/primitives/ethereum/src/header.rs +++ b/bridges/snowbridge/primitives/ethereum/src/header.rs @@ -8,7 +8,7 @@ use rlp::RlpStream; use scale_info::TypeInfo; use sp_io::hashing::keccak_256; use sp_runtime::RuntimeDebug; -use sp_std::{convert::TryInto, prelude::*}; +use sp_std::prelude::*; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; diff --git a/bridges/snowbridge/primitives/ethereum/src/mpt.rs b/bridges/snowbridge/primitives/ethereum/src/mpt.rs index 9a2dae486dc..0365f5e994f 100644 --- a/bridges/snowbridge/primitives/ethereum/src/mpt.rs +++ b/bridges/snowbridge/primitives/ethereum/src/mpt.rs @@ -3,7 +3,7 @@ //! 
Helper types to work with Ethereum's Merkle Patricia Trie nodes use ethereum_types::H256; -use sp_std::{convert::TryFrom, prelude::*}; +use sp_std::prelude::*; pub trait Node { fn contains_hash(&self, hash: H256) -> bool; diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml index 92970339fac..7cbb3857403 100644 --- a/bridges/snowbridge/runtime/test-common/Cargo.toml +++ b/bridges/snowbridge/runtime/test-common/Cargo.toml @@ -3,7 +3,7 @@ name = "snowbridge-runtime-test-common" description = "Snowbridge Runtime Tests" version = "0.2.0" authors = ["Snowfork "] -edition = "2021" +edition.workspace = true license = "Apache-2.0" categories = ["cryptography::cryptocurrencies"] diff --git a/cumulus/client/consensus/aura/src/collator.rs b/cumulus/client/consensus/aura/src/collator.rs index 5b7669c88f4..776052215d9 100644 --- a/cumulus/client/consensus/aura/src/collator.rs +++ b/cumulus/client/consensus/aura/src/collator.rs @@ -55,7 +55,7 @@ use sp_runtime::{ }; use sp_state_machine::StorageChanges; use sp_timestamp::Timestamp; -use std::{convert::TryFrom, error::Error, time::Duration}; +use std::{error::Error, time::Duration}; /// Parameters for instantiating a [`Collator`]. pub struct Params { diff --git a/cumulus/client/consensus/aura/src/collators/basic.rs b/cumulus/client/consensus/aura/src/collators/basic.rs index a4c22a45266..1047c6219ad 100644 --- a/cumulus/client/consensus/aura/src/collators/basic.rs +++ b/cumulus/client/consensus/aura/src/collators/basic.rs @@ -48,7 +48,7 @@ use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; use sp_state_machine::Backend as _; -use std::{convert::TryFrom, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; use crate::collator as collator_util; diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 3fe87e94b7b..09416233ea9 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -67,7 +67,7 @@ use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; use sp_timestamp::Timestamp; -use std::{convert::TryFrom, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; use crate::collator::{self as collator_util, SlotClaim}; diff --git a/cumulus/client/network/src/lib.rs b/cumulus/client/network/src/lib.rs index ebd557b805c..f442ed5840b 100644 --- a/cumulus/client/network/src/lib.rs +++ b/cumulus/client/network/src/lib.rs @@ -36,7 +36,7 @@ use polkadot_primitives::{ use codec::{Decode, DecodeAll, Encode}; use futures::{channel::oneshot, future::FutureExt, Future}; -use std::{convert::TryFrom, fmt, marker::PhantomData, pin::Pin, sync::Arc}; +use std::{fmt, marker::PhantomData, pin::Pin, sync::Arc}; #[cfg(test)] mod tests; diff --git a/cumulus/parachains/pallets/collective-content/Cargo.toml b/cumulus/parachains/pallets/collective-content/Cargo.toml index b3fac47cb4a..207259bee52 100644 --- a/cumulus/parachains/pallets/collective-content/Cargo.toml +++ b/cumulus/parachains/pallets/collective-content/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-collective-content" version = "0.6.0" authors = ["Parity Technologies "] -edition = "2021" +edition.workspace = true description = "Managed content" license = "Apache-2.0" diff --git 
a/polkadot/node/core/bitfield-signing/src/lib.rs b/polkadot/node/core/bitfield-signing/src/lib.rs index 0fc0bb3d278..89851c4a033 100644 --- a/polkadot/node/core/bitfield-signing/src/lib.rs +++ b/polkadot/node/core/bitfield-signing/src/lib.rs @@ -38,7 +38,7 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_util::{self as util, Validator}; use polkadot_primitives::{AvailabilityBitfield, CoreState, Hash, ValidatorIndex}; use sp_keystore::{Error as KeystoreError, KeystorePtr}; -use std::{collections::HashMap, iter::FromIterator, time::Duration}; +use std::{collections::HashMap, time::Duration}; use wasm_timer::{Delay, Instant}; mod metrics; diff --git a/polkadot/node/network/bitfield-distribution/src/tests.rs b/polkadot/node/network/bitfield-distribution/src/tests.rs index 188b51ebccc..dc37f73ec8a 100644 --- a/polkadot/node/network/bitfield-distribution/src/tests.rs +++ b/polkadot/node/network/bitfield-distribution/src/tests.rs @@ -40,7 +40,7 @@ use sp_core::Pair as PairT; use sp_keyring::Sr25519Keyring; use sp_keystore::{testing::MemoryKeystore, Keystore, KeystorePtr}; -use std::{iter::FromIterator as _, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; const TIMEOUT: Duration = Duration::from_millis(50); macro_rules! launch { diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index e6aa55235b7..879caf92328 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -16,7 +16,6 @@ use std::{ collections::{HashMap, HashSet}, - convert::TryInto, time::Duration, }; diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index f7b07133bff..ac8c060827f 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -20,9 +20,7 @@ use futures::{ use futures_timer::Delay; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, - convert::TryInto, future::Future, - iter::FromIterator, time::{Duration, Instant}, }; use tokio_util::sync::CancellationToken; diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs index 0dea5ad0996..d4c5f95034a 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs @@ -55,7 +55,7 @@ use sp_application_crypto::{sr25519::Pair, AppCrypto, Pair as TraitPair}; use sp_authority_discovery::AuthorityPair; use sp_keyring::Sr25519Keyring; use sp_keystore::{Keystore, KeystorePtr}; -use std::{iter::FromIterator as _, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; use util::reputation::add_reputation; // Some deterministic genesis hash for protocol names diff --git a/polkadot/node/subsystem-types/src/messages/network_bridge_event.rs b/polkadot/node/subsystem-types/src/messages/network_bridge_event.rs index fa2c7687b38..29798c785b9 100644 --- a/polkadot/node/subsystem-types/src/messages/network_bridge_event.rs +++ b/polkadot/node/subsystem-types/src/messages/network_bridge_event.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-use std::{collections::HashSet, convert::TryFrom}; +use std::collections::HashSet; pub use sc_network::ReputationChange; pub use sc_network_types::PeerId; diff --git a/polkadot/xcm/src/v3/junction.rs b/polkadot/xcm/src/v3/junction.rs index e9e51941b1a..32ce352c5c0 100644 --- a/polkadot/xcm/src/v3/junction.rs +++ b/polkadot/xcm/src/v3/junction.rs @@ -26,7 +26,6 @@ use crate::{ VersionedLocation, }; use bounded_collections::{BoundedSlice, BoundedVec, ConstU32}; -use core::convert::{TryFrom, TryInto}; use parity_scale_codec::{self, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; diff --git a/polkadot/xcm/src/v3/junctions.rs b/polkadot/xcm/src/v3/junctions.rs index 9748e81fa55..7b014304fda 100644 --- a/polkadot/xcm/src/v3/junctions.rs +++ b/polkadot/xcm/src/v3/junctions.rs @@ -17,7 +17,7 @@ //! XCM `Junctions`/`InteriorMultiLocation` datatype. use super::{Junction, MultiLocation, NetworkId}; -use core::{convert::TryFrom, mem, result}; +use core::{mem, result}; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; diff --git a/polkadot/xcm/src/v3/mod.rs b/polkadot/xcm/src/v3/mod.rs index d4e2da07a25..e7c57f414eb 100644 --- a/polkadot/xcm/src/v3/mod.rs +++ b/polkadot/xcm/src/v3/mod.rs @@ -29,11 +29,7 @@ use super::{ use crate::DoubleEncoded; use alloc::{vec, vec::Vec}; use bounded_collections::{parameter_types, BoundedVec}; -use core::{ - convert::{TryFrom, TryInto}, - fmt::Debug, - result, -}; +use core::{fmt::Debug, result}; use derivative::Derivative; use parity_scale_codec::{ self, decode_vec_with_len, Compact, Decode, Encode, Error as CodecError, Input as CodecInput, diff --git a/polkadot/xcm/src/v3/multiasset.rs b/polkadot/xcm/src/v3/multiasset.rs index 0662077b19d..9a67b0e4986 100644 --- a/polkadot/xcm/src/v3/multiasset.rs +++ b/polkadot/xcm/src/v3/multiasset.rs @@ -42,10 +42,7 @@ use crate::{ }; use alloc::{vec, vec::Vec}; use bounded_collections::{BoundedVec, ConstU32}; -use core::{ - cmp::Ordering, - convert::{TryFrom, TryInto}, -}; +use core::cmp::Ordering; use parity_scale_codec::{self as codec, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; diff --git a/polkadot/xcm/src/v3/multilocation.rs b/polkadot/xcm/src/v3/multilocation.rs index 18fe01ec8fa..731e277b29d 100644 --- a/polkadot/xcm/src/v3/multilocation.rs +++ b/polkadot/xcm/src/v3/multilocation.rs @@ -20,10 +20,7 @@ use super::{Junction, Junctions}; use crate::{ v2::MultiLocation as OldMultiLocation, v4::Location as NewMultiLocation, VersionedLocation, }; -use core::{ - convert::{TryFrom, TryInto}, - result, -}; +use core::result; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; @@ -766,7 +763,6 @@ mod tests { #[test] fn conversion_from_other_types_works() { use crate::v2; - use core::convert::TryInto; fn takes_multilocation>(_arg: Arg) {} diff --git a/polkadot/xcm/src/v4/asset.rs b/polkadot/xcm/src/v4/asset.rs index 8abd8f9f8fd..6b6d200f32f 100644 --- a/polkadot/xcm/src/v4/asset.rs +++ b/polkadot/xcm/src/v4/asset.rs @@ -34,10 +34,7 @@ use crate::v3::{ }; use alloc::{vec, vec::Vec}; use bounded_collections::{BoundedVec, ConstU32}; -use core::{ - cmp::Ordering, - convert::{TryFrom, TryInto}, -}; +use core::cmp::Ordering; use parity_scale_codec::{self as codec, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; diff --git a/polkadot/xcm/src/v4/junction.rs b/polkadot/xcm/src/v4/junction.rs index b5d10484aa0..3ae97de5e9b 100644 --- a/polkadot/xcm/src/v4/junction.rs +++ b/polkadot/xcm/src/v4/junction.rs @@ 
-23,7 +23,6 @@ use crate::{ VersionedLocation, }; use bounded_collections::{BoundedSlice, BoundedVec, ConstU32}; -use core::convert::TryFrom; use parity_scale_codec::{self, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; diff --git a/polkadot/xcm/src/v4/junctions.rs b/polkadot/xcm/src/v4/junctions.rs index 48712dd74c6..6d1af59e13d 100644 --- a/polkadot/xcm/src/v4/junctions.rs +++ b/polkadot/xcm/src/v4/junctions.rs @@ -18,7 +18,7 @@ use super::{Junction, Location, NetworkId}; use alloc::sync::Arc; -use core::{convert::TryFrom, mem, ops::Range, result}; +use core::{mem, ops::Range, result}; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; diff --git a/polkadot/xcm/src/v4/location.rs b/polkadot/xcm/src/v4/location.rs index 9275bfdb949..cee76b68940 100644 --- a/polkadot/xcm/src/v4/location.rs +++ b/polkadot/xcm/src/v4/location.rs @@ -18,10 +18,7 @@ use super::{traits::Reanchorable, Junction, Junctions}; use crate::{v3::MultiLocation as OldLocation, VersionedLocation}; -use core::{ - convert::{TryFrom, TryInto}, - result, -}; +use core::result; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; @@ -723,7 +720,6 @@ mod tests { #[test] fn conversion_from_other_types_works() { use crate::v3; - use core::convert::TryInto; fn takes_location>(_arg: Arg) {} diff --git a/polkadot/xcm/src/v4/mod.rs b/polkadot/xcm/src/v4/mod.rs index 30ee485589a..77b6d915fcb 100644 --- a/polkadot/xcm/src/v4/mod.rs +++ b/polkadot/xcm/src/v4/mod.rs @@ -24,11 +24,7 @@ use super::v3::{ use crate::DoubleEncoded; use alloc::{vec, vec::Vec}; use bounded_collections::{parameter_types, BoundedVec}; -use core::{ - convert::{TryFrom, TryInto}, - fmt::Debug, - result, -}; +use core::{fmt::Debug, result}; use derivative::Derivative; use parity_scale_codec::{ self, decode_vec_with_len, Compact, Decode, Encode, Error as CodecError, Input as CodecInput, diff --git a/polkadot/xcm/xcm-builder/src/tests/mod.rs b/polkadot/xcm/xcm-builder/src/tests/mod.rs index 63d254a1067..16ce3d2cf8f 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mod.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mod.rs @@ -15,7 +15,6 @@ // along with Polkadot. If not, see . 
use super::{test_utils::*, *}; -use core::convert::TryInto; use frame_support::{ assert_err, traits::{ConstU32, ContainsPair, ProcessMessageError}, diff --git a/substrate/client/consensus/grandpa/rpc/src/lib.rs b/substrate/client/consensus/grandpa/rpc/src/lib.rs index 0557eab93e2..68de068c305 100644 --- a/substrate/client/consensus/grandpa/rpc/src/lib.rs +++ b/substrate/client/consensus/grandpa/rpc/src/lib.rs @@ -125,7 +125,7 @@ where #[cfg(test)] mod tests { use super::*; - use std::{collections::HashSet, convert::TryInto, sync::Arc}; + use std::{collections::HashSet, sync::Arc}; use jsonrpsee::{core::EmptyServerParams as EmptyParams, types::SubscriptionId, RpcModule}; use parity_scale_codec::{Decode, Encode}; diff --git a/substrate/client/consensus/grandpa/src/environment.rs b/substrate/client/consensus/grandpa/src/environment.rs index d3e2beb84e7..31df038044a 100644 --- a/substrate/client/consensus/grandpa/src/environment.rs +++ b/substrate/client/consensus/grandpa/src/environment.rs @@ -18,7 +18,6 @@ use std::{ collections::{BTreeMap, HashMap}, - iter::FromIterator, marker::PhantomData, pin::Pin, sync::Arc, diff --git a/substrate/client/mixnet/Cargo.toml b/substrate/client/mixnet/Cargo.toml index 65b81bda4b0..e605a06c9d9 100644 --- a/substrate/client/mixnet/Cargo.toml +++ b/substrate/client/mixnet/Cargo.toml @@ -4,7 +4,7 @@ name = "sc-mixnet" version = "0.4.0" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] -edition = "2021" +edition.workspace = true homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/substrate/client/network/src/protocol/notifications/upgrade/collec.rs b/substrate/client/network/src/protocol/notifications/upgrade/collec.rs index 791821b3f75..33c090ae50e 100644 --- a/substrate/client/network/src/protocol/notifications/upgrade/collec.rs +++ b/substrate/client/network/src/protocol/notifications/upgrade/collec.rs @@ -19,7 +19,6 @@ use futures::prelude::*; use libp2p::core::upgrade::{InboundUpgrade, ProtocolName, UpgradeInfo}; use std::{ - iter::FromIterator, pin::Pin, task::{Context, Poll}, vec, diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index 84bab86581c..ef8d8758f3d 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -2,7 +2,7 @@ name = "polkadot-sdk-frame" version = "0.1.0" authors = ["Parity Technologies "] -edition = "2021" +edition.workspace = true license = "Apache-2.0" homepage = "paritytech.github.io" repository.workspace = true diff --git a/substrate/frame/alliance/src/benchmarking.rs b/substrate/frame/alliance/src/benchmarking.rs index 710c32a848d..09e2045555b 100644 --- a/substrate/frame/alliance/src/benchmarking.rs +++ b/substrate/frame/alliance/src/benchmarking.rs @@ -19,11 +19,7 @@ #![cfg(feature = "runtime-benchmarks")] -use core::{ - cmp, - convert::{TryFrom, TryInto}, - mem::size_of, -}; +use core::{cmp, mem::size_of}; use sp_runtime::traits::{Bounded, Hash, StaticLookup}; use frame_benchmarking::{account, v2::*, BenchmarkError}; diff --git a/substrate/frame/alliance/src/lib.rs b/substrate/frame/alliance/src/lib.rs index 1f06241e9c8..ed771c7226e 100644 --- a/substrate/frame/alliance/src/lib.rs +++ b/substrate/frame/alliance/src/lib.rs @@ -101,7 +101,7 @@ use sp_runtime::{ traits::{Dispatchable, Saturating, StaticLookup, Zero}, DispatchError, RuntimeDebug, }; -use sp_std::{convert::TryInto, prelude::*}; +use sp_std::prelude::*; use frame_support::{ dispatch::{DispatchResult, 
DispatchResultWithPostInfo, GetDispatchInfo, PostDispatchInfo}, diff --git a/substrate/frame/alliance/src/mock.rs b/substrate/frame/alliance/src/mock.rs index b183e412bed..7116e69efa1 100644 --- a/substrate/frame/alliance/src/mock.rs +++ b/substrate/frame/alliance/src/mock.rs @@ -17,7 +17,6 @@ //! Test utilities -use core::convert::{TryFrom, TryInto}; pub use sp_core::H256; use sp_runtime::traits::Hash; pub use sp_runtime::{ diff --git a/substrate/frame/alliance/src/types.rs b/substrate/frame/alliance/src/types.rs index 784993b2bc1..149030b52c6 100644 --- a/substrate/frame/alliance/src/types.rs +++ b/substrate/frame/alliance/src/types.rs @@ -19,7 +19,7 @@ use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{traits::ConstU32, BoundedVec}; use scale_info::TypeInfo; use sp_runtime::RuntimeDebug; -use sp_std::{convert::TryInto, prelude::*}; +use sp_std::prelude::*; /// A Multihash instance that only supports the basic functionality and no hashing. #[derive( diff --git a/substrate/frame/examples/frame-crate/Cargo.toml b/substrate/frame/examples/frame-crate/Cargo.toml index 3a0e4f720f9..48cb25f9094 100644 --- a/substrate/frame/examples/frame-crate/Cargo.toml +++ b/substrate/frame/examples/frame-crate/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-example-frame-crate" version = "0.0.1" authors = ["Parity Technologies "] -edition = "2021" +edition.workspace = true license = "MIT-0" homepage = "https://substrate.io" repository.workspace = true diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml index 6a4ef5c29ac..964d6acb889 100644 --- a/substrate/frame/mixnet/Cargo.toml +++ b/substrate/frame/mixnet/Cargo.toml @@ -4,7 +4,7 @@ name = "pallet-mixnet" version = "0.4.0" license = "Apache-2.0" authors = ["Parity Technologies "] -edition = "2021" +edition.workspace = true homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/substrate/frame/node-authorization/src/lib.rs b/substrate/frame/node-authorization/src/lib.rs index 9019a863ad8..a7967536079 100644 --- a/substrate/frame/node-authorization/src/lib.rs +++ b/substrate/frame/node-authorization/src/lib.rs @@ -47,7 +47,7 @@ pub mod weights; pub use pallet::*; use sp_core::OpaquePeerId as PeerId; use sp_runtime::traits::StaticLookup; -use sp_std::{collections::btree_set::BTreeSet, iter::FromIterator, prelude::*}; +use sp_std::{collections::btree_set::BTreeSet, prelude::*}; pub use weights::WeightInfo; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; diff --git a/substrate/frame/safe-mode/src/lib.rs b/substrate/frame/safe-mode/src/lib.rs index 2bf2ebee0a4..4be0776d6c1 100644 --- a/substrate/frame/safe-mode/src/lib.rs +++ b/substrate/frame/safe-mode/src/lib.rs @@ -79,7 +79,6 @@ pub mod mock; mod tests; pub mod weights; -use core::convert::TryInto; use frame_support::{ defensive_assert, pallet_prelude::*, diff --git a/substrate/frame/sassafras/Cargo.toml b/substrate/frame/sassafras/Cargo.toml index 09977142efc..888b1d8f31f 100644 --- a/substrate/frame/sassafras/Cargo.toml +++ b/substrate/frame/sassafras/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-sassafras" version = "0.3.5-dev" authors = ["Parity Technologies "] -edition = "2021" +edition.workspace = true license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/substrate/frame/transaction-payment/rpc/src/lib.rs b/substrate/frame/transaction-payment/rpc/src/lib.rs index f5323cf852e..050c7fb8915 100644 --- 
a/substrate/frame/transaction-payment/rpc/src/lib.rs +++ b/substrate/frame/transaction-payment/rpc/src/lib.rs @@ -17,7 +17,7 @@ //! RPC interface for the transaction payment pallet. -use std::{convert::TryInto, sync::Arc}; +use std::sync::Arc; use codec::{Codec, Decode}; use jsonrpsee::{ diff --git a/substrate/frame/tx-pause/src/lib.rs b/substrate/frame/tx-pause/src/lib.rs index 31be575fba7..5904b5ed316 100644 --- a/substrate/frame/tx-pause/src/lib.rs +++ b/substrate/frame/tx-pause/src/lib.rs @@ -87,7 +87,7 @@ use frame_support::{ }; use frame_system::pallet_prelude::*; use sp_runtime::{traits::Dispatchable, DispatchResult}; -use sp_std::{convert::TryInto, prelude::*}; +use sp_std::prelude::*; pub use pallet::*; pub use weights::*; diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml index 07304ed9b24..50348054da0 100644 --- a/substrate/primitives/consensus/sassafras/Cargo.toml +++ b/substrate/primitives/consensus/sassafras/Cargo.toml @@ -3,7 +3,7 @@ name = "sp-consensus-sassafras" version = "0.3.4-dev" authors.workspace = true description = "Primitives for Sassafras consensus" -edition = "2021" +edition.workspace = true license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/polkadot-sdk/" diff --git a/substrate/primitives/core/fuzz/Cargo.toml b/substrate/primitives/core/fuzz/Cargo.toml index c6b5a065b6d..463eaea8ea3 100644 --- a/substrate/primitives/core/fuzz/Cargo.toml +++ b/substrate/primitives/core/fuzz/Cargo.toml @@ -2,6 +2,7 @@ name = "sp-core-fuzz" version = "0.0.0" publish = false +edition.workspace = true [lints] workspace = true diff --git a/substrate/primitives/mixnet/Cargo.toml b/substrate/primitives/mixnet/Cargo.toml index 07840ca63cb..166609ad922 100644 --- a/substrate/primitives/mixnet/Cargo.toml +++ b/substrate/primitives/mixnet/Cargo.toml @@ -4,7 +4,7 @@ name = "sp-mixnet" version = "0.4.0" license = "Apache-2.0" authors = ["Parity Technologies "] -edition = "2021" +edition.workspace = true homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/substrate/primitives/state-machine/src/basic.rs b/substrate/primitives/state-machine/src/basic.rs index ace88aee262..8b6f746eaba 100644 --- a/substrate/primitives/state-machine/src/basic.rs +++ b/substrate/primitives/state-machine/src/basic.rs @@ -33,7 +33,6 @@ use sp_trie::{empty_child_trie_root, LayoutV0, LayoutV1, TrieConfiguration}; use std::{ any::{Any, TypeId}, collections::BTreeMap, - iter::FromIterator, }; /// Simple Map-based Externalities impl. -- GitLab From 92a348f57deed44789511df73d3fbbbcb58d98cb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 28 Apr 2024 20:36:25 +0400 Subject: [PATCH 089/107] Bump snow from 0.9.3 to 0.9.6 (#4061) Bumps [snow](https://github.com/mcginty/snow) from 0.9.3 to 0.9.6.

Release notes

Sourced from snow's releases.

v0.9.6

  • Validate invalid PSK positions when building a Noise protocol.
  • Raise errors on various typos/mistakes in Noise patterns when parsing.
  • Deprecate the sodiumoxide backend, as that crate is no longer maintained. We may eventually migrate it to a maintained version of the crate, but for now it's best to warn users.
  • Set a hard limit in read_message() in transport mode to 65535 to be fully compliant with the Noise specification.
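To make the last point concrete, here is a minimal sketch of the kind of length check this implies. It is our illustration, not snow's actual code, and `MAX_MESSAGE_LEN`/`ReadError` are made-up names:

```rs
// Illustrative only: the Noise spec caps a transport message at 65535 bytes,
// so a compliant `read_message` must reject longer inputs before decrypting.
const MAX_MESSAGE_LEN: usize = 65_535;

#[derive(Debug)]
enum ReadError {
    Oversized { len: usize }, // hypothetical error variant for this sketch
}

fn read_message(ciphertext: &[u8]) -> Result<Vec<u8>, ReadError> {
    if ciphertext.len() > MAX_MESSAGE_LEN {
        return Err(ReadError::Oversized { len: ciphertext.len() });
    }
    // ... AEAD decryption would happen here; elided in this sketch ...
    Ok(ciphertext.to_vec())
}

fn main() {
    assert!(read_message(&[0u8; 70_000]).is_err());
    assert!(read_message(b"hello").is_ok());
}
```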

Full Changelog: https://github.com/mcginty/snow/compare/v0.9.5...v0.9.6

v0.9.5

This is a security release that fixes a logic flaw in decryption in TransportState (i.e. the stateful one): the nonce could increase even when decryption failed, which can cause a desync between the sender and receiver. This opens up a denial-of-service vector if the attacker has the ability to inject packets into the channel Noise is talking over (a sketch of the failure mode follows at the end of this entry).

More details can be found in the advisory: https://github.com/mcginty/snow/security/advisories/GHSA-7g9j-g5jg-3vv3

All users are encouraged to update.
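To illustrate the failure mode, here is a minimal sketch of the bug class (ours, not snow's implementation; a plain nonce comparison stands in for real AEAD verification):

```rs
// If the receiver advances its nonce even when decryption fails, a single
// attacker-injected garbage packet shifts the receiver's nonce past the
// sender's, and every genuine packet afterwards fails to decrypt (DoS).
struct Receiver {
    nonce: u64,
}

impl Receiver {
    /// Buggy: the nonce advances unconditionally.
    fn recv_buggy(&mut self, packet_nonce: u64) -> Result<(), ()> {
        let ok = packet_nonce == self.nonce; // stand-in for AEAD verification
        self.nonce += 1;
        if ok { Ok(()) } else { Err(()) }
    }

    /// Fixed: the nonce advances only after successful decryption.
    fn recv_fixed(&mut self, packet_nonce: u64) -> Result<(), ()> {
        if packet_nonce == self.nonce {
            self.nonce += 1;
            Ok(())
        } else {
            Err(())
        }
    }
}

fn main() {
    let mut buggy = Receiver { nonce: 0 };
    assert!(buggy.recv_buggy(u64::MAX).is_err()); // injected garbage
    assert!(buggy.recv_buggy(0).is_err()); // genuine packet now fails: desync

    let mut fixed = Receiver { nonce: 0 };
    assert!(fixed.recv_fixed(u64::MAX).is_err()); // injected garbage rejected
    assert!(fixed.recv_fixed(0).is_ok()); // genuine packet still decrypts
}
```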

v0.9.4

This is a dependency version bump release because a couple of important dependencies released new versions that needed a Cargo.toml bump:

  • ring 0.17
  • pqcrypto-kyber 0.8
  • aes-gcm 0.10
  • chacha20poly1305 0.10
Commits
  • a4be73f meta: v0.9.6 release
  • 9e53dcf TransportState: limit read_message size to 65535
  • faf0560 Deprecate sodiumoxide resolver
  • 308a24d Add warnings about multiple calls to same method in Builder
  • f280991 Error when extraneous parameters are included in string to parse
  • dbdcc48 Error on duplicate modifiers in parameter string
  • 8b1a819 Validate PSK index in pattern to avoid panic
  • 74e30cf meta: v0.9.5 release
  • 12e8ae5 Stateful nonce desync fix
  • 02c26b7 Remove clap from simple example
  • Additional commits viewable in compare view

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 153 ++++++++--------------------------------------------- 1 file changed, 23 insertions(+), 130 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d64800fb085..67b0ad4def2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -42,15 +42,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" -[[package]] -name = "aead" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" -dependencies = [ - "generic-array 0.14.7", -] - [[package]] name = "aead" version = "0.5.2" @@ -61,18 +52,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "aes" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" -dependencies = [ - "cfg-if", - "cipher 0.3.0", - "cpufeatures", - "opaque-debug 0.3.0", -] - [[package]] name = "aes" version = "0.8.3" @@ -84,31 +63,17 @@ dependencies = [ "cpufeatures", ] -[[package]] -name = "aes-gcm" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc3be92e19a7ef47457b8e6f90707e12b6ac5d20c6f3866584fa3be0787d839f" -dependencies = [ - "aead 0.4.3", - "aes 0.7.5", - "cipher 0.3.0", - "ctr 0.7.0", - "ghash 0.4.4", - "subtle 2.5.0", -] - [[package]] name = "aes-gcm" version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" dependencies = [ - "aead 0.5.2", - "aes 0.8.3", + "aead", + "aes", "cipher 0.4.4", - "ctr 0.9.2", - "ghash 0.5.0", + "ctr", + "ghash", "subtle 2.5.0", ] @@ -2540,18 +2505,6 @@ dependencies = [ "keystream", ] -[[package]] -name = "chacha20" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" -dependencies = [ - "cfg-if", - "cipher 0.3.0", - "cpufeatures", - "zeroize", -] - [[package]] name = "chacha20" version = "0.9.1" @@ -2565,14 +2518,14 @@ dependencies = [ [[package]] name = "chacha20poly1305" -version = "0.9.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" +checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" dependencies = [ - "aead 0.4.3", - "chacha20 0.8.2", - "cipher 0.3.0", - "poly1305 0.7.2", + "aead", + "chacha20", + "cipher 0.4.4", + "poly1305", "zeroize", ] @@ -2652,15 +2605,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "cipher" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" -dependencies = [ - "generic-array 0.14.7", -] - [[package]] name = "cipher" version = "0.4.4" @@ -2669,6 +2613,7 @@ checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ "crypto-common", "inout", + "zeroize", ] [[package]] @@ -3676,15 +3621,6 @@ dependencies = [ "subtle 2.5.0", ] -[[package]] -name = "ctr" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a232f92a03f37dd7d7dd2adc67166c77e9cd88de5b019b9a9eecfaeaf7bfd481" -dependencies = [ - "cipher 0.3.0", -] - [[package]] name = "ctr" version = "0.9.2" @@ -6395,16 +6331,6 @@ dependencies = [ "rand_core 0.6.4", ] -[[package]] -name = "ghash" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" -dependencies = [ - "opaque-debug 0.3.0", - "polyval 0.5.3", -] - [[package]] name = "ghash" version = "0.5.0" @@ -6412,7 +6338,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" dependencies = [ "opaque-debug 0.3.0", - "polyval 0.6.1", + "polyval", ] [[package]] @@ -12032,7 +11958,7 @@ checksum = "4e69bf016dc406eff7d53a7d3f7cf1c2e72c82b9088aac1118591e36dd2cd3e9" dependencies = [ "bitcoin_hashes 0.13.0", "rand 0.8.5", - "rand_core 0.6.4", + "rand_core 0.5.1", "serde", "unicode-normalization", ] @@ -14568,17 +14494,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "poly1305" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" -dependencies = [ - "cpufeatures", - "opaque-debug 0.3.0", - "universal-hash 0.4.0", -] - [[package]] name = "poly1305" version = "0.8.0" @@ -14587,19 +14502,7 @@ checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" dependencies = [ "cpufeatures", "opaque-debug 0.3.0", - "universal-hash 0.5.1", -] - -[[package]] -name = "polyval" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" -dependencies = [ - "cfg-if", - "cpufeatures", - "opaque-debug 0.3.0", - "universal-hash 0.4.0", + "universal-hash", ] [[package]] @@ -14611,7 +14514,7 @@ dependencies = [ "cfg-if", "cpufeatures", "opaque-debug 0.3.0", - "universal-hash 0.5.1", + "universal-hash", ] [[package]] @@ -17890,7 +17793,7 @@ version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de18f6d8ba0aad7045f5feae07ec29899c1112584a38509a84ad7b04451eaa0" dependencies = [ - "aead 0.5.2", + "aead", "arrayref", "arrayvec 0.7.4", "curve25519-dalek 4.1.2", @@ -18542,7 +18445,7 @@ dependencies = [ "bip39", "blake2-rfc", "bs58 0.5.0", - "chacha20 0.9.1", + "chacha20", "crossbeam-queue", "derive_more", "ed25519-zebra 4.0.3", @@ -18564,7 +18467,7 @@ dependencies = [ "num-traits", "pbkdf2", "pin-project", - "poly1305 0.8.0", + "poly1305", "rand 0.8.5", "rand_chacha 0.3.1", "ruzstd", @@ -18627,16 +18530,16 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" -version = "0.9.3" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c9d1425eb528a21de2755c75af4c9b5d57f50a0d4c3b7f1828a4cd03f8ba155" +checksum = "850948bee068e713b8ab860fe1adc4d109676ab4c3b621fd8147f06b261f2f85" dependencies = [ - "aes-gcm 0.9.2", + "aes-gcm", "blake2 0.10.6", "chacha20poly1305", "curve25519-dalek 4.1.2", "rand_core 0.6.4", - "ring 0.16.20", + "ring 0.17.7", "rustc_version 0.4.0", "sha2 0.10.7", "subtle 2.5.0", @@ -19900,7 +19803,7 @@ dependencies = [ name = "sp-statement-store" version = "10.0.0" dependencies = [ - "aes-gcm 0.10.3", + "aes-gcm", "curve25519-dalek 4.1.2", "ed25519-dalek 2.1.0", "hkdf", @@ -22142,16 +22045,6 @@ version = "0.2.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" -[[package]] -name = "universal-hash" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" -dependencies = [ - "generic-array 0.14.7", - "subtle 2.5.0", -] - [[package]] name = "universal-hash" version = "0.5.1" -- GitLab From f34d8e3cf033e2a22a41b505c437972a5dc83d78 Mon Sep 17 00:00:00 2001 From: Tin Chung <56880684+chungquantin@users.noreply.github.com> Date: Mon, 29 Apr 2024 14:13:01 +0700 Subject: [PATCH 090/107] Remove hard-coded indices from pallet-xcm tests (#4248) # ISSUE - Link to issue: https://github.com/paritytech/polkadot-sdk/issues/4237 # DESCRIPTION Remove all ModuleError with hard-coded indices to pallet Error. For example: ```rs Err(DispatchError::Module(ModuleError { index: 4, error: [2, 0, 0, 0], message: Some("Filtered") })) ``` To ```rs let expected_result = Err(crate::Error::::Filtered.into()); assert_eq!(result, expected_result); ``` # TEST OUTCOME ``` test result: ok. 74 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.02s ``` --------- Co-authored-by: Oliver Tale-Yazdi --- .../pallet-xcm/src/tests/assets_transfer.rs | 218 ++++-------------- polkadot/xcm/pallet-xcm/src/tests/mod.rs | 6 +- 2 files changed, 40 insertions(+), 184 deletions(-) diff --git a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs index 7dc05c1cc70..f42e220d693 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs @@ -22,12 +22,12 @@ use crate::{ DispatchResult, OriginFor, }; use frame_support::{ - assert_ok, + assert_err, assert_ok, traits::{tokens::fungibles::Inspect, Currency}, weights::Weight, }; use polkadot_parachain_primitives::primitives::Id as ParaId; -use sp_runtime::{traits::AccountIdConversion, DispatchError, ModuleError}; +use sp_runtime::traits::AccountIdConversion; use xcm::prelude::*; use xcm_executor::traits::ConvertLocation; @@ -112,14 +112,8 @@ fn limited_teleport_filtered_assets_disallowed() { 0, Unlimited, ); - assert_eq!( - result, - Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered") - })) - ); + let expected_result = Err(crate::Error::::Filtered.into()); + assert_eq!(result, expected_result); }); } @@ -365,11 +359,7 @@ fn reserve_transfer_assets_with_local_asset_reserve_and_local_fee_reserve_works( /// Test `limited_teleport_assets` with local asset reserve and local fee reserve disallowed. #[test] fn teleport_assets_with_local_asset_reserve_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); local_asset_reserve_and_local_fee_reserve_call( XcmPallet::limited_teleport_assets, expected_result, @@ -527,11 +517,7 @@ fn transfer_assets_with_destination_asset_reserve_and_local_fee_reserve_works() /// disallowed. 
#[test] fn reserve_transfer_assets_with_destination_asset_reserve_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::<Test>::TooManyReserves.into()); destination_asset_reserve_and_local_fee_reserve_call( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -542,11 +528,7 @@ /// disallowed. #[test] fn teleport_assets_with_destination_asset_reserve_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::<Test>::Filtered.into()); destination_asset_reserve_and_local_fee_reserve_call( XcmPallet::limited_teleport_assets, expected_result, @@ -633,11 +615,7 @@ fn remote_asset_reserve_and_local_fee_reserve_call_disallowed( /// Test `transfer_assets` with remote asset reserve and local fee reserve is disallowed. #[test] fn transfer_assets_with_remote_asset_reserve_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [22, 0, 0, 0], - message: Some("InvalidAssetUnsupportedReserve"), - })); + let expected_result = Err(crate::Error::<Test>::InvalidAssetUnsupportedReserve.into()); remote_asset_reserve_and_local_fee_reserve_call_disallowed( XcmPallet::transfer_assets, expected_result, @@ -648,11 +626,7 @@ /// disallowed. #[test] fn reserve_transfer_assets_with_remote_asset_reserve_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::<Test>::TooManyReserves.into()); remote_asset_reserve_and_local_fee_reserve_call_disallowed( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -662,11 +636,7 @@ /// Test `limited_teleport_assets` with remote asset reserve and local fee reserve is disallowed. #[test] fn teleport_assets_with_remote_asset_reserve_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::<Test>::Filtered.into()); remote_asset_reserve_and_local_fee_reserve_call_disallowed( XcmPallet::limited_teleport_assets, expected_result, @@ -745,7 +715,7 @@ fn local_asset_reserve_and_destination_fee_reserve_call( assert_eq!(result, expected_result); if expected_result.is_err() { // short-circuit here for tests where we expect failure - return + return; } let weight = BaseXcmWeight::get() * 3; @@ -821,11 +791,7 @@ fn transfer_assets_with_local_asset_reserve_and_destination_fee_reserve_works() /// disallowed.
#[test] fn reserve_transfer_assets_with_local_asset_reserve_and_destination_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::<Test>::TooManyReserves.into()); local_asset_reserve_and_destination_fee_reserve_call( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -835,11 +801,7 @@ /// Test `limited_teleport_assets` with local asset reserve and destination fee reserve disallowed. #[test] fn teleport_assets_with_local_asset_reserve_and_destination_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::<Test>::Filtered.into()); local_asset_reserve_and_destination_fee_reserve_call( XcmPallet::limited_teleport_assets, expected_result, @@ -993,11 +955,7 @@ fn reserve_transfer_assets_with_destination_asset_reserve_and_destination_fee_re /// disallowed. #[test] fn teleport_assets_with_destination_asset_reserve_and_destination_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::<Test>::Filtered.into()); destination_asset_reserve_and_destination_fee_reserve_call( XcmPallet::limited_teleport_assets, expected_result, @@ -1102,11 +1060,7 @@ fn remote_asset_reserve_and_destination_fee_reserve_call_disallowed( /// Test `transfer_assets` with remote asset reserve and destination fee reserve is disallowed. #[test] fn transfer_assets_with_remote_asset_reserve_and_destination_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [22, 0, 0, 0], - message: Some("InvalidAssetUnsupportedReserve"), - })); + let expected_result = Err(crate::Error::<Test>::InvalidAssetUnsupportedReserve.into()); remote_asset_reserve_and_destination_fee_reserve_call_disallowed( XcmPallet::transfer_assets, expected_result, @@ -1117,11 +1071,7 @@ /// disallowed. #[test] fn reserve_transfer_assets_with_remote_asset_reserve_and_destination_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::<Test>::TooManyReserves.into()); remote_asset_reserve_and_destination_fee_reserve_call_disallowed( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -1132,11 +1082,7 @@ /// disallowed. #[test] fn teleport_assets_with_remote_asset_reserve_and_destination_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::<Test>::Filtered.into()); remote_asset_reserve_and_destination_fee_reserve_call_disallowed( XcmPallet::limited_teleport_assets, expected_result, @@ -1222,11 +1168,7 @@ fn local_asset_reserve_and_remote_fee_reserve_call_disallowed( /// Test `transfer_assets` with local asset reserve and remote fee reserve is disallowed.
#[test] fn transfer_assets_with_local_asset_reserve_and_remote_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [22, 0, 0, 0], - message: Some("InvalidAssetUnsupportedReserve"), - })); + let expected_result = Err(crate::Error::<Test>::InvalidAssetUnsupportedReserve.into()); local_asset_reserve_and_remote_fee_reserve_call_disallowed( XcmPallet::transfer_assets, expected_result, @@ -1237,11 +1179,7 @@ /// disallowed. #[test] fn reserve_transfer_assets_with_local_asset_reserve_and_remote_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::<Test>::TooManyReserves.into()); local_asset_reserve_and_remote_fee_reserve_call_disallowed( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -1251,11 +1189,7 @@ /// Test `limited_teleport_assets` with local asset reserve and remote fee reserve is disallowed. #[test] fn teleport_assets_with_local_asset_reserve_and_remote_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::<Test>::Filtered.into()); local_asset_reserve_and_remote_fee_reserve_call_disallowed( XcmPallet::limited_teleport_assets, expected_result, @@ -1366,11 +1300,7 @@ fn destination_asset_reserve_and_remote_fee_reserve_call_disallowed( /// Test `transfer_assets` with destination asset reserve and remote fee reserve is disallowed. #[test] fn transfer_assets_with_destination_asset_reserve_and_remote_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [22, 0, 0, 0], - message: Some("InvalidAssetUnsupportedReserve"), - })); + let expected_result = Err(crate::Error::<Test>::InvalidAssetUnsupportedReserve.into()); destination_asset_reserve_and_remote_fee_reserve_call_disallowed( XcmPallet::transfer_assets, expected_result, @@ -1381,11 +1311,7 @@ /// disallowed. #[test] fn reserve_transfer_assets_with_destination_asset_reserve_and_remote_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::<Test>::TooManyReserves.into()); destination_asset_reserve_and_remote_fee_reserve_call_disallowed( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -1396,11 +1322,7 @@ /// disallowed.
 #[test]
 fn teleport_assets_with_destination_asset_reserve_and_remote_fee_reserve_disallowed() {
-	let expected_result = Err(DispatchError::Module(ModuleError {
-		index: 4,
-		error: [2, 0, 0, 0],
-		message: Some("Filtered"),
-	}));
+	let expected_result = Err(crate::Error::<Test>::Filtered.into());
 	destination_asset_reserve_and_remote_fee_reserve_call_disallowed(
 		XcmPallet::limited_teleport_assets,
 		expected_result,
@@ -1485,7 +1407,7 @@ fn remote_asset_reserve_and_remote_fee_reserve_call(
 	assert_eq!(result, expected_result);
 	if expected_result.is_err() {
 		// short-circuit here for tests where we expect failure
-		return
+		return;
 	}
 
 	assert!(matches!(
@@ -1558,11 +1480,7 @@ fn reserve_transfer_assets_with_remote_asset_reserve_and_remote_fee_reserve_work
 /// disallowed.
 #[test]
 fn teleport_assets_with_remote_asset_reserve_and_remote_fee_reserve_disallowed() {
-	let expected_result = Err(DispatchError::Module(ModuleError {
-		index: 4,
-		error: [2, 0, 0, 0],
-		message: Some("Filtered"),
-	}));
+	let expected_result = Err(crate::Error::<Test>::Filtered.into());
 	remote_asset_reserve_and_remote_fee_reserve_call(
 		XcmPallet::limited_teleport_assets,
 		expected_result,
@@ -1702,11 +1620,7 @@ fn transfer_assets_with_local_asset_reserve_and_teleported_fee_works() {
 /// Test `limited_reserve_transfer_assets` with local asset reserve and teleported fee disallowed.
 #[test]
 fn reserve_transfer_assets_with_local_asset_reserve_and_teleported_fee_disallowed() {
-	let expected_result = Err(DispatchError::Module(ModuleError {
-		index: 4,
-		error: [23, 0, 0, 0],
-		message: Some("TooManyReserves"),
-	}));
+	let expected_result = Err(crate::Error::<Test>::TooManyReserves.into());
 	local_asset_reserve_and_teleported_fee_call(
 		XcmPallet::limited_reserve_transfer_assets,
 		expected_result,
@@ -1716,11 +1630,7 @@ fn reserve_transfer_assets_with_local_asset_reserve_and_teleported_fee_disallowe
 /// Test `limited_teleport_assets` with local asset reserve and teleported fee disallowed.
 #[test]
 fn teleport_assets_with_local_asset_reserve_and_teleported_fee_disallowed() {
-	let expected_result = Err(DispatchError::Module(ModuleError {
-		index: 4,
-		error: [2, 0, 0, 0],
-		message: Some("Filtered"),
-	}));
+	let expected_result = Err(crate::Error::<Test>::Filtered.into());
 	local_asset_reserve_and_teleported_fee_call(
 		XcmPallet::limited_teleport_assets,
 		expected_result,
@@ -1802,7 +1712,7 @@ fn destination_asset_reserve_and_teleported_fee_call(
 	assert_eq!(result, expected_result);
 	if expected_result.is_err() {
 		// short-circuit here for tests where we expect failure
-		return
+		return;
 	}
 
 	let weight = BaseXcmWeight::get() * 4;
@@ -1891,11 +1801,7 @@ fn transfer_assets_with_destination_asset_reserve_and_teleported_fee_works() {
 /// disallowed.
 #[test]
 fn reserve_transfer_assets_with_destination_asset_reserve_and_teleported_fee_disallowed() {
-	let expected_result = Err(DispatchError::Module(ModuleError {
-		index: 4,
-		error: [23, 0, 0, 0],
-		message: Some("TooManyReserves"),
-	}));
+	let expected_result = Err(crate::Error::<Test>::TooManyReserves.into());
 	destination_asset_reserve_and_teleported_fee_call(
 		XcmPallet::limited_reserve_transfer_assets,
 		expected_result,
@@ -1905,11 +1811,7 @@ fn reserve_transfer_assets_with_destination_asset_reserve_and_teleported_fee_dis
 /// Test `limited_teleport_assets` with destination asset reserve and teleported fee disallowed.
 #[test]
 fn teleport_assets_with_destination_asset_reserve_and_teleported_fee_disallowed() {
-	let expected_result = Err(DispatchError::Module(ModuleError {
-		index: 4,
-		error: [2, 0, 0, 0],
-		message: Some("Filtered"),
-	}));
+	let expected_result = Err(crate::Error::<Test>::Filtered.into());
 	destination_asset_reserve_and_teleported_fee_call(
 		XcmPallet::limited_teleport_assets,
 		expected_result,
@@ -2013,11 +1915,7 @@ fn remote_asset_reserve_and_teleported_fee_reserve_call_disallowed(
 /// Test `transfer_assets` with remote asset reserve and teleported fee is disallowed.
 #[test]
 fn transfer_assets_with_remote_asset_reserve_and_teleported_fee_disallowed() {
-	let expected_result = Err(DispatchError::Module(ModuleError {
-		index: 4,
-		error: [22, 0, 0, 0],
-		message: Some("InvalidAssetUnsupportedReserve"),
-	}));
+	let expected_result = Err(crate::Error::<Test>::InvalidAssetUnsupportedReserve.into());
 	remote_asset_reserve_and_teleported_fee_reserve_call_disallowed(
 		XcmPallet::transfer_assets,
 		expected_result,
@@ -2028,11 +1926,7 @@ fn transfer_assets_with_remote_asset_reserve_and_teleported_fee_disallowed() {
 /// disallowed.
 #[test]
 fn reserve_transfer_assets_with_remote_asset_reserve_and_teleported_fee_disallowed() {
-	let expected_result = Err(DispatchError::Module(ModuleError {
-		index: 4,
-		error: [23, 0, 0, 0],
-		message: Some("TooManyReserves"),
-	}));
+	let expected_result = Err(crate::Error::<Test>::TooManyReserves.into());
 	remote_asset_reserve_and_teleported_fee_reserve_call_disallowed(
 		XcmPallet::limited_reserve_transfer_assets,
 		expected_result,
@@ -2042,11 +1936,7 @@ fn reserve_transfer_assets_with_remote_asset_reserve_and_teleported_fee_disallow
 /// Test `limited_teleport_assets` with remote asset reserve and teleported fee is disallowed.
 #[test]
 fn teleport_assets_with_remote_asset_reserve_and_teleported_fee_disallowed() {
-	let expected_result = Err(DispatchError::Module(ModuleError {
-		index: 4,
-		error: [2, 0, 0, 0],
-		message: Some("Filtered"),
-	}));
+	let expected_result = Err(crate::Error::<Test>::Filtered.into());
 	remote_asset_reserve_and_teleported_fee_reserve_call_disallowed(
 		XcmPallet::limited_teleport_assets,
 		expected_result,
@@ -2088,14 +1978,7 @@ fn reserve_transfer_assets_with_teleportable_asset_disallowed() {
 		fee_index as u32,
 		Unlimited,
 	);
-	assert_eq!(
-		res,
-		Err(DispatchError::Module(ModuleError {
-			index: 4,
-			error: [2, 0, 0, 0],
-			message: Some("Filtered")
-		}))
-	);
+	assert_err!(res, crate::Error::<Test>::Filtered);
 	// Alice native asset is still same
 	assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE);
 	// Alice USDT balance is still same
@@ -2136,14 +2019,7 @@ fn transfer_assets_with_filtered_teleported_fee_disallowed() {
 		fee_index as u32,
 		Unlimited,
 	);
-	assert_eq!(
-		result,
-		Err(DispatchError::Module(ModuleError {
-			index: 4,
-			error: [2, 0, 0, 0],
-			message: Some("Filtered")
-		}))
-	);
+	assert_err!(result, crate::Error::<Test>::Filtered);
 	});
 }
 
@@ -2350,11 +2226,7 @@ fn transfer_assets_with_teleportable_asset_and_local_fee_reserve_works() {
 /// Test `limited_reserve_transfer_assets` with teleportable asset and local fee reserve disallowed.
 #[test]
 fn reserve_transfer_assets_with_teleportable_asset_and_local_fee_reserve_disallowed() {
-	let expected_result = Err(DispatchError::Module(ModuleError {
-		index: 4,
-		error: [2, 0, 0, 0],
-		message: Some("Filtered"),
-	}));
+	let expected_result = Err(crate::Error::<Test>::Filtered.into());
 	teleport_asset_using_local_fee_reserve_call(
 		XcmPallet::limited_reserve_transfer_assets,
 		expected_result,
@@ -2364,11 +2236,7 @@ fn reserve_transfer_assets_with_teleportable_asset_and_local_fee_reserve_disallo
 /// Test `limited_teleport_assets` with teleportable asset and local fee reserve disallowed.
 #[test]
 fn teleport_assets_with_teleportable_asset_and_local_fee_reserve_disallowed() {
-	let expected_result = Err(DispatchError::Module(ModuleError {
-		index: 4,
-		error: [2, 0, 0, 0],
-		message: Some("Filtered"),
-	}));
+	let expected_result = Err(crate::Error::<Test>::Filtered.into());
 	teleport_asset_using_local_fee_reserve_call(
 		XcmPallet::limited_teleport_assets,
 		expected_result,
@@ -2541,11 +2409,7 @@ fn transfer_teleported_assets_using_destination_reserve_fee_works() {
 /// disallowed.
 #[test]
 fn reserve_transfer_teleported_assets_using_destination_reserve_fee_disallowed() {
-	let expected_result = Err(DispatchError::Module(ModuleError {
-		index: 4,
-		error: [2, 0, 0, 0],
-		message: Some("Filtered"),
-	}));
+	let expected_result = Err(crate::Error::<Test>::Filtered.into());
 	teleported_asset_using_destination_reserve_fee_call(
 		XcmPallet::limited_reserve_transfer_assets,
 		expected_result,
@@ -2555,11 +2419,7 @@ fn reserve_transfer_teleported_assets_using_destination_reserve_fee_disallowed()
 /// Test `limited_teleport_assets` with teleported asset reserve and destination fee disallowed.
 #[test]
 fn teleport_assets_using_destination_reserve_fee_disallowed() {
-	let expected_result = Err(DispatchError::Module(ModuleError {
-		index: 4,
-		error: [2, 0, 0, 0],
-		message: Some("Filtered"),
-	}));
+	let expected_result = Err(crate::Error::<Test>::Filtered.into());
 	teleported_asset_using_destination_reserve_fee_call(
 		XcmPallet::limited_teleport_assets,
 		expected_result,
diff --git a/polkadot/xcm/pallet-xcm/src/tests/mod.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs
index 8faf16e0d2a..782c8bed478 100644
--- a/polkadot/xcm/pallet-xcm/src/tests/mod.rs
+++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs
@@ -557,11 +557,7 @@ fn incomplete_execute_reverts_side_effects() {
 				),
 				pays_fee: frame_support::dispatch::Pays::Yes,
 			},
-			error: sp_runtime::DispatchError::Module(sp_runtime::ModuleError {
-				index: 4,
-				error: [24, 0, 0, 0,],
-				message: Some("LocalExecutionIncomplete")
-			})
+			error: sp_runtime::DispatchError::from(Error::<Test>::LocalExecutionIncomplete)
 		})
 	);
 });
--
GitLab

From 0031d49d1ec083c62a4e2b5bf594b7f45f84ab0d Mon Sep 17 00:00:00 2001
From: Ankan <10196091+Ank4n@users.noreply.github.com>
Date: Mon, 29 Apr 2024 17:55:45 +0200
Subject: [PATCH 091/107] [Staking] Not allow reap stash for virtual stakers
 (#4311)

Related to https://github.com/paritytech/polkadot-sdk/pull/3905.

Since virtual stakers do not have a real balance, they should not be allowed
to be reaped. The proper reaping of slashed agents will be done in a separate
PR.
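Before the diff: the guard this patch adds boils down to a membership check against the
pallet's virtual-staker tracking. A minimal sketch of the pattern, assuming `VirtualStakers`
is a storage map keyed by the stash account and with `ensure_not_virtual_staker` as a
hypothetical helper for illustration (the actual patch inlines the `ensure!` directly into
`reap_stash` and `restore_ledger`, as shown below):

```rust
use frame_support::{ensure, pallet_prelude::DispatchResult};

impl<T: Config> Pallet<T> {
	// Sketch: a virtual staker is tracked in `VirtualStakers` and keeps no real
	// balance on its stash, so the balance-based "reapable" check in `reap_stash`
	// would otherwise always consider it reapable.
	pub(crate) fn is_virtual_staker(who: &T::AccountId) -> bool {
		VirtualStakers::<T>::contains_key(who)
	}

	// Hypothetical helper: reject the operation up front, before any balance or
	// ledger inspection happens.
	fn ensure_not_virtual_staker(stash: &T::AccountId) -> DispatchResult {
		ensure!(!Self::is_virtual_staker(stash), Error::<T>::VirtualStakerNotAllowed);
		Ok(())
	}
}
```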
---
 prdoc/pr_4311.prdoc                         |  9 ++
 substrate/frame/staking/src/pallet/impls.rs |  4 +-
 substrate/frame/staking/src/pallet/mod.rs   |  8 ++
 substrate/frame/staking/src/tests.rs        | 93 +++++++++++++++++++++
 4 files changed, 113 insertions(+), 1 deletion(-)
 create mode 100644 prdoc/pr_4311.prdoc

diff --git a/prdoc/pr_4311.prdoc b/prdoc/pr_4311.prdoc
new file mode 100644
index 00000000000..cf32acaf008
--- /dev/null
+++ b/prdoc/pr_4311.prdoc
@@ -0,0 +1,9 @@
+title: Not allow reap stash for virtual stakers.
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Add guards to staking dispatchables to prevent virtual stakers from being reaped.
+
+crates:
+- name: pallet-staking

diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs
index 4eb24311ab3..5b2a55303e2 100644
--- a/substrate/frame/staking/src/pallet/impls.rs
+++ b/substrate/frame/staking/src/pallet/impls.rs
@@ -87,10 +87,12 @@ impl<T: Config> Pallet<T> {
 		StakingLedger::<T>::paired_account(Stash(stash.clone()))
 	}
 
-	/// Inspects and returns the corruption state of a ledger and bond, if any.
+	/// Inspects and returns the corruption state of a ledger and direct bond, if any.
 	///
 	/// Note: all operations in this method access directly the `Bonded` and `Ledger` storage maps
 	/// instead of using the [`StakingLedger`] API since the bond and/or ledger may be corrupted.
+	/// It is also meant to check state for direct bonds and may not work as expected for virtual
+	/// bonds.
 	pub(crate) fn inspect_bond_state(
 		stash: &T::AccountId,
 	) -> Result<LedgerIntegrityState, Error<T>> {

diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs
index 9c968d88344..16ad510c562 100644
--- a/substrate/frame/staking/src/pallet/mod.rs
+++ b/substrate/frame/staking/src/pallet/mod.rs
@@ -868,6 +868,8 @@ pub mod pallet {
 		RewardDestinationRestricted,
 		/// Not enough funds available to withdraw.
 		NotEnoughFunds,
+		/// Operation not allowed for virtual stakers.
+		VirtualStakerNotAllowed,
 	}
 
 	#[pallet::hooks]
@@ -1634,6 +1636,9 @@ pub mod pallet {
 		) -> DispatchResultWithPostInfo {
 			let _ = ensure_signed(origin)?;
 
+			// virtual stakers should not be allowed to be reaped.
+			ensure!(!Self::is_virtual_staker(&stash), Error::<T>::VirtualStakerNotAllowed);
+
 			let ed = T::Currency::minimum_balance();
 			let reapable = T::Currency::total_balance(&stash) < ed ||
 				Self::ledger(Stash(stash.clone())).map(|l| l.total).unwrap_or_default() < ed;
@@ -1994,6 +1999,9 @@ pub mod pallet {
 		) -> DispatchResult {
 			T::AdminOrigin::ensure_origin(origin)?;
 
+			// cannot restore ledger for virtual stakers.
+			ensure!(!Self::is_virtual_staker(&stash), Error::<T>::VirtualStakerNotAllowed);
+
 			let current_lock = T::Currency::balance_locked(crate::STAKING_ID, &stash);
 			let stash_balance = T::Currency::free_balance(&stash);

diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs
index d05752f54be..3cb51604aa6 100644
--- a/substrate/frame/staking/src/tests.rs
+++ b/substrate/frame/staking/src/tests.rs
@@ -7205,6 +7205,99 @@ mod staking_unchecked {
 			assert_eq!(SlashObserver::get().get(&101).unwrap(), &nominator_share);
 		})
 	}
+
+	#[test]
+	fn virtual_stakers_cannot_be_reaped() {
+		ExtBuilder::default()
+			// we need enough validators such that disables are allowed.
+			.validator_count(7)
+			.set_status(41, StakerStatus::Validator)
+			.set_status(51, StakerStatus::Validator)
+			.set_status(201, StakerStatus::Validator)
+			.set_status(202, StakerStatus::Validator)
+			.build_and_execute(|| {
+				// make 101 only nominate 11.
+				assert_ok!(Staking::nominate(RuntimeOrigin::signed(101), vec![11]));
+
+				mock::start_active_era(1);
+
+				// slash all stake.
+				let slash_percent = Perbill::from_percent(100);
+				let initial_exposure = Staking::eras_stakers(active_era(), &11);
+				// 101 is a nominator for 11
+				assert_eq!(initial_exposure.others.first().unwrap().who, 101);
+				// make 101 a virtual nominator
+				<Staking as StakingUnchecked>::migrate_to_virtual_staker(&101);
+				// set payee different to self.
+				assert_ok!(<Staking as StakingInterface>::update_payee(&101, &102));
+
+				// cache values
+				let validator_balance = Balances::free_balance(&11);
+				let validator_stake = Staking::ledger(11.into()).unwrap().total;
+				let nominator_balance = Balances::free_balance(&101);
+				let nominator_stake = Staking::ledger(101.into()).unwrap().total;
+
+				// 11 goes offline
+				on_offence_now(
+					&[OffenceDetails {
+						offender: (11, initial_exposure.clone()),
+						reporters: vec![],
+					}],
+					&[slash_percent],
+				);
+
+				// both stakes must have been decreased to 0.
+				assert_eq!(Staking::ledger(101.into()).unwrap().active, 0);
+				assert_eq!(Staking::ledger(11.into()).unwrap().active, 0);
+
+				// all validator stake is slashed
+				assert_eq_error_rate!(
+					validator_balance - validator_stake,
+					Balances::free_balance(&11),
+					1
+				);
+				// Because slashing happened.
+				assert!(is_disabled(11));
+
+				// Virtual nominator's balance is not slashed.
+				assert_eq!(Balances::free_balance(&101), nominator_balance);
+				// Slash is broadcasted to slash observers.
+				assert_eq!(SlashObserver::get().get(&101).unwrap(), &nominator_stake);
+
+				// validator can be reaped.
+				assert_ok!(Staking::reap_stash(RuntimeOrigin::signed(10), 11, u32::MAX));
+				// nominator is a virtual staker and cannot be reaped.
+				assert_noop!(
+					Staking::reap_stash(RuntimeOrigin::signed(10), 101, u32::MAX),
+					Error::<Test>::VirtualStakerNotAllowed
+				);
+			})
+	}
+
+	#[test]
+	fn restore_ledger_not_allowed_for_virtual_stakers() {
+		ExtBuilder::default().has_stakers(true).build_and_execute(|| {
+			setup_double_bonded_ledgers();
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok);
+			set_controller_no_checks(&444);
+			// 333 is corrupted
+			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted);
+			// migrate to virtual staker.
+			<Staking as StakingUnchecked>::migrate_to_virtual_staker(&333);
+
+			// recover the ledger won't work for virtual staker
+			assert_noop!(
+				Staking::restore_ledger(RuntimeOrigin::root(), 333, None, None, None),
+				Error::<Test>::VirtualStakerNotAllowed
+			);
+
+			// migrate 333 back to normal staker
+			<VirtualStakers<Test>>::remove(333);
+
+			// try restore again
+			assert_ok!(Staking::restore_ledger(RuntimeOrigin::root(), 333, None, None, None));
+		})
+	}
 }
 
 mod ledger {
 	use super::*;
--
GitLab

From 4875ea11aeef4f3fc7d724940e5ffb703830619b Mon Sep 17 00:00:00 2001
From: Shawn Tabrizi
Date: Mon, 29 Apr 2024 17:22:23 -0400
Subject: [PATCH 092/107] Refactor XCM Simulator Example (#4220)

This PR does a "developer experience" refactor of the XCM Simulator Example.

I was looking for existing code / documentation where developers could better
learn about working with and configuring XCM. The XCM Simulator was a natural
starting point, since it can emulate end-to-end XCM scenarios without needing
to spawn multiple real chains. However, the XCM Simulator Example was just 3
giant files with a ton of configurations, runtime, pallets, and tests mashed
together.
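To make "end-to-end without spawning real chains" concrete, here is a minimal sketch of
the test shape the simulator enables. `MockNet`, `ParaA`, `Relay`, and `ParachainPalletXcm`
are the aliases this example crate declares via the `decl_test_*` macros (visible in the
`lib.rs` diff below); the XCM program body is elided as it is test-specific:

```rust
use xcm_simulator::TestExt;

#[test]
fn para_a_can_message_the_relay() {
	// Reset the emulated network state between tests.
	MockNet::reset();

	// Run inside parachain A's externalities: send an XCM upward to the relay.
	ParaA::execute_with(|| {
		assert_ok!(ParachainPalletXcm::send_xcm(Here, Parent, Xcm(vec![/* ... */])));
	});

	// Then switch to the relay chain's externalities and assert on its events.
	Relay::execute_with(|| {
		// e.g. inspect relay_chain::System::events() here
	});
}
```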
This PR breaks down the XCM Simulator Example in a way that I believe is more approachable by a new developer who is looking to navigate the various components of the end to end example, and modify it themselves. The basic structure is: - xcm simulator example - lib (tries to only use the xcm simulator macros) - tests - relay-chain - mod (basic runtime that developers should be familiar with) - xcm-config - mod (contains the `XcmConfig` type - various files for each custom configuration - parachain - mock_msg_queue (custom pallet for simulator example) - mod (basic runtime that developers should be familiar with) - xcm-config - mod (contains the `XcmConfig` type - various files for each custom configuration I would like to add more documentation to this too, but I think this is a first step to be accepted which will affect how documentation is added to the example --------- Co-authored-by: Francisco Aguirre Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- polkadot/xcm/xcm-simulator/example/src/lib.rs | 529 +----------------- .../xcm-simulator/example/src/parachain.rs | 470 ---------------- .../example/src/parachain/mock_msg_queue.rs | 185 ++++++ .../example/src/parachain/mod.rs | 182 ++++++ .../parachain/xcm_config/asset_transactor.rs | 39 ++ .../src/parachain/xcm_config/barrier.rs | 20 + .../src/parachain/xcm_config/constants.rs | 30 + .../xcm_config/location_converter.rs | 25 + .../example/src/parachain/xcm_config/mod.rs | 63 +++ .../parachain/xcm_config/origin_converter.rs | 29 + .../src/parachain/xcm_config/reserve.rs | 21 + .../src/parachain/xcm_config/teleporter.rs | 27 + .../src/parachain/xcm_config/weigher.rs | 27 + .../{relay_chain.rs => relay_chain/mod.rs} | 145 +---- .../xcm_config/asset_transactor.rs | 38 ++ .../src/relay_chain/xcm_config/barrier.rs | 20 + .../src/relay_chain/xcm_config/constants.rs | 31 + .../xcm_config/location_converter.rs | 25 + .../example/src/relay_chain/xcm_config/mod.rs | 62 ++ .../xcm_config/origin_converter.rs | 34 ++ .../src/relay_chain/xcm_config/weigher.rs | 27 + .../xcm/xcm-simulator/example/src/tests.rs | 513 +++++++++++++++++ prdoc/pr_4220.prdoc | 11 + 23 files changed, 1435 insertions(+), 1118 deletions(-) delete mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/mock_msg_queue.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/mod.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/asset_transactor.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/barrier.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/constants.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/location_converter.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/mod.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/origin_converter.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/reserve.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/teleporter.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/weigher.rs rename polkadot/xcm/xcm-simulator/example/src/{relay_chain.rs => relay_chain/mod.rs} (53%) create mode 100644 polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/asset_transactor.rs create mode 100644 
polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/barrier.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/constants.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/location_converter.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/mod.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/origin_converter.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/weigher.rs create mode 100644 polkadot/xcm/xcm-simulator/example/src/tests.rs create mode 100644 prdoc/pr_4220.prdoc diff --git a/polkadot/xcm/xcm-simulator/example/src/lib.rs b/polkadot/xcm/xcm-simulator/example/src/lib.rs index 56e204bf571..6fb9a69770e 100644 --- a/polkadot/xcm/xcm-simulator/example/src/lib.rs +++ b/polkadot/xcm/xcm-simulator/example/src/lib.rs @@ -17,13 +17,16 @@ mod parachain; mod relay_chain; +#[cfg(test)] +mod tests; + use sp_runtime::BuildStorage; use sp_tracing; use xcm::prelude::*; use xcm_executor::traits::ConvertLocation; use xcm_simulator::{decl_test_network, decl_test_parachain, decl_test_relay_chain, TestExt}; -pub const ALICE: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([0u8; 32]); +pub const ALICE: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([1u8; 32]); pub const INITIAL_BALANCE: u128 = 1_000_000_000; decl_test_parachain! { @@ -68,27 +71,27 @@ decl_test_network! { pub fn parent_account_id() -> parachain::AccountId { let location = (Parent,); - parachain::LocationToAccountId::convert_location(&location.into()).unwrap() + parachain::location_converter::LocationConverter::convert_location(&location.into()).unwrap() } pub fn child_account_id(para: u32) -> relay_chain::AccountId { let location = (Parachain(para),); - relay_chain::LocationToAccountId::convert_location(&location.into()).unwrap() + relay_chain::location_converter::LocationConverter::convert_location(&location.into()).unwrap() } pub fn child_account_account_id(para: u32, who: sp_runtime::AccountId32) -> relay_chain::AccountId { let location = (Parachain(para), AccountId32 { network: None, id: who.into() }); - relay_chain::LocationToAccountId::convert_location(&location.into()).unwrap() + relay_chain::location_converter::LocationConverter::convert_location(&location.into()).unwrap() } pub fn sibling_account_account_id(para: u32, who: sp_runtime::AccountId32) -> parachain::AccountId { let location = (Parent, Parachain(para), AccountId32 { network: None, id: who.into() }); - parachain::LocationToAccountId::convert_location(&location.into()).unwrap() + parachain::location_converter::LocationConverter::convert_location(&location.into()).unwrap() } pub fn parent_account_account_id(who: sp_runtime::AccountId32) -> parachain::AccountId { let location = (Parent, AccountId32 { network: None, id: who.into() }); - parachain::LocationToAccountId::convert_location(&location.into()).unwrap() + parachain::location_converter::LocationConverter::convert_location(&location.into()).unwrap() } pub fn para_ext(para_id: u32) -> sp_io::TestExternalities { @@ -137,517 +140,3 @@ pub fn relay_ext() -> sp_io::TestExternalities { pub type RelayChainPalletXcm = pallet_xcm::Pallet; pub type ParachainPalletXcm = pallet_xcm::Pallet; - -#[cfg(test)] -mod tests { - use super::*; - - use codec::Encode; - use frame_support::{assert_ok, weights::Weight}; - use xcm::latest::QueryResponseInfo; - use xcm_simulator::TestExt; - - // Helper function for forming buy execution message - fn 
buy_execution(fees: impl Into) -> Instruction { - BuyExecution { fees: fees.into(), weight_limit: Unlimited } - } - - #[test] - fn remote_account_ids_work() { - child_account_account_id(1, ALICE); - sibling_account_account_id(1, ALICE); - parent_account_account_id(ALICE); - } - - #[test] - fn dmp() { - MockNet::reset(); - - let remark = parachain::RuntimeCall::System( - frame_system::Call::::remark_with_event { remark: vec![1, 2, 3] }, - ); - Relay::execute_with(|| { - assert_ok!(RelayChainPalletXcm::send_xcm( - Here, - Parachain(1), - Xcm(vec![Transact { - origin_kind: OriginKind::SovereignAccount, - require_weight_at_most: Weight::from_parts(INITIAL_BALANCE as u64, 1024 * 1024), - call: remark.encode().into(), - }]), - )); - }); - - ParaA::execute_with(|| { - use parachain::{RuntimeEvent, System}; - assert!(System::events().iter().any(|r| matches!( - r.event, - RuntimeEvent::System(frame_system::Event::Remarked { .. }) - ))); - }); - } - - #[test] - fn ump() { - MockNet::reset(); - - let remark = relay_chain::RuntimeCall::System( - frame_system::Call::::remark_with_event { remark: vec![1, 2, 3] }, - ); - ParaA::execute_with(|| { - assert_ok!(ParachainPalletXcm::send_xcm( - Here, - Parent, - Xcm(vec![Transact { - origin_kind: OriginKind::SovereignAccount, - require_weight_at_most: Weight::from_parts(INITIAL_BALANCE as u64, 1024 * 1024), - call: remark.encode().into(), - }]), - )); - }); - - Relay::execute_with(|| { - use relay_chain::{RuntimeEvent, System}; - assert!(System::events().iter().any(|r| matches!( - r.event, - RuntimeEvent::System(frame_system::Event::Remarked { .. }) - ))); - }); - } - - #[test] - fn xcmp() { - MockNet::reset(); - - let remark = parachain::RuntimeCall::System( - frame_system::Call::::remark_with_event { remark: vec![1, 2, 3] }, - ); - ParaA::execute_with(|| { - assert_ok!(ParachainPalletXcm::send_xcm( - Here, - (Parent, Parachain(2)), - Xcm(vec![Transact { - origin_kind: OriginKind::SovereignAccount, - require_weight_at_most: Weight::from_parts(INITIAL_BALANCE as u64, 1024 * 1024), - call: remark.encode().into(), - }]), - )); - }); - - ParaB::execute_with(|| { - use parachain::{RuntimeEvent, System}; - assert!(System::events().iter().any(|r| matches!( - r.event, - RuntimeEvent::System(frame_system::Event::Remarked { .. 
}) - ))); - }); - } - - #[test] - fn reserve_transfer() { - MockNet::reset(); - - let withdraw_amount = 123; - - Relay::execute_with(|| { - assert_ok!(RelayChainPalletXcm::limited_reserve_transfer_assets( - relay_chain::RuntimeOrigin::signed(ALICE), - Box::new(Parachain(1).into()), - Box::new(AccountId32 { network: None, id: ALICE.into() }.into()), - Box::new((Here, withdraw_amount).into()), - 0, - Unlimited, - )); - assert_eq!( - relay_chain::Balances::free_balance(&child_account_id(1)), - INITIAL_BALANCE + withdraw_amount - ); - }); - - ParaA::execute_with(|| { - // free execution, full amount received - assert_eq!( - pallet_balances::Pallet::::free_balance(&ALICE), - INITIAL_BALANCE + withdraw_amount - ); - }); - } - - #[test] - fn remote_locking_and_unlocking() { - MockNet::reset(); - - let locked_amount = 100; - - ParaB::execute_with(|| { - let message = Xcm(vec![LockAsset { - asset: (Here, locked_amount).into(), - unlocker: Parachain(1).into(), - }]); - assert_ok!(ParachainPalletXcm::send_xcm(Here, Parent, message.clone())); - }); - - Relay::execute_with(|| { - use pallet_balances::{BalanceLock, Reasons}; - assert_eq!( - relay_chain::Balances::locks(&child_account_id(2)), - vec![BalanceLock { - id: *b"py/xcmlk", - amount: locked_amount, - reasons: Reasons::All - }] - ); - }); - - ParaA::execute_with(|| { - assert_eq!( - parachain::MsgQueue::received_dmp(), - vec![Xcm(vec![NoteUnlockable { - owner: (Parent, Parachain(2)).into(), - asset: (Parent, locked_amount).into() - }])] - ); - }); - - ParaB::execute_with(|| { - // Request unlocking part of the funds on the relay chain - let message = Xcm(vec![RequestUnlock { - asset: (Parent, locked_amount - 50).into(), - locker: Parent.into(), - }]); - assert_ok!(ParachainPalletXcm::send_xcm(Here, (Parent, Parachain(1)), message)); - }); - - Relay::execute_with(|| { - use pallet_balances::{BalanceLock, Reasons}; - // Lock is reduced - assert_eq!( - relay_chain::Balances::locks(&child_account_id(2)), - vec![BalanceLock { - id: *b"py/xcmlk", - amount: locked_amount - 50, - reasons: Reasons::All - }] - ); - }); - } - - /// Scenario: - /// A parachain transfers an NFT resident on the relay chain to another parachain account. - /// - /// Asserts that the parachain accounts are updated as expected. - #[test] - fn withdraw_and_deposit_nft() { - MockNet::reset(); - - Relay::execute_with(|| { - assert_eq!(relay_chain::Uniques::owner(1, 42), Some(child_account_id(1))); - }); - - ParaA::execute_with(|| { - let message = Xcm(vec![TransferAsset { - assets: (GeneralIndex(1), 42u32).into(), - beneficiary: Parachain(2).into(), - }]); - // Send withdraw and deposit - assert_ok!(ParachainPalletXcm::send_xcm(Here, Parent, message)); - }); - - Relay::execute_with(|| { - assert_eq!(relay_chain::Uniques::owner(1, 42), Some(child_account_id(2))); - }); - } - - /// Scenario: - /// The relay-chain teleports an NFT to a parachain. - /// - /// Asserts that the parachain accounts are updated as expected. - #[test] - fn teleport_nft() { - MockNet::reset(); - - Relay::execute_with(|| { - // Mint the NFT (1, 69) and give it to our "parachain#1 alias". - assert_ok!(relay_chain::Uniques::mint( - relay_chain::RuntimeOrigin::signed(ALICE), - 1, - 69, - child_account_account_id(1, ALICE), - )); - // The parachain#1 alias of Alice is what must hold it on the Relay-chain for it to be - // withdrawable by Alice on the parachain. 
- assert_eq!( - relay_chain::Uniques::owner(1, 69), - Some(child_account_account_id(1, ALICE)) - ); - }); - ParaA::execute_with(|| { - assert_ok!(parachain::ForeignUniques::force_create( - parachain::RuntimeOrigin::root(), - (Parent, GeneralIndex(1)).into(), - ALICE, - false, - )); - assert_eq!( - parachain::ForeignUniques::owner((Parent, GeneralIndex(1)).into(), 69u32.into()), - None, - ); - assert_eq!(parachain::Balances::reserved_balance(&ALICE), 0); - - // IRL Alice would probably just execute this locally on the Relay-chain, but we can't - // easily do that here since we only send between chains. - let message = Xcm(vec![ - WithdrawAsset((GeneralIndex(1), 69u32).into()), - InitiateTeleport { - assets: AllCounted(1).into(), - dest: Parachain(1).into(), - xcm: Xcm(vec![DepositAsset { - assets: AllCounted(1).into(), - beneficiary: (AccountId32 { id: ALICE.into(), network: None },).into(), - }]), - }, - ]); - // Send teleport - let alice = AccountId32 { id: ALICE.into(), network: None }; - assert_ok!(ParachainPalletXcm::send_xcm(alice, Parent, message)); - }); - ParaA::execute_with(|| { - assert_eq!( - parachain::ForeignUniques::owner((Parent, GeneralIndex(1)).into(), 69u32.into()), - Some(ALICE), - ); - assert_eq!(parachain::Balances::reserved_balance(&ALICE), 1000); - }); - Relay::execute_with(|| { - assert_eq!(relay_chain::Uniques::owner(1, 69), None); - }); - } - - /// Scenario: - /// The relay-chain transfers an NFT into a parachain's sovereign account, who then mints a - /// trustless-backed-derived locally. - /// - /// Asserts that the parachain accounts are updated as expected. - #[test] - fn reserve_asset_transfer_nft() { - sp_tracing::init_for_tests(); - MockNet::reset(); - - Relay::execute_with(|| { - assert_ok!(relay_chain::Uniques::force_create( - relay_chain::RuntimeOrigin::root(), - 2, - ALICE, - false - )); - assert_ok!(relay_chain::Uniques::mint( - relay_chain::RuntimeOrigin::signed(ALICE), - 2, - 69, - child_account_account_id(1, ALICE) - )); - assert_eq!( - relay_chain::Uniques::owner(2, 69), - Some(child_account_account_id(1, ALICE)) - ); - }); - ParaA::execute_with(|| { - assert_ok!(parachain::ForeignUniques::force_create( - parachain::RuntimeOrigin::root(), - (Parent, GeneralIndex(2)).into(), - ALICE, - false, - )); - assert_eq!( - parachain::ForeignUniques::owner((Parent, GeneralIndex(2)).into(), 69u32.into()), - None, - ); - assert_eq!(parachain::Balances::reserved_balance(&ALICE), 0); - - let message = Xcm(vec![ - WithdrawAsset((GeneralIndex(2), 69u32).into()), - DepositReserveAsset { - assets: AllCounted(1).into(), - dest: Parachain(1).into(), - xcm: Xcm(vec![DepositAsset { - assets: AllCounted(1).into(), - beneficiary: (AccountId32 { id: ALICE.into(), network: None },).into(), - }]), - }, - ]); - // Send transfer - let alice = AccountId32 { id: ALICE.into(), network: None }; - assert_ok!(ParachainPalletXcm::send_xcm(alice, Parent, message)); - }); - ParaA::execute_with(|| { - log::debug!(target: "xcm-executor", "Hello"); - assert_eq!( - parachain::ForeignUniques::owner((Parent, GeneralIndex(2)).into(), 69u32.into()), - Some(ALICE), - ); - assert_eq!(parachain::Balances::reserved_balance(&ALICE), 1000); - }); - - Relay::execute_with(|| { - assert_eq!(relay_chain::Uniques::owner(2, 69), Some(child_account_id(1))); - }); - } - - /// Scenario: - /// The relay-chain creates an asset class on a parachain and then Alice transfers her NFT into - /// that parachain's sovereign account, who then mints a trustless-backed-derivative locally. 
- /// - /// Asserts that the parachain accounts are updated as expected. - #[test] - fn reserve_asset_class_create_and_reserve_transfer() { - MockNet::reset(); - - Relay::execute_with(|| { - assert_ok!(relay_chain::Uniques::force_create( - relay_chain::RuntimeOrigin::root(), - 2, - ALICE, - false - )); - assert_ok!(relay_chain::Uniques::mint( - relay_chain::RuntimeOrigin::signed(ALICE), - 2, - 69, - child_account_account_id(1, ALICE) - )); - assert_eq!( - relay_chain::Uniques::owner(2, 69), - Some(child_account_account_id(1, ALICE)) - ); - - let message = Xcm(vec![Transact { - origin_kind: OriginKind::Xcm, - require_weight_at_most: Weight::from_parts(1_000_000_000, 1024 * 1024), - call: parachain::RuntimeCall::from( - pallet_uniques::Call::::create { - collection: (Parent, 2u64).into(), - admin: parent_account_id(), - }, - ) - .encode() - .into(), - }]); - // Send creation. - assert_ok!(RelayChainPalletXcm::send_xcm(Here, Parachain(1), message)); - }); - ParaA::execute_with(|| { - // Then transfer - let message = Xcm(vec![ - WithdrawAsset((GeneralIndex(2), 69u32).into()), - DepositReserveAsset { - assets: AllCounted(1).into(), - dest: Parachain(1).into(), - xcm: Xcm(vec![DepositAsset { - assets: AllCounted(1).into(), - beneficiary: (AccountId32 { id: ALICE.into(), network: None },).into(), - }]), - }, - ]); - let alice = AccountId32 { id: ALICE.into(), network: None }; - assert_ok!(ParachainPalletXcm::send_xcm(alice, Parent, message)); - }); - ParaA::execute_with(|| { - assert_eq!(parachain::Balances::reserved_balance(&parent_account_id()), 1000); - assert_eq!( - parachain::ForeignUniques::collection_owner((Parent, 2u64).into()), - Some(parent_account_id()) - ); - }); - } - - /// Scenario: - /// A parachain transfers funds on the relay chain to another parachain account. - /// - /// Asserts that the parachain accounts are updated as expected. - #[test] - fn withdraw_and_deposit() { - MockNet::reset(); - - let send_amount = 10; - - ParaA::execute_with(|| { - let message = Xcm(vec![ - WithdrawAsset((Here, send_amount).into()), - buy_execution((Here, send_amount)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: Parachain(2).into() }, - ]); - // Send withdraw and deposit - assert_ok!(ParachainPalletXcm::send_xcm(Here, Parent, message.clone())); - }); - - Relay::execute_with(|| { - assert_eq!( - relay_chain::Balances::free_balance(child_account_id(1)), - INITIAL_BALANCE - send_amount - ); - assert_eq!( - relay_chain::Balances::free_balance(child_account_id(2)), - INITIAL_BALANCE + send_amount - ); - }); - } - - /// Scenario: - /// A parachain wants to be notified that a transfer worked correctly. - /// It sends a `QueryHolding` after the deposit to get notified on success. - /// - /// Asserts that the balances are updated correctly and the expected XCM is sent. 
- #[test] - fn query_holding() { - MockNet::reset(); - - let send_amount = 10; - let query_id_set = 1234; - - // Send a message which fully succeeds on the relay chain - ParaA::execute_with(|| { - let message = Xcm(vec![ - WithdrawAsset((Here, send_amount).into()), - buy_execution((Here, send_amount)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: Parachain(2).into() }, - ReportHolding { - response_info: QueryResponseInfo { - destination: Parachain(1).into(), - query_id: query_id_set, - max_weight: Weight::from_parts(1_000_000_000, 1024 * 1024), - }, - assets: All.into(), - }, - ]); - // Send withdraw and deposit with query holding - assert_ok!(ParachainPalletXcm::send_xcm(Here, Parent, message.clone(),)); - }); - - // Check that transfer was executed - Relay::execute_with(|| { - // Withdraw executed - assert_eq!( - relay_chain::Balances::free_balance(child_account_id(1)), - INITIAL_BALANCE - send_amount - ); - // Deposit executed - assert_eq!( - relay_chain::Balances::free_balance(child_account_id(2)), - INITIAL_BALANCE + send_amount - ); - }); - - // Check that QueryResponse message was received - ParaA::execute_with(|| { - assert_eq!( - parachain::MsgQueue::received_dmp(), - vec![Xcm(vec![QueryResponse { - query_id: query_id_set, - response: Response::Assets(Assets::new()), - max_weight: Weight::from_parts(1_000_000_000, 1024 * 1024), - querier: Some(Here.into()), - }])], - ); - }); - } -} diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain.rs b/polkadot/xcm/xcm-simulator/example/src/parachain.rs deleted file mode 100644 index 41e62596392..00000000000 --- a/polkadot/xcm/xcm-simulator/example/src/parachain.rs +++ /dev/null @@ -1,470 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Parachain runtime mock. 
- -use codec::{Decode, Encode}; -use core::marker::PhantomData; -use frame_support::{ - construct_runtime, derive_impl, parameter_types, - traits::{ContainsPair, EnsureOrigin, EnsureOriginWithArg, Everything, EverythingBut, Nothing}, - weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, -}; - -use frame_system::EnsureRoot; -use sp_core::{ConstU32, H256}; -use sp_runtime::{ - traits::{Get, Hash, IdentityLookup}, - AccountId32, -}; -use sp_std::prelude::*; - -use pallet_xcm::XcmPassthrough; -use polkadot_core_primitives::BlockNumber as RelayBlockNumber; -use polkadot_parachain_primitives::primitives::{ - DmpMessageHandler, Id as ParaId, Sibling, XcmpMessageFormat, XcmpMessageHandler, -}; -use xcm::{latest::prelude::*, VersionedXcm}; -use xcm_builder::{ - Account32Hash, AccountId32Aliases, AllowUnpaidExecutionFrom, ConvertedConcreteId, - EnsureDecodableXcm, EnsureXcmOrigin, FixedRateOfFungible, FixedWeightBounds, - FrameTransactionalProcessor, FungibleAdapter, IsConcrete, NativeAsset, NoChecking, - NonFungiblesAdapter, ParentIsPreset, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, -}; -use xcm_executor::{ - traits::{ConvertLocation, JustTry}, - Config, XcmExecutor, -}; - -pub type SovereignAccountOf = ( - SiblingParachainConvertsVia, - AccountId32Aliases, - ParentIsPreset, -); - -pub type AccountId = AccountId32; -pub type Balance = u128; - -parameter_types! { - pub const BlockHashCount: u64 = 250; -} - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] -impl frame_system::Config for Runtime { - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = ::sp_runtime::traits::BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; - type BlockWeights = (); - type BlockLength = (); - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type DbWeight = (); - type BaseCallFilter = Everything; - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; -} - -parameter_types! 
{ - pub ExistentialDeposit: Balance = 1; - pub const MaxLocks: u32 = 50; - pub const MaxReserves: u32 = 50; -} - -impl pallet_balances::Config for Runtime { - type MaxLocks = MaxLocks; - type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; -} - -#[cfg(feature = "runtime-benchmarks")] -pub struct UniquesHelper; -#[cfg(feature = "runtime-benchmarks")] -impl pallet_uniques::BenchmarkHelper for UniquesHelper { - fn collection(i: u16) -> Location { - GeneralIndex(i as u128).into() - } - fn item(i: u16) -> AssetInstance { - AssetInstance::Index(i as u128) - } -} - -impl pallet_uniques::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type CollectionId = Location; - type ItemId = AssetInstance; - type Currency = Balances; - type CreateOrigin = ForeignCreators; - type ForceOrigin = frame_system::EnsureRoot; - type CollectionDeposit = frame_support::traits::ConstU128<1_000>; - type ItemDeposit = frame_support::traits::ConstU128<1_000>; - type MetadataDepositBase = frame_support::traits::ConstU128<1_000>; - type AttributeDepositBase = frame_support::traits::ConstU128<1_000>; - type DepositPerByte = frame_support::traits::ConstU128<1>; - type StringLimit = ConstU32<64>; - type KeyLimit = ConstU32<64>; - type ValueLimit = ConstU32<128>; - type Locker = (); - type WeightInfo = (); - #[cfg(feature = "runtime-benchmarks")] - type Helper = UniquesHelper; -} - -// `EnsureOriginWithArg` impl for `CreateOrigin` which allows only XCM origins -// which are locations containing the class location. -pub struct ForeignCreators; -impl EnsureOriginWithArg for ForeignCreators { - type Success = AccountId; - - fn try_origin( - o: RuntimeOrigin, - a: &Location, - ) -> sp_std::result::Result { - let origin_location = pallet_xcm::EnsureXcm::::try_origin(o.clone())?; - if !a.starts_with(&origin_location) { - return Err(o) - } - SovereignAccountOf::convert_location(&origin_location).ok_or(o) - } - - #[cfg(feature = "runtime-benchmarks")] - fn try_successful_origin(a: &Location) -> Result { - Ok(pallet_xcm::Origin::Xcm(a.clone()).into()) - } -} - -parameter_types! { - pub const ReservedXcmpWeight: Weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_div(4), 0); - pub const ReservedDmpWeight: Weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_div(4), 0); -} - -parameter_types! { - pub const KsmLocation: Location = Location::parent(); - pub const RelayNetwork: NetworkId = NetworkId::Kusama; - pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(MsgQueue::parachain_id().into())].into(); -} - -pub type LocationToAccountId = ( - ParentIsPreset, - SiblingParachainConvertsVia, - AccountId32Aliases, - Account32Hash<(), AccountId>, -); - -pub type XcmOriginToCallOrigin = ( - SovereignSignedViaLocation, - SignedAccountId32AsNative, - XcmPassthrough, -); - -parameter_types! 
{ - pub const UnitWeightCost: Weight = Weight::from_parts(1, 1); - pub KsmPerSecondPerByte: (AssetId, u128, u128) = (AssetId(Parent.into()), 1, 1); - pub const MaxInstructions: u32 = 100; - pub const MaxAssetsIntoHolding: u32 = 64; - pub ForeignPrefix: Location = (Parent,).into(); -} - -pub type LocalAssetTransactor = ( - FungibleAdapter, LocationToAccountId, AccountId, ()>, - NonFungiblesAdapter< - ForeignUniques, - ConvertedConcreteId, - SovereignAccountOf, - AccountId, - NoChecking, - (), - >, -); - -pub type XcmRouter = EnsureDecodableXcm>; -pub type Barrier = AllowUnpaidExecutionFrom; - -parameter_types! { - pub NftCollectionOne: AssetFilter - = Wild(AllOf { fun: WildNonFungible, id: AssetId((Parent, GeneralIndex(1)).into()) }); - pub NftCollectionOneForRelay: (AssetFilter, Location) - = (NftCollectionOne::get(), (Parent,).into()); -} -pub type TrustedTeleporters = xcm_builder::Case; -pub type TrustedReserves = EverythingBut>; - -pub struct XcmConfig; -impl Config for XcmConfig { - type RuntimeCall = RuntimeCall; - type XcmSender = XcmRouter; - type AssetTransactor = LocalAssetTransactor; - type OriginConverter = XcmOriginToCallOrigin; - type IsReserve = (NativeAsset, TrustedReserves); - type IsTeleporter = TrustedTeleporters; - type UniversalLocation = UniversalLocation; - type Barrier = Barrier; - type Weigher = FixedWeightBounds; - type Trader = FixedRateOfFungible; - type ResponseHandler = (); - type AssetTrap = (); - type AssetLocker = PolkadotXcm; - type AssetExchanger = (); - type AssetClaims = (); - type SubscriptionService = (); - type PalletInstancesInfo = (); - type FeeManager = (); - type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type MessageExporter = (); - type UniversalAliases = Nothing; - type CallDispatcher = RuntimeCall; - type SafeCallFilter = Everything; - type Aliasers = Nothing; - type TransactionalProcessor = FrameTransactionalProcessor; - type HrmpNewChannelOpenRequestHandler = (); - type HrmpChannelAcceptedHandler = (); - type HrmpChannelClosingHandler = (); -} - -#[frame_support::pallet] -pub mod mock_msg_queue { - use super::*; - use frame_support::pallet_prelude::*; - - #[pallet::config] - pub trait Config: frame_system::Config { - type RuntimeEvent: From> + IsType<::RuntimeEvent>; - type XcmExecutor: ExecuteXcm; - } - - #[pallet::call] - impl Pallet {} - - #[pallet::pallet] - #[pallet::without_storage_info] - pub struct Pallet(_); - - #[pallet::storage] - #[pallet::getter(fn parachain_id)] - pub(super) type ParachainId = StorageValue<_, ParaId, ValueQuery>; - - #[pallet::storage] - #[pallet::getter(fn received_dmp)] - /// A queue of received DMP messages - pub(super) type ReceivedDmp = StorageValue<_, Vec>, ValueQuery>; - - impl Get for Pallet { - fn get() -> ParaId { - Self::parachain_id() - } - } - - pub type MessageId = [u8; 32]; - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - // XCMP - /// Some XCM was executed OK. - Success(Option), - /// Some XCM failed. - Fail(Option, XcmError), - /// Bad XCM version used. - BadVersion(Option), - /// Bad XCM format used. - BadFormat(Option), - - // DMP - /// Downward message is invalid XCM. - InvalidFormat(MessageId), - /// Downward message is unsupported version of XCM. - UnsupportedVersion(MessageId), - /// Downward message executed with the given outcome. 
- ExecutedDownward(MessageId, Outcome), - } - - impl Pallet { - pub fn set_para_id(para_id: ParaId) { - ParachainId::::put(para_id); - } - - fn handle_xcmp_message( - sender: ParaId, - _sent_at: RelayBlockNumber, - xcm: VersionedXcm, - max_weight: Weight, - ) -> Result { - let hash = Encode::using_encoded(&xcm, T::Hashing::hash); - let mut message_hash = Encode::using_encoded(&xcm, sp_io::hashing::blake2_256); - let (result, event) = match Xcm::::try_from(xcm) { - Ok(xcm) => { - let location = (Parent, Parachain(sender.into())); - match T::XcmExecutor::prepare_and_execute( - location, - xcm, - &mut message_hash, - max_weight, - Weight::zero(), - ) { - Outcome::Error { error } => (Err(error), Event::Fail(Some(hash), error)), - Outcome::Complete { used } => (Ok(used), Event::Success(Some(hash))), - // As far as the caller is concerned, this was dispatched without error, so - // we just report the weight used. - Outcome::Incomplete { used, error } => - (Ok(used), Event::Fail(Some(hash), error)), - } - }, - Err(()) => (Err(XcmError::UnhandledXcmVersion), Event::BadVersion(Some(hash))), - }; - Self::deposit_event(event); - result - } - } - - impl XcmpMessageHandler for Pallet { - fn handle_xcmp_messages<'a, I: Iterator>( - iter: I, - max_weight: Weight, - ) -> Weight { - for (sender, sent_at, data) in iter { - let mut data_ref = data; - let _ = XcmpMessageFormat::decode(&mut data_ref) - .expect("Simulator encodes with versioned xcm format; qed"); - - let mut remaining_fragments = data_ref; - while !remaining_fragments.is_empty() { - if let Ok(xcm) = - VersionedXcm::::decode(&mut remaining_fragments) - { - let _ = Self::handle_xcmp_message(sender, sent_at, xcm, max_weight); - } else { - debug_assert!(false, "Invalid incoming XCMP message data"); - } - } - } - max_weight - } - } - - impl DmpMessageHandler for Pallet { - fn handle_dmp_messages( - iter: impl Iterator)>, - limit: Weight, - ) -> Weight { - for (_i, (_sent_at, data)) in iter.enumerate() { - let mut id = sp_io::hashing::blake2_256(&data[..]); - let maybe_versioned = VersionedXcm::::decode(&mut &data[..]); - match maybe_versioned { - Err(_) => { - Self::deposit_event(Event::InvalidFormat(id)); - }, - Ok(versioned) => match Xcm::try_from(versioned) { - Err(()) => Self::deposit_event(Event::UnsupportedVersion(id)), - Ok(x) => { - let outcome = T::XcmExecutor::prepare_and_execute( - Parent, - x.clone(), - &mut id, - limit, - Weight::zero(), - ); - >::append(x); - Self::deposit_event(Event::ExecutedDownward(id, outcome)); - }, - }, - } - } - limit - } - } -} - -impl mock_msg_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; -} - -pub type LocalOriginToLocation = SignedToAccountId32; - -pub struct TrustedLockerCase(PhantomData); -impl> ContainsPair for TrustedLockerCase { - fn contains(origin: &Location, asset: &Asset) -> bool { - let (o, a) = T::get(); - a.matches(asset) && &o == origin - } -} - -parameter_types! 
{ - pub RelayTokenForRelay: (Location, AssetFilter) = (Parent.into(), Wild(AllOf { id: AssetId(Parent.into()), fun: WildFungible })); -} - -pub type TrustedLockers = TrustedLockerCase; - -impl pallet_xcm::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type SendXcmOrigin = EnsureXcmOrigin; - type XcmRouter = XcmRouter; - type ExecuteXcmOrigin = EnsureXcmOrigin; - type XcmExecuteFilter = Everything; - type XcmExecutor = XcmExecutor; - type XcmTeleportFilter = Nothing; - type XcmReserveTransferFilter = Everything; - type Weigher = FixedWeightBounds; - type UniversalLocation = UniversalLocation; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; - type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; - type Currency = Balances; - type CurrencyMatcher = (); - type TrustedLockers = TrustedLockers; - type SovereignAccountOf = LocationToAccountId; - type MaxLockers = ConstU32<8>; - type MaxRemoteLockConsumers = ConstU32<0>; - type RemoteLockConsumerIdentifier = (); - type WeightInfo = pallet_xcm::TestWeightInfo; - type AdminOrigin = EnsureRoot; -} - -type Block = frame_system::mocking::MockBlock; - -construct_runtime!( - pub enum Runtime - { - System: frame_system, - Balances: pallet_balances, - MsgQueue: mock_msg_queue, - PolkadotXcm: pallet_xcm, - ForeignUniques: pallet_uniques, - } -); diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/mock_msg_queue.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/mock_msg_queue.rs new file mode 100644 index 00000000000..17cde921f3e --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/mock_msg_queue.rs @@ -0,0 +1,185 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +pub use pallet::*; +use polkadot_core_primitives::BlockNumber as RelayBlockNumber; +use polkadot_parachain_primitives::primitives::{ + DmpMessageHandler, Id as ParaId, XcmpMessageFormat, XcmpMessageHandler, +}; +use sp_runtime::traits::{Get, Hash}; +use xcm::{latest::prelude::*, VersionedXcm}; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type XcmExecutor: ExecuteXcm; + } + + #[pallet::call] + impl Pallet {} + + #[pallet::pallet] + #[pallet::without_storage_info] + pub struct Pallet(_); + + #[pallet::storage] + pub(super) type ParachainId = StorageValue<_, ParaId, ValueQuery>; + + #[pallet::storage] + /// A queue of received DMP messages + pub(super) type ReceivedDmp = StorageValue<_, Vec>, ValueQuery>; + + impl Get for Pallet { + fn get() -> ParaId { + Self::parachain_id() + } + } + + pub type MessageId = [u8; 32]; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + // XCMP + /// Some XCM was executed OK. 
+ Success(Option), + /// Some XCM failed. + Fail(Option, XcmError), + /// Bad XCM version used. + BadVersion(Option), + /// Bad XCM format used. + BadFormat(Option), + + // DMP + /// Downward message is invalid XCM. + InvalidFormat(MessageId), + /// Downward message is unsupported version of XCM. + UnsupportedVersion(MessageId), + /// Downward message executed with the given outcome. + ExecutedDownward(MessageId, Outcome), + } + + impl Pallet { + /// Get the Parachain Id. + pub fn parachain_id() -> ParaId { + ParachainId::::get() + } + + /// Set the Parachain Id. + pub fn set_para_id(para_id: ParaId) { + ParachainId::::put(para_id); + } + + /// Get the queue of receieved DMP messages. + pub fn received_dmp() -> Vec> { + ReceivedDmp::::get() + } + + fn handle_xcmp_message( + sender: ParaId, + _sent_at: RelayBlockNumber, + xcm: VersionedXcm, + max_weight: Weight, + ) -> Result { + let hash = Encode::using_encoded(&xcm, T::Hashing::hash); + let mut message_hash = Encode::using_encoded(&xcm, sp_io::hashing::blake2_256); + let (result, event) = match Xcm::::try_from(xcm) { + Ok(xcm) => { + let location = (Parent, Parachain(sender.into())); + match T::XcmExecutor::prepare_and_execute( + location, + xcm, + &mut message_hash, + max_weight, + Weight::zero(), + ) { + Outcome::Error { error } => (Err(error), Event::Fail(Some(hash), error)), + Outcome::Complete { used } => (Ok(used), Event::Success(Some(hash))), + // As far as the caller is concerned, this was dispatched without error, so + // we just report the weight used. + Outcome::Incomplete { used, error } => + (Ok(used), Event::Fail(Some(hash), error)), + } + }, + Err(()) => (Err(XcmError::UnhandledXcmVersion), Event::BadVersion(Some(hash))), + }; + Self::deposit_event(event); + result + } + } + + impl XcmpMessageHandler for Pallet { + fn handle_xcmp_messages<'a, I: Iterator>( + iter: I, + max_weight: Weight, + ) -> Weight { + for (sender, sent_at, data) in iter { + let mut data_ref = data; + let _ = XcmpMessageFormat::decode(&mut data_ref) + .expect("Simulator encodes with versioned xcm format; qed"); + + let mut remaining_fragments = data_ref; + while !remaining_fragments.is_empty() { + if let Ok(xcm) = + VersionedXcm::::decode(&mut remaining_fragments) + { + let _ = Self::handle_xcmp_message(sender, sent_at, xcm, max_weight); + } else { + debug_assert!(false, "Invalid incoming XCMP message data"); + } + } + } + max_weight + } + } + + impl DmpMessageHandler for Pallet { + fn handle_dmp_messages( + iter: impl Iterator)>, + limit: Weight, + ) -> Weight { + for (_i, (_sent_at, data)) in iter.enumerate() { + let mut id = sp_io::hashing::blake2_256(&data[..]); + let maybe_versioned = VersionedXcm::::decode(&mut &data[..]); + match maybe_versioned { + Err(_) => { + Self::deposit_event(Event::InvalidFormat(id)); + }, + Ok(versioned) => match Xcm::try_from(versioned) { + Err(()) => Self::deposit_event(Event::UnsupportedVersion(id)), + Ok(x) => { + let outcome = T::XcmExecutor::prepare_and_execute( + Parent, + x.clone(), + &mut id, + limit, + Weight::zero(), + ); + >::append(x); + Self::deposit_event(Event::ExecutedDownward(id, outcome)); + }, + }, + } + } + limit + } + } +} diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/mod.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/mod.rs new file mode 100644 index 00000000000..8021f955165 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/mod.rs @@ -0,0 +1,182 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Parachain runtime mock. + +mod mock_msg_queue; +mod xcm_config; +pub use xcm_config::*; + +use core::marker::PhantomData; +use frame_support::{ + construct_runtime, derive_impl, parameter_types, + traits::{ConstU128, ContainsPair, EnsureOrigin, EnsureOriginWithArg, Everything, Nothing}, + weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, +}; +use frame_system::EnsureRoot; +use sp_core::ConstU32; +use sp_runtime::{ + traits::{Get, IdentityLookup}, + AccountId32, +}; +use sp_std::prelude::*; +use xcm::latest::prelude::*; +use xcm_builder::{EnsureXcmOrigin, SignedToAccountId32}; +use xcm_executor::{traits::ConvertLocation, XcmExecutor}; + +pub type AccountId = AccountId32; +pub type Balance = u128; + +parameter_types! { + pub const BlockHashCount: u64 = 250; +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Runtime { + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Block = Block; + type AccountData = pallet_balances::AccountData; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] +impl pallet_balances::Config for Runtime { + type Balance = Balance; + type ExistentialDeposit = ConstU128<1>; + type AccountStore = System; +} + +#[cfg(feature = "runtime-benchmarks")] +pub struct UniquesHelper; +#[cfg(feature = "runtime-benchmarks")] +impl pallet_uniques::BenchmarkHelper for UniquesHelper { + fn collection(i: u16) -> Location { + GeneralIndex(i as u128).into() + } + fn item(i: u16) -> AssetInstance { + AssetInstance::Index(i as u128) + } +} + +impl pallet_uniques::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type CollectionId = Location; + type ItemId = AssetInstance; + type Currency = Balances; + type CreateOrigin = ForeignCreators; + type ForceOrigin = frame_system::EnsureRoot; + type CollectionDeposit = frame_support::traits::ConstU128<1_000>; + type ItemDeposit = frame_support::traits::ConstU128<1_000>; + type MetadataDepositBase = frame_support::traits::ConstU128<1_000>; + type AttributeDepositBase = frame_support::traits::ConstU128<1_000>; + type DepositPerByte = frame_support::traits::ConstU128<1>; + type StringLimit = ConstU32<64>; + type KeyLimit = ConstU32<64>; + type ValueLimit = ConstU32<128>; + type Locker = (); + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type Helper = UniquesHelper; +} + +// `EnsureOriginWithArg` impl for `CreateOrigin` which allows only XCM origins +// which are locations containing the class location. 
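+//
+// Illustrative example (not part of the runtime): an XCM origin of
+// `(Parent, Parachain(1))` may create the collection located at
+// `(Parent, Parachain(1), GeneralIndex(1))`, but not one located at
+// `(Parent, Parachain(2), GeneralIndex(1))`, because the latter location does
+// not start with the origin location (see `try_origin` below).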
+pub struct ForeignCreators; +impl EnsureOriginWithArg for ForeignCreators { + type Success = AccountId; + + fn try_origin( + o: RuntimeOrigin, + a: &Location, + ) -> sp_std::result::Result { + let origin_location = pallet_xcm::EnsureXcm::::try_origin(o.clone())?; + if !a.starts_with(&origin_location) { + return Err(o); + } + xcm_config::location_converter::LocationConverter::convert_location(&origin_location) + .ok_or(o) + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin(a: &Location) -> Result { + Ok(pallet_xcm::Origin::Xcm(a.clone()).into()) + } +} + +parameter_types! { + pub const ReservedXcmpWeight: Weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_div(4), 0); + pub const ReservedDmpWeight: Weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_div(4), 0); +} + +impl mock_msg_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type XcmExecutor = XcmExecutor; +} + +pub type LocalOriginToLocation = + SignedToAccountId32; + +pub struct TrustedLockerCase(PhantomData); +impl> ContainsPair for TrustedLockerCase { + fn contains(origin: &Location, asset: &Asset) -> bool { + let (o, a) = T::get(); + a.matches(asset) && &o == origin + } +} + +parameter_types! { + pub RelayTokenForRelay: (Location, AssetFilter) = (Parent.into(), Wild(AllOf { id: AssetId(Parent.into()), fun: WildFungible })); +} + +pub type TrustedLockers = TrustedLockerCase; + +impl pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type SendXcmOrigin = EnsureXcmOrigin; + type XcmRouter = XcmRouter; + type ExecuteXcmOrigin = EnsureXcmOrigin; + type XcmExecuteFilter = Everything; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = Nothing; + type XcmReserveTransferFilter = Everything; + type Weigher = weigher::Weigher; + type UniversalLocation = constants::UniversalLocation; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; + type Currency = Balances; + type CurrencyMatcher = (); + type TrustedLockers = TrustedLockers; + type SovereignAccountOf = location_converter::LocationConverter; + type MaxLockers = ConstU32<8>; + type MaxRemoteLockConsumers = ConstU32<0>; + type RemoteLockConsumerIdentifier = (); + type WeightInfo = pallet_xcm::TestWeightInfo; + type AdminOrigin = EnsureRoot; +} + +type Block = frame_system::mocking::MockBlock; + +construct_runtime!( + pub struct Runtime { + System: frame_system, + Balances: pallet_balances, + MsgQueue: mock_msg_queue, + PolkadotXcm: pallet_xcm, + ForeignUniques: pallet_uniques, + } +); diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/asset_transactor.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/asset_transactor.rs new file mode 100644 index 00000000000..25cffcf8cef --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/asset_transactor.rs @@ -0,0 +1,39 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::parachain::{ + constants::KsmLocation, location_converter::LocationConverter, AccountId, Balances, + ForeignUniques, +}; +use xcm::latest::prelude::*; +use xcm_builder::{ + ConvertedConcreteId, FungibleAdapter, IsConcrete, NoChecking, NonFungiblesAdapter, +}; +use xcm_executor::traits::JustTry; + +type LocalAssetTransactor = ( + FungibleAdapter, LocationConverter, AccountId, ()>, + NonFungiblesAdapter< + ForeignUniques, + ConvertedConcreteId, + LocationConverter, + AccountId, + NoChecking, + (), + >, +); + +pub type AssetTransactor = LocalAssetTransactor; diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/barrier.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/barrier.rs new file mode 100644 index 00000000000..1c7aa2c6d32 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/barrier.rs @@ -0,0 +1,20 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use frame_support::traits::Everything; +use xcm_builder::AllowUnpaidExecutionFrom; + +pub type Barrier = AllowUnpaidExecutionFrom; diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/constants.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/constants.rs new file mode 100644 index 00000000000..f6d0174def8 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/constants.rs @@ -0,0 +1,30 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::parachain::MsgQueue; +use frame_support::parameter_types; +use xcm::latest::prelude::*; + +parameter_types! { + pub KsmPerSecondPerByte: (AssetId, u128, u128) = (AssetId(Parent.into()), 1, 1); + pub const MaxAssetsIntoHolding: u32 = 64; +} + +parameter_types! 
{ + pub const KsmLocation: Location = Location::parent(); + pub const RelayNetwork: NetworkId = NetworkId::Kusama; + pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(MsgQueue::parachain_id().into())].into(); +} diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/location_converter.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/location_converter.rs new file mode 100644 index 00000000000..5a54414dd13 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/location_converter.rs @@ -0,0 +1,25 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::parachain::{constants::RelayNetwork, AccountId}; +use xcm_builder::{AccountId32Aliases, DescribeAllTerminal, DescribeFamily, HashedDescription}; + +type LocationToAccountId = ( + HashedDescription>, + AccountId32Aliases, +); + +pub type LocationConverter = LocationToAccountId; diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/mod.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/mod.rs new file mode 100644 index 00000000000..0ba02aab9bf --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/mod.rs @@ -0,0 +1,63 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
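+
+//! XCM configuration for the mock parachain, split into one module per concern
+//! (asset transactor, barrier, constants, location converter, origin converter,
+//! reserves, teleporter, weigher). The pieces are assembled into `XcmConfig`
+//! below and consumed as `XcmExecutor<XcmConfig>` by the rest of the runtime.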
+ +pub mod asset_transactor; +pub mod barrier; +pub mod constants; +pub mod location_converter; +pub mod origin_converter; +pub mod reserve; +pub mod teleporter; +pub mod weigher; + +use crate::parachain::{MsgQueue, PolkadotXcm, RuntimeCall}; +use frame_support::traits::{Everything, Nothing}; +use xcm_builder::{EnsureDecodableXcm, FixedRateOfFungible, FrameTransactionalProcessor}; + +// Generated from `decl_test_network!` +pub type XcmRouter = EnsureDecodableXcm>; + +pub struct XcmConfig; +impl xcm_executor::Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = XcmRouter; + type AssetTransactor = asset_transactor::AssetTransactor; + type OriginConverter = origin_converter::OriginConverter; + type IsReserve = reserve::TrustedReserves; + type IsTeleporter = teleporter::TrustedTeleporters; + type UniversalLocation = constants::UniversalLocation; + type Barrier = barrier::Barrier; + type Weigher = weigher::Weigher; + type Trader = FixedRateOfFungible; + type ResponseHandler = (); + type AssetTrap = (); + type AssetLocker = PolkadotXcm; + type AssetExchanger = (); + type AssetClaims = (); + type SubscriptionService = (); + type PalletInstancesInfo = (); + type FeeManager = (); + type MaxAssetsIntoHolding = constants::MaxAssetsIntoHolding; + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; + type Aliasers = Nothing; + type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); +} diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/origin_converter.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/origin_converter.rs new file mode 100644 index 00000000000..5a60f0e6001 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/origin_converter.rs @@ -0,0 +1,29 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::parachain::{ + constants::RelayNetwork, location_converter::LocationConverter, RuntimeOrigin, +}; +use pallet_xcm::XcmPassthrough; +use xcm_builder::{SignedAccountId32AsNative, SovereignSignedViaLocation}; + +type XcmOriginToCallOrigin = ( + SovereignSignedViaLocation, + SignedAccountId32AsNative, + XcmPassthrough, +); + +pub type OriginConverter = XcmOriginToCallOrigin; diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/reserve.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/reserve.rs new file mode 100644 index 00000000000..8763a2f37cc --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/reserve.rs @@ -0,0 +1,21 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::parachain::teleporter::TrustedTeleporters; +use frame_support::traits::EverythingBut; +use xcm_builder::NativeAsset; + +pub type TrustedReserves = (NativeAsset, EverythingBut); diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/teleporter.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/teleporter.rs new file mode 100644 index 00000000000..41cb7a5eb2d --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/teleporter.rs @@ -0,0 +1,27 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use frame_support::parameter_types; +use xcm::latest::prelude::*; + +parameter_types! { + pub NftCollectionOne: AssetFilter + = Wild(AllOf { fun: WildNonFungible, id: AssetId((Parent, GeneralIndex(1)).into()) }); + pub NftCollectionOneForRelay: (AssetFilter, Location) + = (NftCollectionOne::get(), (Parent,).into()); +} + +pub type TrustedTeleporters = xcm_builder::Case; diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/weigher.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/weigher.rs new file mode 100644 index 00000000000..4bdc98ea3b0 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/weigher.rs @@ -0,0 +1,27 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::parachain::RuntimeCall; +use frame_support::parameter_types; +use xcm::latest::prelude::*; +use xcm_builder::FixedWeightBounds; + +parameter_types! 
{ + pub const UnitWeightCost: Weight = Weight::from_parts(1, 1); + pub const MaxInstructions: u32 = 100; +} + +pub type Weigher = FixedWeightBounds; diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain/mod.rs similarity index 53% rename from polkadot/xcm/xcm-simulator/example/src/relay_chain.rs rename to polkadot/xcm/xcm-simulator/example/src/relay_chain/mod.rs index b41df3cfa2b..f698eba41d4 100644 --- a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain/mod.rs @@ -16,31 +16,29 @@ //! Relay chain runtime mock. +mod xcm_config; +pub use xcm_config::*; + use frame_support::{ construct_runtime, derive_impl, parameter_types, - traits::{AsEnsureOriginWithArg, Everything, Nothing, ProcessMessage, ProcessMessageError}, + traits::{ + AsEnsureOriginWithArg, ConstU128, Everything, Nothing, ProcessMessage, ProcessMessageError, + }, weights::{Weight, WeightMeter}, }; use frame_system::EnsureRoot; -use sp_core::{ConstU32, H256}; +use sp_core::ConstU32; use sp_runtime::{traits::IdentityLookup, AccountId32}; -use polkadot_parachain_primitives::primitives::Id as ParaId; use polkadot_runtime_parachains::{ configuration, inclusion::{AggregateMessageOrigin, UmpQueueId}, origin, shared, }; use xcm::latest::prelude::*; -use xcm_builder::{ - Account32Hash, AccountId32Aliases, AllowUnpaidExecutionFrom, AsPrefixedGeneralIndex, - ChildParachainAsNative, ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, - ConvertedConcreteId, EnsureDecodableXcm, FixedRateOfFungible, FixedWeightBounds, - FrameTransactionalProcessor, FungibleAdapter, IsConcrete, NoChecking, NonFungiblesAdapter, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, -}; -use xcm_executor::{traits::JustTry, Config, XcmExecutor}; +use xcm_builder::{IsConcrete, SignedToAccountId32}; +use xcm_executor::XcmExecutor; pub type AccountId = AccountId32; pub type Balance = u128; @@ -51,51 +49,17 @@ parameter_types! { #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; - type BlockWeights = (); - type BlockLength = (); - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type DbWeight = (); - type BaseCallFilter = Everything; - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; -} - -parameter_types! 
{ - pub ExistentialDeposit: Balance = 1; - pub const MaxLocks: u32 = 50; - pub const MaxReserves: u32 = 50; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { - type MaxLocks = MaxLocks; type Balance = Balance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; + type ExistentialDeposit = ConstU128<1>; type AccountStore = System; - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxFreezes = ConstU32<0>; } impl pallet_uniques::Config for Runtime { @@ -127,83 +91,8 @@ impl configuration::Config for Runtime { type WeightInfo = configuration::TestWeightInfo; } -parameter_types! { - pub const TokenLocation: Location = Here.into_location(); - pub RelayNetwork: NetworkId = ByGenesis([0; 32]); - pub const AnyNetwork: Option = None; - pub UniversalLocation: InteriorLocation = RelayNetwork::get().into(); - pub UnitWeightCost: u64 = 1_000; -} - -pub type LocationToAccountId = ( - ChildParachainConvertsVia, - AccountId32Aliases, - Account32Hash<(), AccountId>, -); - -pub type LocalAssetTransactor = ( - FungibleAdapter, LocationToAccountId, AccountId, ()>, - NonFungiblesAdapter< - Uniques, - ConvertedConcreteId, JustTry>, - LocationToAccountId, - AccountId, - NoChecking, - (), - >, -); - -type LocalOriginConverter = ( - SovereignSignedViaLocation, - ChildParachainAsNative, - SignedAccountId32AsNative, - ChildSystemParachainAsSuperuser, -); - -parameter_types! { - pub const BaseXcmWeight: Weight = Weight::from_parts(1_000, 1_000); - pub TokensPerSecondPerByte: (AssetId, u128, u128) = - (AssetId(TokenLocation::get()), 1_000_000_000_000, 1024 * 1024); - pub const MaxInstructions: u32 = 100; - pub const MaxAssetsIntoHolding: u32 = 64; -} - -pub type XcmRouter = EnsureDecodableXcm; -pub type Barrier = AllowUnpaidExecutionFrom; - -pub struct XcmConfig; -impl Config for XcmConfig { - type RuntimeCall = RuntimeCall; - type XcmSender = XcmRouter; - type AssetTransactor = LocalAssetTransactor; - type OriginConverter = LocalOriginConverter; - type IsReserve = (); - type IsTeleporter = (); - type UniversalLocation = UniversalLocation; - type Barrier = Barrier; - type Weigher = FixedWeightBounds; - type Trader = FixedRateOfFungible; - type ResponseHandler = (); - type AssetTrap = (); - type AssetLocker = XcmPallet; - type AssetExchanger = (); - type AssetClaims = (); - type SubscriptionService = (); - type PalletInstancesInfo = (); - type FeeManager = (); - type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type MessageExporter = (); - type UniversalAliases = Nothing; - type CallDispatcher = RuntimeCall; - type SafeCallFilter = Everything; - type Aliasers = Nothing; - type TransactionalProcessor = FrameTransactionalProcessor; - type HrmpNewChannelOpenRequestHandler = (); - type HrmpChannelAcceptedHandler = (); - type HrmpChannelClosingHandler = (); -} - -pub type LocalOriginToLocation = SignedToAccountId32; +pub type LocalOriginToLocation = + SignedToAccountId32; impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; @@ -215,16 +104,16 @@ impl pallet_xcm::Config for Runtime { type XcmExecutor = XcmExecutor; type XcmTeleportFilter = Everything; type XcmReserveTransferFilter = Everything; - type Weigher = FixedWeightBounds; - type UniversalLocation = UniversalLocation; + type Weigher = weigher::Weigher; + 
type UniversalLocation = constants::UniversalLocation; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; type Currency = Balances; - type CurrencyMatcher = IsConcrete; + type CurrencyMatcher = IsConcrete; type TrustedLockers = (); - type SovereignAccountOf = LocationToAccountId; + type SovereignAccountOf = location_converter::LocationConverter; type MaxLockers = ConstU32<8>; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/asset_transactor.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/asset_transactor.rs new file mode 100644 index 00000000000..c212569d481 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/asset_transactor.rs @@ -0,0 +1,38 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::relay_chain::{ + constants::TokenLocation, location_converter::LocationConverter, AccountId, Balances, Uniques, +}; +use xcm_builder::{ + AsPrefixedGeneralIndex, ConvertedConcreteId, FungibleAdapter, IsConcrete, NoChecking, + NonFungiblesAdapter, +}; +use xcm_executor::traits::JustTry; + +type LocalAssetTransactor = ( + FungibleAdapter, LocationConverter, AccountId, ()>, + NonFungiblesAdapter< + Uniques, + ConvertedConcreteId, JustTry>, + LocationConverter, + AccountId, + NoChecking, + (), + >, +); + +pub type AssetTransactor = LocalAssetTransactor; diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/barrier.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/barrier.rs new file mode 100644 index 00000000000..1c7aa2c6d32 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/barrier.rs @@ -0,0 +1,20 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
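+
+// NOTE: `AllowUnpaidExecutionFrom<Everything>` (below) lets any origin execute
+// XCM free of charge, which is only appropriate for a mock. A production
+// barrier would normally demand payment, e.g. (illustrative sketch only):
+//
+//   pub type Barrier = (TakeWeightCredit, AllowTopLevelPaidExecutionFrom<Everything>);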
+ +use frame_support::traits::Everything; +use xcm_builder::AllowUnpaidExecutionFrom; + +pub type Barrier = AllowUnpaidExecutionFrom; diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/constants.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/constants.rs new file mode 100644 index 00000000000..f590c42990d --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/constants.rs @@ -0,0 +1,31 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use frame_support::parameter_types; +use xcm::latest::prelude::*; + +parameter_types! { + pub TokensPerSecondPerByte: (AssetId, u128, u128) = + (AssetId(TokenLocation::get()), 1_000_000_000_000, 1024 * 1024); + pub const MaxAssetsIntoHolding: u32 = 64; +} + +parameter_types! { + pub const TokenLocation: Location = Here.into_location(); + pub RelayNetwork: NetworkId = ByGenesis([0; 32]); + pub UniversalLocation: InteriorLocation = RelayNetwork::get().into(); + pub UnitWeightCost: u64 = 1_000; +} diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/location_converter.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/location_converter.rs new file mode 100644 index 00000000000..0f5f4e43dc9 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/location_converter.rs @@ -0,0 +1,25 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::relay_chain::{constants::RelayNetwork, AccountId}; +use xcm_builder::{AccountId32Aliases, DescribeAllTerminal, DescribeFamily, HashedDescription}; + +type LocationToAccountId = ( + HashedDescription>, + AccountId32Aliases, +); + +pub type LocationConverter = LocationToAccountId; diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/mod.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/mod.rs new file mode 100644 index 00000000000..a7a8bae5156 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/mod.rs @@ -0,0 +1,62 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +pub mod asset_transactor; +pub mod barrier; +pub mod constants; +pub mod location_converter; +pub mod origin_converter; +pub mod weigher; + +use crate::relay_chain::{RuntimeCall, XcmPallet}; +use frame_support::traits::{Everything, Nothing}; +use xcm_builder::{EnsureDecodableXcm, FixedRateOfFungible, FrameTransactionalProcessor}; +use xcm_executor::Config; + +// Generated from `decl_test_network!` +pub type XcmRouter = EnsureDecodableXcm; + +pub struct XcmConfig; +impl Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = XcmRouter; + type AssetTransactor = asset_transactor::AssetTransactor; + type OriginConverter = origin_converter::OriginConverter; + type IsReserve = (); + type IsTeleporter = (); + type UniversalLocation = constants::UniversalLocation; + type Barrier = barrier::Barrier; + type Weigher = weigher::Weigher; + type Trader = FixedRateOfFungible; + type ResponseHandler = (); + type AssetTrap = (); + type AssetLocker = XcmPallet; + type AssetExchanger = (); + type AssetClaims = (); + type SubscriptionService = (); + type PalletInstancesInfo = (); + type FeeManager = (); + type MaxAssetsIntoHolding = constants::MaxAssetsIntoHolding; + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; + type Aliasers = Nothing; + type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); +} diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/origin_converter.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/origin_converter.rs new file mode 100644 index 00000000000..3c79912a926 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/origin_converter.rs @@ -0,0 +1,34 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
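+
+// Converts the location an XCM message arrives from into a relay-chain dispatch
+// origin. Roughly, depending on the `OriginKind` requested by the message (see
+// the converter tuple below):
+//
+//   any convertible location   -> signed(sovereign account of that location)
+//   child `Parachain(id)`      -> native `origin::Origin::Parachain(id)`
+//   local `AccountId32 { id }` -> `RuntimeOrigin::signed(id)`
+//   child *system* parachain   -> `RuntimeOrigin::root()` (superuser)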
+ +use crate::relay_chain::{ + constants::RelayNetwork, location_converter::LocationConverter, RuntimeOrigin, +}; +use polkadot_parachain_primitives::primitives::Id as ParaId; +use polkadot_runtime_parachains::origin; +use xcm_builder::{ + ChildParachainAsNative, ChildSystemParachainAsSuperuser, SignedAccountId32AsNative, + SovereignSignedViaLocation, +}; + +type LocalOriginConverter = ( + SovereignSignedViaLocation, + ChildParachainAsNative, + SignedAccountId32AsNative, + ChildSystemParachainAsSuperuser, +); + +pub type OriginConverter = LocalOriginConverter; diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/weigher.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/weigher.rs new file mode 100644 index 00000000000..5c02565f460 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain/xcm_config/weigher.rs @@ -0,0 +1,27 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::relay_chain::RuntimeCall; +use frame_support::parameter_types; +use xcm::latest::prelude::*; +use xcm_builder::FixedWeightBounds; + +parameter_types! { + pub const BaseXcmWeight: Weight = Weight::from_parts(1_000, 1_000); + pub const MaxInstructions: u32 = 100; +} + +pub type Weigher = FixedWeightBounds; diff --git a/polkadot/xcm/xcm-simulator/example/src/tests.rs b/polkadot/xcm/xcm-simulator/example/src/tests.rs new file mode 100644 index 00000000000..6486a849af3 --- /dev/null +++ b/polkadot/xcm/xcm-simulator/example/src/tests.rs @@ -0,0 +1,513 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
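+
+// The tests below run on the mock network assembled in the crate root with
+// `xcm_simulator::decl_test_network!`: one relay chain (`Relay`) and the child
+// parachains `ParaA` and `ParaB`. Each `execute_with` block runs in that
+// chain's test externalities; the simulator then delivers any queued
+// UMP/DMP/XCMP messages to the other chains.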
+
+use crate::*;
+
+use codec::Encode;
+use frame_support::{assert_ok, weights::Weight};
+use xcm::latest::QueryResponseInfo;
+use xcm_simulator::TestExt;
+
+// Helper function for forming a buy-execution message.
+fn buy_execution<C>(fees: impl Into<Asset>) -> Instruction<C> {
+	BuyExecution { fees: fees.into(), weight_limit: Unlimited }
+}
+
+#[test]
+fn remote_account_ids_work() {
+	child_account_account_id(1, ALICE);
+	sibling_account_account_id(1, ALICE);
+	parent_account_account_id(ALICE);
+}
+
+#[test]
+fn dmp() {
+	MockNet::reset();
+
+	let remark = parachain::RuntimeCall::System(
+		frame_system::Call::<parachain::Runtime>::remark_with_event { remark: vec![1, 2, 3] },
+	);
+	Relay::execute_with(|| {
+		assert_ok!(RelayChainPalletXcm::send_xcm(
+			Here,
+			Parachain(1),
+			Xcm(vec![Transact {
+				origin_kind: OriginKind::SovereignAccount,
+				require_weight_at_most: Weight::from_parts(INITIAL_BALANCE as u64, 1024 * 1024),
+				call: remark.encode().into(),
+			}]),
+		));
+	});
+
+	ParaA::execute_with(|| {
+		use parachain::{RuntimeEvent, System};
+		assert!(System::events().iter().any(|r| matches!(
+			r.event,
+			RuntimeEvent::System(frame_system::Event::Remarked { .. })
+		)));
+	});
+}
+
+#[test]
+fn ump() {
+	MockNet::reset();
+
+	let remark = relay_chain::RuntimeCall::System(
+		frame_system::Call::<relay_chain::Runtime>::remark_with_event { remark: vec![1, 2, 3] },
+	);
+	ParaA::execute_with(|| {
+		assert_ok!(ParachainPalletXcm::send_xcm(
+			Here,
+			Parent,
+			Xcm(vec![Transact {
+				origin_kind: OriginKind::SovereignAccount,
+				require_weight_at_most: Weight::from_parts(INITIAL_BALANCE as u64, 1024 * 1024),
+				call: remark.encode().into(),
+			}]),
+		));
+	});
+
+	Relay::execute_with(|| {
+		use relay_chain::{RuntimeEvent, System};
+		assert!(System::events().iter().any(|r| matches!(
+			r.event,
+			RuntimeEvent::System(frame_system::Event::Remarked { .. })
+		)));
+	});
+}
+
+#[test]
+fn xcmp() {
+	MockNet::reset();
+
+	let remark = parachain::RuntimeCall::System(
+		frame_system::Call::<parachain::Runtime>::remark_with_event { remark: vec![1, 2, 3] },
+	);
+	ParaA::execute_with(|| {
+		assert_ok!(ParachainPalletXcm::send_xcm(
+			Here,
+			(Parent, Parachain(2)),
+			Xcm(vec![Transact {
+				origin_kind: OriginKind::SovereignAccount,
+				require_weight_at_most: Weight::from_parts(INITIAL_BALANCE as u64, 1024 * 1024),
+				call: remark.encode().into(),
+			}]),
+		));
+	});
+
+	ParaB::execute_with(|| {
+		use parachain::{RuntimeEvent, System};
+		assert!(System::events().iter().any(|r| matches!(
+			r.event,
+			RuntimeEvent::System(frame_system::Event::Remarked { ..
}) + ))); + }); +} + +#[test] +fn reserve_transfer() { + MockNet::reset(); + + let withdraw_amount = 123; + + Relay::execute_with(|| { + assert_ok!(RelayChainPalletXcm::limited_reserve_transfer_assets( + relay_chain::RuntimeOrigin::signed(ALICE), + Box::new(Parachain(1).into()), + Box::new(AccountId32 { network: None, id: ALICE.into() }.into()), + Box::new((Here, withdraw_amount).into()), + 0, + Unlimited, + )); + assert_eq!( + relay_chain::Balances::free_balance(&child_account_id(1)), + INITIAL_BALANCE + withdraw_amount + ); + }); + + ParaA::execute_with(|| { + // free execution, full amount received + assert_eq!( + pallet_balances::Pallet::::free_balance(&ALICE), + INITIAL_BALANCE + withdraw_amount + ); + }); +} + +#[test] +fn remote_locking_and_unlocking() { + MockNet::reset(); + + let locked_amount = 100; + + ParaB::execute_with(|| { + let message = Xcm(vec![LockAsset { + asset: (Here, locked_amount).into(), + unlocker: Parachain(1).into(), + }]); + assert_ok!(ParachainPalletXcm::send_xcm(Here, Parent, message.clone())); + }); + + Relay::execute_with(|| { + use pallet_balances::{BalanceLock, Reasons}; + assert_eq!( + relay_chain::Balances::locks(&child_account_id(2)), + vec![BalanceLock { id: *b"py/xcmlk", amount: locked_amount, reasons: Reasons::All }] + ); + }); + + ParaA::execute_with(|| { + assert_eq!( + parachain::MsgQueue::received_dmp(), + vec![Xcm(vec![NoteUnlockable { + owner: (Parent, Parachain(2)).into(), + asset: (Parent, locked_amount).into() + }])] + ); + }); + + ParaB::execute_with(|| { + // Request unlocking part of the funds on the relay chain + let message = Xcm(vec![RequestUnlock { + asset: (Parent, locked_amount - 50).into(), + locker: Parent.into(), + }]); + assert_ok!(ParachainPalletXcm::send_xcm(Here, (Parent, Parachain(1)), message)); + }); + + Relay::execute_with(|| { + use pallet_balances::{BalanceLock, Reasons}; + // Lock is reduced + assert_eq!( + relay_chain::Balances::locks(&child_account_id(2)), + vec![BalanceLock { + id: *b"py/xcmlk", + amount: locked_amount - 50, + reasons: Reasons::All + }] + ); + }); +} + +/// Scenario: +/// A parachain transfers an NFT resident on the relay chain to another parachain account. +/// +/// Asserts that the parachain accounts are updated as expected. +#[test] +fn withdraw_and_deposit_nft() { + MockNet::reset(); + + Relay::execute_with(|| { + assert_eq!(relay_chain::Uniques::owner(1, 42), Some(child_account_id(1))); + }); + + ParaA::execute_with(|| { + let message = Xcm(vec![TransferAsset { + assets: (GeneralIndex(1), 42u32).into(), + beneficiary: Parachain(2).into(), + }]); + // Send withdraw and deposit + assert_ok!(ParachainPalletXcm::send_xcm(Here, Parent, message)); + }); + + Relay::execute_with(|| { + assert_eq!(relay_chain::Uniques::owner(1, 42), Some(child_account_id(2))); + }); +} + +/// Scenario: +/// The relay-chain teleports an NFT to a parachain. +/// +/// Asserts that the parachain accounts are updated as expected. +#[test] +fn teleport_nft() { + MockNet::reset(); + + Relay::execute_with(|| { + // Mint the NFT (1, 69) and give it to our "parachain#1 alias". + assert_ok!(relay_chain::Uniques::mint( + relay_chain::RuntimeOrigin::signed(ALICE), + 1, + 69, + child_account_account_id(1, ALICE), + )); + // The parachain#1 alias of Alice is what must hold it on the Relay-chain for it to be + // withdrawable by Alice on the parachain. 
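+		// (Illustrative note: `child_account_account_id(1, ALICE)` is the
+		// relay-chain account derived from the location
+		// `Parachain(1)/AccountId32(ALICE)`, i.e. Alice as seen through
+		// parachain #1.)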
+		assert_eq!(relay_chain::Uniques::owner(1, 69), Some(child_account_account_id(1, ALICE)));
+	});
+	ParaA::execute_with(|| {
+		assert_ok!(parachain::ForeignUniques::force_create(
+			parachain::RuntimeOrigin::root(),
+			(Parent, GeneralIndex(1)).into(),
+			ALICE,
+			false,
+		));
+		assert_eq!(
+			parachain::ForeignUniques::owner((Parent, GeneralIndex(1)).into(), 69u32.into()),
+			None,
+		);
+		assert_eq!(parachain::Balances::reserved_balance(&ALICE), 0);
+
+		// IRL Alice would probably just execute this locally on the Relay-chain, but we can't
+		// easily do that here since we only send between chains.
+		let message = Xcm(vec![
+			WithdrawAsset((GeneralIndex(1), 69u32).into()),
+			InitiateTeleport {
+				assets: AllCounted(1).into(),
+				dest: Parachain(1).into(),
+				xcm: Xcm(vec![DepositAsset {
+					assets: AllCounted(1).into(),
+					beneficiary: (AccountId32 { id: ALICE.into(), network: None },).into(),
+				}]),
+			},
+		]);
+		// Send teleport
+		let alice = AccountId32 { id: ALICE.into(), network: None };
+		assert_ok!(ParachainPalletXcm::send_xcm(alice, Parent, message));
+	});
+	ParaA::execute_with(|| {
+		assert_eq!(
+			parachain::ForeignUniques::owner((Parent, GeneralIndex(1)).into(), 69u32.into()),
+			Some(ALICE),
+		);
+		assert_eq!(parachain::Balances::reserved_balance(&ALICE), 1000);
+	});
+	Relay::execute_with(|| {
+		assert_eq!(relay_chain::Uniques::owner(1, 69), None);
+	});
+}
+
+/// Scenario:
+/// The relay-chain transfers an NFT into a parachain's sovereign account, which then mints a
+/// trustless-backed-derivative locally.
+///
+/// Asserts that the parachain accounts are updated as expected.
+#[test]
+fn reserve_asset_transfer_nft() {
+	sp_tracing::init_for_tests();
+	MockNet::reset();
+
+	Relay::execute_with(|| {
+		assert_ok!(relay_chain::Uniques::force_create(
+			relay_chain::RuntimeOrigin::root(),
+			2,
+			ALICE,
+			false
+		));
+		assert_ok!(relay_chain::Uniques::mint(
+			relay_chain::RuntimeOrigin::signed(ALICE),
+			2,
+			69,
+			child_account_account_id(1, ALICE)
+		));
+		assert_eq!(relay_chain::Uniques::owner(2, 69), Some(child_account_account_id(1, ALICE)));
+	});
+	ParaA::execute_with(|| {
+		assert_ok!(parachain::ForeignUniques::force_create(
+			parachain::RuntimeOrigin::root(),
+			(Parent, GeneralIndex(2)).into(),
+			ALICE,
+			false,
+		));
+		assert_eq!(
+			parachain::ForeignUniques::owner((Parent, GeneralIndex(2)).into(), 69u32.into()),
+			None,
+		);
+		assert_eq!(parachain::Balances::reserved_balance(&ALICE), 0);
+
+		let message = Xcm(vec![
+			WithdrawAsset((GeneralIndex(2), 69u32).into()),
+			DepositReserveAsset {
+				assets: AllCounted(1).into(),
+				dest: Parachain(1).into(),
+				xcm: Xcm(vec![DepositAsset {
+					assets: AllCounted(1).into(),
+					beneficiary: (AccountId32 { id: ALICE.into(), network: None },).into(),
+				}]),
+			},
+		]);
+		// Send transfer
+		let alice = AccountId32 { id: ALICE.into(), network: None };
+		assert_ok!(ParachainPalletXcm::send_xcm(alice, Parent, message));
+	});
+	ParaA::execute_with(|| {
+		log::debug!(target: "xcm-executor", "Hello");
+		assert_eq!(
+			parachain::ForeignUniques::owner((Parent, GeneralIndex(2)).into(), 69u32.into()),
+			Some(ALICE),
+		);
+		assert_eq!(parachain::Balances::reserved_balance(&ALICE), 1000);
+	});
+
+	Relay::execute_with(|| {
+		assert_eq!(relay_chain::Uniques::owner(2, 69), Some(child_account_id(1)));
+	});
+}
+
+/// Scenario:
+/// The relay-chain creates an asset class on a parachain and then Alice transfers her NFT into
+/// that parachain's sovereign account, which then mints a trustless-backed-derivative locally.
+/// +/// Asserts that the parachain accounts are updated as expected. +#[test] +fn reserve_asset_class_create_and_reserve_transfer() { + MockNet::reset(); + + Relay::execute_with(|| { + assert_ok!(relay_chain::Uniques::force_create( + relay_chain::RuntimeOrigin::root(), + 2, + ALICE, + false + )); + assert_ok!(relay_chain::Uniques::mint( + relay_chain::RuntimeOrigin::signed(ALICE), + 2, + 69, + child_account_account_id(1, ALICE) + )); + assert_eq!(relay_chain::Uniques::owner(2, 69), Some(child_account_account_id(1, ALICE))); + + let message = Xcm(vec![Transact { + origin_kind: OriginKind::Xcm, + require_weight_at_most: Weight::from_parts(1_000_000_000, 1024 * 1024), + call: parachain::RuntimeCall::from( + pallet_uniques::Call::::create { + collection: (Parent, 2u64).into(), + admin: parent_account_id(), + }, + ) + .encode() + .into(), + }]); + // Send creation. + assert_ok!(RelayChainPalletXcm::send_xcm(Here, Parachain(1), message)); + }); + ParaA::execute_with(|| { + // Then transfer + let message = Xcm(vec![ + WithdrawAsset((GeneralIndex(2), 69u32).into()), + DepositReserveAsset { + assets: AllCounted(1).into(), + dest: Parachain(1).into(), + xcm: Xcm(vec![DepositAsset { + assets: AllCounted(1).into(), + beneficiary: (AccountId32 { id: ALICE.into(), network: None },).into(), + }]), + }, + ]); + let alice = AccountId32 { id: ALICE.into(), network: None }; + assert_ok!(ParachainPalletXcm::send_xcm(alice, Parent, message)); + }); + ParaA::execute_with(|| { + assert_eq!(parachain::Balances::reserved_balance(&parent_account_id()), 1000); + assert_eq!( + parachain::ForeignUniques::collection_owner((Parent, 2u64).into()), + Some(parent_account_id()) + ); + }); +} + +/// Scenario: +/// A parachain transfers funds on the relay chain to another parachain account. +/// +/// Asserts that the parachain accounts are updated as expected. +#[test] +fn withdraw_and_deposit() { + MockNet::reset(); + + let send_amount = 10; + + ParaA::execute_with(|| { + let message = Xcm(vec![ + WithdrawAsset((Here, send_amount).into()), + buy_execution((Here, send_amount)), + DepositAsset { assets: AllCounted(1).into(), beneficiary: Parachain(2).into() }, + ]); + // Send withdraw and deposit + assert_ok!(ParachainPalletXcm::send_xcm(Here, Parent, message.clone())); + }); + + Relay::execute_with(|| { + assert_eq!( + relay_chain::Balances::free_balance(child_account_id(1)), + INITIAL_BALANCE - send_amount + ); + assert_eq!( + relay_chain::Balances::free_balance(child_account_id(2)), + INITIAL_BALANCE + send_amount + ); + }); +} + +/// Scenario: +/// A parachain wants to be notified that a transfer worked correctly. +/// It sends a `QueryHolding` after the deposit to get notified on success. +/// +/// Asserts that the balances are updated correctly and the expected XCM is sent. 
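+///
+/// (Round-trip sketch: parachain A withdraws and deposits on the relay chain,
+/// then a `ReportHolding` instruction makes the relay send a
+/// `QueryResponse { query_id, response: Assets(..) }` back to `Parachain(1)`,
+/// which the test asserts on via `received_dmp`.)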
+#[test]
+fn query_holding() {
+	MockNet::reset();
+
+	let send_amount = 10;
+	let query_id_set = 1234;
+
+	// Send a message which fully succeeds on the relay chain
+	ParaA::execute_with(|| {
+		let message = Xcm(vec![
+			WithdrawAsset((Here, send_amount).into()),
+			buy_execution((Here, send_amount)),
+			DepositAsset { assets: AllCounted(1).into(), beneficiary: Parachain(2).into() },
+			ReportHolding {
+				response_info: QueryResponseInfo {
+					destination: Parachain(1).into(),
+					query_id: query_id_set,
+					max_weight: Weight::from_parts(1_000_000_000, 1024 * 1024),
+				},
+				assets: All.into(),
+			},
+		]);
+		// Send withdraw and deposit with query holding
+		assert_ok!(ParachainPalletXcm::send_xcm(Here, Parent, message.clone(),));
+	});
+
+	// Check that transfer was executed
+	Relay::execute_with(|| {
+		// Withdraw executed
+		assert_eq!(
+			relay_chain::Balances::free_balance(child_account_id(1)),
+			INITIAL_BALANCE - send_amount
+		);
+		// Deposit executed
+		assert_eq!(
+			relay_chain::Balances::free_balance(child_account_id(2)),
+			INITIAL_BALANCE + send_amount
+		);
+	});
+
+	// Check that QueryResponse message was received
+	ParaA::execute_with(|| {
+		assert_eq!(
+			parachain::MsgQueue::received_dmp(),
+			vec![Xcm(vec![QueryResponse {
+				query_id: query_id_set,
+				response: Response::Assets(Assets::new()),
+				max_weight: Weight::from_parts(1_000_000_000, 1024 * 1024),
+				querier: Some(Here.into()),
+			}])],
+		);
+	});
+}
diff --git a/prdoc/pr_4220.prdoc b/prdoc/pr_4220.prdoc
new file mode 100644
index 00000000000..d5688ab325c
--- /dev/null
+++ b/prdoc/pr_4220.prdoc
@@ -0,0 +1,11 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Refactor XCM Simulator Example
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR refactors the XCM Simulator Example to improve the developer experience when reading and understanding the example. Three monolithic files have been broken down into their respective components across various modules. No major logical changes were made.
+
+crates: [ ]
-- 
GitLab


From 1fb058b791276f1ced9c1980e738e968d5dafb45 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Andr=C3=A9s=20Dorado=20Su=C3=A1rez?=
Date: Tue, 30 Apr 2024 00:06:17 -0500
Subject: [PATCH 093/107] Assets Events for `Deposited` and `Withdrawn`
 (#4312)

Closes #4308

Polkadot address: 12gMhxHw8QjEwLQvnqsmMVY1z5gFa54vND74aMUbhhwN6mJR

---------

Co-authored-by: command-bot <>
Co-authored-by: Francisco Aguirre
---
 prdoc/pr_4312.prdoc                          | 19 +++++++++++++
 substrate/frame/assets/src/impl_fungibles.rs | 16 +++++++++++
 substrate/frame/assets/src/lib.rs            |  4 +++
 substrate/frame/assets/src/tests/sets.rs     | 12 ++++++++
 .../asset-conversion-tx-payment/src/tests.rs | 22 +++++++++++++++
 .../asset-tx-payment/src/tests.rs            | 28 +++++++++++++++++++
 6 files changed, 101 insertions(+)
 create mode 100644 prdoc/pr_4312.prdoc

diff --git a/prdoc/pr_4312.prdoc b/prdoc/pr_4312.prdoc
new file mode 100644
index 00000000000..d773edbd14d
--- /dev/null
+++ b/prdoc/pr_4312.prdoc
@@ -0,0 +1,19 @@
+title: Add `Deposited`/`Withdrawn` events for `pallet-assets`
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      New events were added to `pallet-assets`: `Deposited` and `Withdrawn`. Make sure
+      to cover those events in tests if necessary.
+  - audience: Runtime User
+    description: |
+      New events were added to `pallet-assets`: `Deposited` and `Withdrawn`. These indicate
+      a change in the balance of an account.
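+
+# Illustrative only: the shape of the new events as defined in the pallet
+# (see `substrate/frame/assets/src/lib.rs` in this commit):
+#   Event::Deposited { asset_id, who, amount }
+#   Event::Withdrawn { asset_id, who, amount }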
+
+crates:
+  - name: pallet-assets
+    bump: minor
+  - name: pallet-asset-tx-payment
+    bump: minor
+  - name: pallet-asset-conversion-tx-payment
+    bump: minor
diff --git a/substrate/frame/assets/src/impl_fungibles.rs b/substrate/frame/assets/src/impl_fungibles.rs
index 9f837a60434..30122f6d788 100644
--- a/substrate/frame/assets/src/impl_fungibles.rs
+++ b/substrate/frame/assets/src/impl_fungibles.rs
@@ -118,6 +118,22 @@ impl<T: Config<I>, I: 'static> fungibles::Balanced<<T as SystemConfig>::AccountI
 {
 	type OnDropCredit = fungibles::DecreaseIssuance<T::AccountId, Self>;
 	type OnDropDebt = fungibles::IncreaseIssuance<T::AccountId, Self>;
+
+	fn done_deposit(
+		asset_id: Self::AssetId,
+		who: &<T as SystemConfig>::AccountId,
+		amount: Self::Balance,
+	) {
+		Self::deposit_event(Event::Deposited { asset_id, who: who.clone(), amount })
+	}
+
+	fn done_withdraw(
+		asset_id: Self::AssetId,
+		who: &<T as SystemConfig>::AccountId,
+		amount: Self::Balance,
+	) {
+		Self::deposit_event(Event::Withdrawn { asset_id, who: who.clone(), amount })
+	}
 }
 
 impl<T: Config<I>, I: 'static> fungibles::Unbalanced<T::AccountId> for Pallet<T, I> {
diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs
index 9056b1eefbd..d5214922555 100644
--- a/substrate/frame/assets/src/lib.rs
+++ b/substrate/frame/assets/src/lib.rs
@@ -571,6 +571,10 @@ pub mod pallet {
 		Touched { asset_id: T::AssetId, who: T::AccountId, depositor: T::AccountId },
 		/// Some account `who` was blocked.
 		Blocked { asset_id: T::AssetId, who: T::AccountId },
+		/// Some assets were deposited (e.g. for transaction fees).
+		Deposited { asset_id: T::AssetId, who: T::AccountId, amount: T::Balance },
+		/// Some assets were withdrawn from the account (e.g. for transaction fees).
+		Withdrawn { asset_id: T::AssetId, who: T::AccountId, amount: T::Balance },
 	}
 
 	#[pallet::error]
diff --git a/substrate/frame/assets/src/tests/sets.rs b/substrate/frame/assets/src/tests/sets.rs
index f85a736c083..4d75b8aeab2 100644
--- a/substrate/frame/assets/src/tests/sets.rs
+++ b/substrate/frame/assets/src/tests/sets.rs
@@ -90,6 +90,12 @@ fn deposit_from_set_types_works() {
 	assert_eq!(First::::balance((), &account2), 50);
 	assert_eq!(First::::total_issuance(()), 100);
 
+	System::assert_has_event(RuntimeEvent::Assets(crate::Event::Deposited {
+		asset_id: asset1,
+		who: account2,
+		amount: 50,
+	}));
+
 	assert_eq!(imb.peek(), 50);
 
 	let (imb1, imb2) = imb.split(30);
@@ -336,6 +342,12 @@ fn withdraw_from_set_types_works() {
 	assert_eq!(First::::balance((), &account2), 50);
 	assert_eq!(First::::total_issuance(()), 200);
 
+	System::assert_has_event(RuntimeEvent::Assets(crate::Event::Withdrawn {
+		asset_id: asset1,
+		who: account2,
+		amount: 50,
+	}));
+
 	assert_eq!(imb.peek(), 50);
 	drop(imb);
 	assert_eq!(First::::total_issuance(()), 150);
diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs
index 62faed269d3..aa2f26f3a6a 100644
--- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs
+++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs
@@ -201,6 +201,8 @@ fn transaction_payment_in_asset_possible() {
 		.base_weight(Weight::from_parts(base_weight, 0))
 		.build()
 		.execute_with(|| {
+			System::set_block_number(1);
+
 			// create the asset
 			let asset_id = 1;
 			let min_balance = 2;
@@ -246,6 +248,12 @@ fn transaction_payment_in_asset_possible() {
 			// check that fee was charged in the given asset
 			assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset);
 
+			System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Withdrawn {
+				asset_id,
+				who: 
caller, + amount: fee_in_asset, + })); + assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), &info_from_weight(WEIGHT_5), // estimated tx weight @@ -385,6 +393,8 @@ fn asset_transaction_payment_with_tip_and_refund() { .base_weight(Weight::from_parts(base_weight, 0)) .build() .execute_with(|| { + System::set_block_number(1); + // create the asset let asset_id = 1; let min_balance = 2; @@ -434,6 +444,12 @@ fn asset_transaction_payment_with_tip_and_refund() { ) .unwrap(); + System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Withdrawn { + asset_id, + who: caller, + amount: fee_in_asset, + })); + assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), &info_from_weight(WEIGHT_100), @@ -451,6 +467,12 @@ fn asset_transaction_payment_with_tip_and_refund() { balance - fee_in_asset + expected_token_refund ); assert_eq!(Balances::free_balance(caller), 20 * balance_factor); + + System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Deposited { + asset_id, + who: caller, + amount: expected_token_refund, + })); }); } diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs index 8df98ceda99..098ecf11dd9 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -157,6 +157,8 @@ fn transaction_payment_in_asset_possible() { .base_weight(Weight::from_parts(base_weight, 0)) .build() .execute_with(|| { + System::set_block_number(1); + // create the asset let asset_id = 1; let min_balance = 2; @@ -188,6 +190,12 @@ fn transaction_payment_in_asset_possible() { assert_eq!(Assets::balance(asset_id, caller), balance - fee); assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), 0); + System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Withdrawn { + asset_id, + who: caller, + amount: fee, + })); + assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), &info_from_weight(Weight::from_parts(weight, 0)), @@ -198,6 +206,12 @@ fn transaction_payment_in_asset_possible() { assert_eq!(Assets::balance(asset_id, caller), balance - fee); // check that the block author gets rewarded assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), fee); + + System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Deposited { + asset_id, + who: BLOCK_AUTHOR, + amount: fee, + })); }); } @@ -263,6 +277,8 @@ fn asset_transaction_payment_with_tip_and_refund() { .base_weight(Weight::from_parts(base_weight, 0)) .build() .execute_with(|| { + System::set_block_number(1); + // create the asset let asset_id = 1; let min_balance = 2; @@ -292,6 +308,12 @@ fn asset_transaction_payment_with_tip_and_refund() { .unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_with_tip); + System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Withdrawn { + asset_id, + who: caller, + amount: fee_with_tip, + })); + let final_weight = 50; assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), @@ -304,6 +326,12 @@ fn asset_transaction_payment_with_tip_and_refund() { fee_with_tip - (weight - final_weight) * min_balance / ExistentialDeposit::get(); assert_eq!(Assets::balance(asset_id, caller), balance - (final_fee)); assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), final_fee); + + System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Deposited { + asset_id, + who: caller, + amount: fee_with_tip - final_fee, + })); }); } -- GitLab From 
31dc8bb1de9a73c57863c4698ea23559ef729f67 Mon Sep 17 00:00:00 2001 From: gupnik Date: Tue, 30 Apr 2024 11:09:08 +0530 Subject: [PATCH 094/107] Improvements in minimal template (#4119) This PR makes a few improvements in the docs for the minimal template. --------- Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- Cargo.lock | 13 +++ Cargo.toml | 1 + .../src/construct_runtime/expand/call.rs | 1 + .../procedural/src/construct_runtime/mod.rs | 2 + .../procedural/src/construct_runtime/parse.rs | 3 + .../src/pallet/expand/tt_default_parts.rs | 4 +- .../procedural/src/runtime/expand/mod.rs | 4 +- .../procedural/src/runtime/parse/pallet.rs | 4 + .../src/runtime/parse/pallet_decl.rs | 7 +- templates/minimal/Cargo.toml | 25 ++++++ templates/minimal/README.md | 13 +++ templates/minimal/runtime/src/lib.rs | 79 +++++++++++++++---- templates/minimal/src/lib.rs | 75 ++++++++++++++++++ 13 files changed, 210 insertions(+), 21 deletions(-) create mode 100644 templates/minimal/Cargo.toml create mode 100644 templates/minimal/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 67b0ad4def2..1fe4012070a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8474,6 +8474,19 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" +[[package]] +name = "minimal-template" +version = "0.0.0" +dependencies = [ + "docify", + "minimal-template-node", + "minimal-template-runtime", + "pallet-minimal-template", + "polkadot-sdk-docs", + "polkadot-sdk-frame", + "simple-mermaid", +] + [[package]] name = "minimal-template-node" version = "0.0.0" diff --git a/Cargo.toml b/Cargo.toml index 42a6bc8abe1..1d3f3d8e9ec 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -513,6 +513,7 @@ members = [ "substrate/utils/substrate-bip39", "substrate/utils/wasm-builder", + "templates/minimal", "templates/minimal/node", "templates/minimal/pallets/template", "templates/minimal/runtime", diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs index b0041ccc075..f055e8ce28e 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs @@ -66,6 +66,7 @@ pub fn expand_outer_dispatch( quote! { #( #query_call_part_macros )* + /// The aggregated runtime call type. 
#[derive( Clone, PartialEq, Eq, #scrate::__private::codec::Encode, diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index b083abbb2a8..1505d158895 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -533,6 +533,7 @@ pub(crate) fn decl_all_pallets<'a>( for pallet_declaration in pallet_declarations { let type_name = &pallet_declaration.name; let pallet = &pallet_declaration.path; + let docs = &pallet_declaration.docs; let mut generics = vec![quote!(#runtime)]; generics.extend(pallet_declaration.instance.iter().map(|name| quote!(#pallet::#name))); let mut attrs = Vec::new(); @@ -541,6 +542,7 @@ pub(crate) fn decl_all_pallets<'a>( attrs.extend(TokenStream2::from_str(&feat).expect("was parsed successfully; qed")); } let type_decl = quote!( + #( #[doc = #docs] )* #(#attrs)* pub type #type_name = #pallet::Pallet <#(#generics),*>; ); diff --git a/substrate/frame/support/procedural/src/construct_runtime/parse.rs b/substrate/frame/support/procedural/src/construct_runtime/parse.rs index 31866c787b0..ded77bed4c8 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/parse.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/parse.rs @@ -605,6 +605,8 @@ pub struct Pallet { pub pallet_parts: Vec, /// Expressions specified inside of a #[cfg] attribute. pub cfg_pattern: Vec, + /// The doc literals + pub docs: Vec, } impl Pallet { @@ -774,6 +776,7 @@ fn convert_pallets(pallets: Vec) -> syn::Result>>()?; diff --git a/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs b/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs index 99364aaa96c..1975f059152 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs @@ -198,9 +198,9 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { macro_rules! #default_parts_unique_id_v2 { { $caller:tt - frame_support = [{ $($frame_support:ident)::* }] + your_tt_return = [{ $my_tt_return:path }] } => { - $($frame_support)*::__private::tt_return! { + $my_tt_return! { $caller tokens = [{ + Pallet #call_part_v2 #storage_part_v2 #event_part_v2 #error_part_v2 #origin_part_v2 #config_part_v2 diff --git a/substrate/frame/support/procedural/src/runtime/expand/mod.rs b/substrate/frame/support/procedural/src/runtime/expand/mod.rs index 011f69f3714..43f11896808 100644 --- a/substrate/frame/support/procedural/src/runtime/expand/mod.rs +++ b/substrate/frame/support/procedural/src/runtime/expand/mod.rs @@ -93,7 +93,7 @@ fn construct_runtime_implicit_to_explicit( let frame_support = generate_access_from_frame_or_crate("frame-support")?; let attr = if legacy_ordering { quote!((legacy_ordering)) } else { quote!() }; let mut expansion = quote::quote!( - #[frame_support::runtime #attr] + #[#frame_support::runtime #attr] #input ); for pallet in definition.pallet_decls.iter() { @@ -103,7 +103,7 @@ fn construct_runtime_implicit_to_explicit( expansion = quote::quote!( #frame_support::__private::tt_call! { macro = [{ #pallet_path::tt_default_parts_v2 }] - frame_support = [{ #frame_support }] + your_tt_return = [{ #frame_support::__private::tt_return }] ~~> #frame_support::match_and_insert! 
{
					target = [{ #expansion }]
					pattern = [{ #pallet_name = #pallet_path #pallet_instance }]
diff --git a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs
index d2f1857fb2b..09f5290541d 100644
--- a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs
+++ b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs
@@ -16,6 +16,7 @@
 // limitations under the License.
 
 use crate::construct_runtime::parse::{Pallet, PalletPart, PalletPartKeyword, PalletPath};
+use frame_support_procedural_tools::get_doc_literals;
 use quote::ToTokens;
 use syn::{punctuated::Punctuated, spanned::Spanned, token, Error, Ident, PathArguments};
 
@@ -86,6 +87,8 @@ impl Pallet {
 
 		let cfg_pattern = vec![];
 
+		let docs = get_doc_literals(&item.attrs);
+
 		Ok(Pallet {
 			is_expanded: true,
 			name,
@@ -94,6 +97,7 @@ impl Pallet {
 			instance,
 			cfg_pattern,
 			pallet_parts,
+			docs,
 		})
 	}
 }
diff --git a/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs b/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs
index 437a163cfbc..e167d37d5f1 100644
--- a/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs
+++ b/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs
@@ -21,13 +21,14 @@ use syn::{spanned::Spanned, Attribute, Ident, PathArguments};
 /// The declaration of a pallet.
 #[derive(Debug, Clone)]
 pub struct PalletDeclaration {
-	/// The name of the pallet, e.g.`System` in `System: frame_system`.
+	/// The name of the pallet, e.g. `System` in `pub type System = frame_system`.
 	pub name: Ident,
 	/// Optional attributes tagged right above a pallet declaration.
 	pub attrs: Vec<Attribute>,
-	/// The path of the pallet, e.g. `frame_system` in `System: frame_system`.
+	/// The path of the pallet, e.g. `frame_system` in `pub type System = frame_system`.
 	pub path: syn::Path,
-	/// The instance of the pallet, e.g. `Instance1` in `Council: pallet_collective::<Instance1>`.
+	/// The instance of the pallet, e.g. `Instance1` in `pub type Council =
+	/// pallet_collective<Instance1>`.
 	pub instance: Option<Ident>,
 }
diff --git a/templates/minimal/Cargo.toml b/templates/minimal/Cargo.toml
new file mode 100644
index 00000000000..6cd28c5a493
--- /dev/null
+++ b/templates/minimal/Cargo.toml
@@ -0,0 +1,25 @@
+[package]
+name = "minimal-template"
+description = "A minimal template built with Substrate, part of the Polkadot SDK."
+version = "0.0.0"
+license = "MIT-0"
+authors.workspace = true
+homepage.workspace = true
+repository.workspace = true
+edition.workspace = true
+publish = false
+
+[lints]
+workspace = true
+
+[dependencies]
+minimal-template-node = { path = "./node" }
+minimal-template-runtime = { path = "./runtime" }
+pallet-minimal-template = { path = "./pallets/template" }
+polkadot-sdk-docs = { path = "../../docs/sdk" }
+
+frame = { package = "polkadot-sdk-frame", path = "../../substrate/frame" }
+
+# How we build docs in rust-docs
+simple-mermaid = "0.1.1"
+docify = "0.2.7"
diff --git a/templates/minimal/README.md b/templates/minimal/README.md
index e69de29bb2d..0541e393db9 100644
--- a/templates/minimal/README.md
+++ b/templates/minimal/README.md
@@ -0,0 +1,13 @@
+# Minimal Template
+
+This is a minimal template for creating a blockchain using the Polkadot SDK.
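+
+# Getting Started
+
+The commands below mirror the template's crate documentation added in this PR
+(see `templates/minimal/src/lib.rs`); build the node and run it in dev mode:
+
+```sh
+cargo build -p minimal-template-node --release
+./target/release/minimal-template-node --dev
+```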
+ +# Docs + +You can generate and view the [Rust +Docs](https://doc.rust-lang.org/cargo/commands/cargo-doc.html) for this template +with this command: + +```sh +cargo doc -p minimal-template --open +``` diff --git a/templates/minimal/runtime/src/lib.rs b/templates/minimal/runtime/src/lib.rs index 794f30a054a..d2debbf5689 100644 --- a/templates/minimal/runtime/src/lib.rs +++ b/templates/minimal/runtime/src/lib.rs @@ -15,6 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! A minimal runtime that includes the template [`pallet`](`pallet_minimal_template`). + #![cfg_attr(not(feature = "std"), no_std)] // Make the WASM binary available. @@ -24,6 +26,7 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); use frame::{ deps::frame_support::{ genesis_builder_helper::{build_state, get_preset}, + runtime, weights::{FixedFee, NoFee}, }, prelude::*, @@ -36,6 +39,7 @@ use frame::{ }, }; +/// The runtime version. #[runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("minimal-template-runtime"), @@ -54,61 +58,108 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } +/// The signed extensions that are added to the runtime. type SignedExtra = ( + // Checks that the sender is not the zero address. frame_system::CheckNonZeroSender, + // Checks that the runtime version is correct. frame_system::CheckSpecVersion, + // Checks that the transaction version is correct. frame_system::CheckTxVersion, + // Checks that the genesis hash is correct. frame_system::CheckGenesis, + // Checks that the era is valid. frame_system::CheckEra, + // Checks that the nonce is valid. frame_system::CheckNonce, + // Checks that the weight is valid. frame_system::CheckWeight, + // Ensures that the sender has enough funds to pay for the transaction + // and deducts the fee from the sender's account. pallet_transaction_payment::ChargeTransactionPayment, ); -construct_runtime!( - pub enum Runtime { - System: frame_system, - Timestamp: pallet_timestamp, - - Balances: pallet_balances, - Sudo: pallet_sudo, - TransactionPayment: pallet_transaction_payment, - - // our local pallet - Template: pallet_minimal_template, - } -); +// Composes the runtime by adding all the used pallets and deriving necessary types. +#[runtime] +mod runtime { + /// The main runtime type. + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask + )] + pub struct Runtime; + + /// Mandatory system pallet that should always be included in a FRAME runtime. + #[runtime::pallet_index(0)] + pub type System = frame_system; + + /// Provides a way for consensus systems to set and check the onchain time. + #[runtime::pallet_index(1)] + pub type Timestamp = pallet_timestamp; + + /// Provides the ability to keep track of balances. + #[runtime::pallet_index(2)] + pub type Balances = pallet_balances; + + /// Provides a way to execute privileged functions. + #[runtime::pallet_index(3)] + pub type Sudo = pallet_sudo; + + /// Provides the ability to charge for extrinsic execution. + #[runtime::pallet_index(4)] + pub type TransactionPayment = pallet_transaction_payment; + + /// A minimal pallet template. + #[runtime::pallet_index(5)] + pub type Template = pallet_minimal_template; +} parameter_types! 
{
 	pub const Version: RuntimeVersion = VERSION;
 }
 
+/// Implements the types required for the system pallet.
 #[derive_impl(frame_system::config_preludes::SolochainDefaultConfig)]
 impl frame_system::Config for Runtime {
 	type Block = Block;
 	type Version = Version;
-	type BlockHashCount = ConstU32<1024>;
+	// Use the account data from the balances pallet
 	type AccountData = pallet_balances::AccountData<<Runtime as pallet_balances::Config>::Balance>;
 }
 
+// Implements the types required for the balances pallet.
 #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)]
 impl pallet_balances::Config for Runtime {
 	type AccountStore = System;
 }
 
+// Implements the types required for the sudo pallet.
 #[derive_impl(pallet_sudo::config_preludes::TestDefaultConfig)]
 impl pallet_sudo::Config for Runtime {}
 
+// Implements the types required for the timestamp pallet.
 #[derive_impl(pallet_timestamp::config_preludes::TestDefaultConfig)]
 impl pallet_timestamp::Config for Runtime {}
 
+// Implements the types required for the transaction payment pallet.
 #[derive_impl(pallet_transaction_payment::config_preludes::TestDefaultConfig)]
 impl pallet_transaction_payment::Config for Runtime {
 	type OnChargeTransaction = pallet_transaction_payment::FungibleAdapter<Balances, ()>;
+	// Setting fee as independent of the weight of the extrinsic for demo purposes
 	type WeightToFee = NoFee<<Runtime as pallet_balances::Config>::Balance>;
+	// Setting fee as fixed for any length of the call data for demo purposes
 	type LengthToFee = FixedFee<1, <Runtime as pallet_balances::Config>::Balance>;
 }
 
+// Implements the types required for the template pallet.
 impl pallet_minimal_template::Config for Runtime {}
 
 type Block = frame::runtime::types_common::BlockOf<Runtime, SignedExtra>;
diff --git a/templates/minimal/src/lib.rs b/templates/minimal/src/lib.rs
new file mode 100644
index 00000000000..68825d190bb
--- /dev/null
+++ b/templates/minimal/src/lib.rs
@@ -0,0 +1,75 @@
+//! # Minimal Template
+//!
+//! This is a minimal template for creating a blockchain using the Polkadot SDK.
+//!
+//! ## Components
+//!
+//! The template consists of the following components:
+//!
+//! ### Node
+//!
+//! A minimal blockchain [`node`](`minimal_template_node`) that is capable of running a
+//! runtime. It uses a simple chain specification, provides an option to choose Manual or
+//! InstantSeal for consensus and exposes a few commands to interact with the node.
+//!
+//! ### Runtime
+//!
+//! A minimal [`runtime`](`minimal_template_runtime`) (or a state transition function) that
+//! is capable of being run on the node. It is built using the [`FRAME`](`frame`) framework
+//! that enables the composition of the core logic via separate modules called "pallets".
+//! FRAME defines a complete DSL for building such pallets and the runtime itself.
+//!
+//! #### Transaction Fees
+//!
+//! The runtime charges a transaction fee for every transaction that is executed. The fee is
+//! calculated based on the weight of the transaction (accounting for the execution time) and
+//! the length of the call data. Please refer to
+//! [`benchmarking docs`](`polkadot_sdk_docs::reference_docs::frame_benchmarking_weight`) for
+//! more information on how the weight is calculated.
+//!
+//! This template sets the fee as independent of the weight of the extrinsic and fixed for any
+//! length of the call data for demo purposes.
+//!
+//! ### Pallet
+//!
+//! A minimal [`pallet`](`pallet_minimal_template`) that is built using FRAME. It is a unit of
+//! encapsulated logic that has a clearly defined responsibility and can be linked to other pallets.
+//!
+//! ## Getting Started
+//!
+//! 
To get started with the template, follow the steps below: +//! +//! ### Build the Node +//! +//! Build the node using the following command: +//! +//! ```bash +//! cargo build -p minimal-template-node --release +//! ``` +//! +//! ### Run the Node +//! +//! Run the node using the following command: +//! +//! ```bash +//! ./target/release/minimal-template-node --dev +//! ``` +//! +//! ### CLI Options +//! +//! The node exposes a few options that can be used to interact with the node. To see the list of +//! available options, run the following command: +//! +//! ```bash +//! ./target/release/minimal-template-node --help +//! ``` +//! +//! #### Consensus Algorithm +//! +//! In order to run the node with a specific consensus algorithm, use the `--consensus` flag. For +//! example, to run the node with ManualSeal consensus with a block time of 5000ms, use the +//! following command: +//! +//! ```bash +//! ./target/release/minimal-template-node --dev --consensus manual-seal-5000 +//! ``` -- GitLab From b8593ccd1bddacc87a11559fe845db43b7f4ec6d Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Tue, 30 Apr 2024 16:42:50 +0300 Subject: [PATCH 095/107] BEEFY: Define basic fisherman (#4328) Related to https://github.com/paritytech/polkadot-sdk/pull/1903 For #1903 we will need to add a Fisherman struct. This PR: - defines a basic version of `Fisherman` and moves into it the logic that we have now for reporting double voting equivocations - splits the logic for generating the key ownership proofs into a more generic separate method - renames `EquivocationProof` to `DoubleVotingProof` since later we will introduce a new type of equivocation The PR doesn't contain any functional changes --- polkadot/node/service/src/fake_runtime_api.rs | 2 +- polkadot/runtime/rococo/src/lib.rs | 2 +- polkadot/runtime/test-runtime/src/lib.rs | 2 +- polkadot/runtime/westend/src/lib.rs | 2 +- substrate/bin/node/runtime/src/lib.rs | 2 +- substrate/client/consensus/beefy/Cargo.toml | 2 +- .../client/consensus/beefy/src/fisherman.rs | 162 ++++++++++++++++++ substrate/client/consensus/beefy/src/lib.rs | 19 +- substrate/client/consensus/beefy/src/round.rs | 10 +- substrate/client/consensus/beefy/src/tests.rs | 6 +- .../client/consensus/beefy/src/worker.rs | 93 +++------- substrate/frame/beefy/src/equivocation.rs | 4 +- substrate/frame/beefy/src/lib.rs | 8 +- .../primitives/consensus/beefy/src/lib.rs | 8 +- .../consensus/beefy/src/test_utils.rs | 6 +- 15 files changed, 225 insertions(+), 103 deletions(-) create mode 100644 substrate/client/consensus/beefy/src/fisherman.rs diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index c6cfb7a27d0..89613040dca 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ b/polkadot/node/service/src/fake_runtime_api.rs @@ -242,7 +242,7 @@ sp_api::impl_runtime_apis! { } fn submit_report_equivocation_unsigned_extrinsic( - _: beefy_primitives::EquivocationProof< + _: beefy_primitives::DoubleVotingProof< BlockNumber, BeefyId, BeefySignature, diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 1cfe9adfe13..287ae9937da 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -2018,7 +2018,7 @@ sp_api::impl_runtime_apis! 
{ } fn submit_report_equivocation_unsigned_extrinsic( - equivocation_proof: beefy_primitives::EquivocationProof< + equivocation_proof: beefy_primitives::DoubleVotingProof< BlockNumber, BeefyId, BeefySignature, diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index d0f1ff0035f..87becf73cb7 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -1009,7 +1009,7 @@ sp_api::impl_runtime_apis! { } fn submit_report_equivocation_unsigned_extrinsic( - _equivocation_proof: beefy_primitives::EquivocationProof< + _equivocation_proof: beefy_primitives::DoubleVotingProof< BlockNumber, BeefyId, BeefySignature, diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index de961bb4c39..7125f5d34c4 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1966,7 +1966,7 @@ sp_api::impl_runtime_apis! { } fn submit_report_equivocation_unsigned_extrinsic( - equivocation_proof: beefy_primitives::EquivocationProof< + equivocation_proof: beefy_primitives::DoubleVotingProof< BlockNumber, BeefyId, BeefySignature, diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 5d8016532a5..18b0d0c31a4 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -3053,7 +3053,7 @@ impl_runtime_apis! { } fn submit_report_equivocation_unsigned_extrinsic( - equivocation_proof: sp_consensus_beefy::EquivocationProof< + equivocation_proof: sp_consensus_beefy::DoubleVotingProof< BlockNumber, BeefyId, BeefySignature, diff --git a/substrate/client/consensus/beefy/Cargo.toml b/substrate/client/consensus/beefy/Cargo.toml index 7b61b3c6c01..435604a9473 100644 --- a/substrate/client/consensus/beefy/Cargo.toml +++ b/substrate/client/consensus/beefy/Cargo.toml @@ -39,7 +39,6 @@ sp-consensus-beefy = { path = "../../../primitives/consensus/beefy" } sp-core = { path = "../../../primitives/core" } sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" } sp-keystore = { path = "../../../primitives/keystore" } -sp-mmr-primitives = { path = "../../../primitives/merkle-mountain-range" } sp-runtime = { path = "../../../primitives/runtime" } tokio = "1.37" @@ -51,6 +50,7 @@ sc-block-builder = { path = "../../block-builder" } sc-network-test = { path = "../../network/test" } sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa" } sp-keyring = { path = "../../../primitives/keyring" } +sp-mmr-primitives = { path = "../../../primitives/merkle-mountain-range" } sp-tracing = { path = "../../../primitives/tracing" } substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } diff --git a/substrate/client/consensus/beefy/src/fisherman.rs b/substrate/client/consensus/beefy/src/fisherman.rs new file mode 100644 index 00000000000..a2b4c8f945d --- /dev/null +++ b/substrate/client/consensus/beefy/src/fisherman.rs @@ -0,0 +1,162 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::{error::Error, keystore::BeefyKeystore, round::Rounds, LOG_TARGET}; +use log::{debug, error, warn}; +use sc_client_api::Backend; +use sp_api::ProvideRuntimeApi; +use sp_blockchain::HeaderBackend; +use sp_consensus_beefy::{ + check_equivocation_proof, + ecdsa_crypto::{AuthorityId, Signature}, + BeefyApi, BeefySignatureHasher, DoubleVotingProof, OpaqueKeyOwnershipProof, ValidatorSetId, +}; +use sp_runtime::{ + generic::BlockId, + traits::{Block, NumberFor}, +}; +use std::{marker::PhantomData, sync::Arc}; + +/// Helper struct containing the id and the key ownership proof for a validator. +pub struct ProvedValidator<'a> { + pub id: &'a AuthorityId, + pub key_owner_proof: OpaqueKeyOwnershipProof, +} + +/// Helper used to check and report equivocations. +pub struct Fisherman { + backend: Arc, + runtime: Arc, + key_store: Arc>, + + _phantom: PhantomData, +} + +impl, RuntimeApi: ProvideRuntimeApi> Fisherman +where + RuntimeApi::Api: BeefyApi, +{ + pub fn new( + backend: Arc, + runtime: Arc, + keystore: Arc>, + ) -> Self { + Self { backend, runtime, key_store: keystore, _phantom: Default::default() } + } + + fn prove_offenders<'a>( + &self, + at: BlockId, + offender_ids: impl Iterator, + validator_set_id: ValidatorSetId, + ) -> Result>, Error> { + let hash = match at { + BlockId::Hash(hash) => hash, + BlockId::Number(number) => self + .backend + .blockchain() + .expect_block_hash_from_id(&BlockId::Number(number)) + .map_err(|err| { + Error::Backend(format!( + "Couldn't get hash for block #{:?} (error: {:?}). \ + Skipping report for equivocation", + at, err + )) + })?, + }; + + let runtime_api = self.runtime.runtime_api(); + let mut proved_offenders = vec![]; + for offender_id in offender_ids { + match runtime_api.generate_key_ownership_proof( + hash, + validator_set_id, + offender_id.clone(), + ) { + Ok(Some(key_owner_proof)) => { + proved_offenders.push(ProvedValidator { id: offender_id, key_owner_proof }); + }, + Ok(None) => { + debug!( + target: LOG_TARGET, + "🥩 Equivocation offender {} not part of the authority set {}.", + offender_id, validator_set_id + ); + }, + Err(e) => { + error!( + target: LOG_TARGET, + "🥩 Error generating key ownership proof for equivocation offender {} \ + in authority set {}: {}", + offender_id, validator_set_id, e + ); + }, + }; + } + + Ok(proved_offenders) + } + + /// Report the given equivocation to the BEEFY runtime module. This method + /// generates a session membership proof of the offender and then submits an + /// extrinsic to report the equivocation. In particular, the session membership + /// proof must be generated at the block at which the given set was active which + /// isn't necessarily the best block if there are pending authority set changes. 
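+	///
+	/// Note: invalid proofs and equivocations authored by this node itself are
+	/// silently skipped, with the method still returning `Ok(())`.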
+ pub fn report_double_voting( + &self, + proof: DoubleVotingProof, AuthorityId, Signature>, + active_rounds: &Rounds, + ) -> Result<(), Error> { + let (validators, validator_set_id) = + (active_rounds.validators(), active_rounds.validator_set_id()); + let offender_id = proof.offender_id(); + + if !check_equivocation_proof::<_, _, BeefySignatureHasher>(&proof) { + debug!(target: LOG_TARGET, "🥩 Skipping report for bad equivocation {:?}", proof); + return Ok(()) + } + + if let Some(local_id) = self.key_store.authority_id(validators) { + if offender_id == &local_id { + warn!(target: LOG_TARGET, "🥩 Skipping report for own equivocation"); + return Ok(()) + } + } + + let key_owner_proofs = self.prove_offenders( + BlockId::Number(*proof.round_number()), + vec![offender_id].into_iter(), + validator_set_id, + )?; + + // submit equivocation report at **best** block + let best_block_hash = self.backend.blockchain().info().best_hash; + for ProvedValidator { key_owner_proof, .. } in key_owner_proofs { + self.runtime + .runtime_api() + .submit_report_equivocation_unsigned_extrinsic( + best_block_hash, + proof.clone(), + key_owner_proof, + ) + .map_err(Error::RuntimeApi)?; + } + + Ok(()) + } +} diff --git a/substrate/client/consensus/beefy/src/lib.rs b/substrate/client/consensus/beefy/src/lib.rs index 2637481fbf3..0e49839f0fd 100644 --- a/substrate/client/consensus/beefy/src/lib.rs +++ b/substrate/client/consensus/beefy/src/lib.rs @@ -43,11 +43,10 @@ use sp_api::ProvideRuntimeApi; use sp_blockchain::{Backend as BlockchainBackend, HeaderBackend}; use sp_consensus::{Error as ConsensusError, SyncOracle}; use sp_consensus_beefy::{ - ecdsa_crypto::AuthorityId, BeefyApi, ConsensusLog, MmrRootHash, PayloadProvider, ValidatorSet, + ecdsa_crypto::AuthorityId, BeefyApi, ConsensusLog, PayloadProvider, ValidatorSet, BEEFY_ENGINE_ID, }; use sp_keystore::KeystorePtr; -use sp_mmr_primitives::MmrApi; use sp_runtime::traits::{Block, Header as HeaderT, NumberFor, Zero}; use std::{ collections::{BTreeMap, VecDeque}, @@ -69,6 +68,7 @@ pub mod justification; use crate::{ communication::gossip::GossipValidator, + fisherman::Fisherman, justification::BeefyVersionedFinalityProof, keystore::BeefyKeystore, metrics::VoterMetrics, @@ -80,6 +80,7 @@ pub use communication::beefy_protocol_name::{ }; use sp_runtime::generic::OpaqueDigestItemId; +mod fisherman; #[cfg(test)] mod tests; @@ -305,14 +306,16 @@ where pending_justifications: BTreeMap, BeefyVersionedFinalityProof>, is_authority: bool, ) -> BeefyWorker { + let key_store = Arc::new(self.key_store); BeefyWorker { - backend: self.backend, - runtime: self.runtime, - key_store: self.key_store, - metrics: self.metrics, - persisted_state: self.persisted_state, + backend: self.backend.clone(), + runtime: self.runtime.clone(), + key_store: key_store.clone(), payload_provider, sync, + fisherman: Arc::new(Fisherman::new(self.backend, self.runtime, key_store)), + metrics: self.metrics, + persisted_state: self.persisted_state, comms, links, pending_justifications, @@ -487,7 +490,7 @@ pub async fn start_beefy_gadget( C: Client + BlockBackend, P: PayloadProvider + Clone, R: ProvideRuntimeApi, - R::Api: BeefyApi + MmrApi>, + R::Api: BeefyApi, N: GossipNetwork + NetworkRequest + Send + Sync + 'static, S: GossipSyncing + SyncOracle + 'static, { diff --git a/substrate/client/consensus/beefy/src/round.rs b/substrate/client/consensus/beefy/src/round.rs index 0045dc70c26..5dae80cb183 100644 --- a/substrate/client/consensus/beefy/src/round.rs +++ b/substrate/client/consensus/beefy/src/round.rs @@ 
-22,7 +22,7 @@ use codec::{Decode, Encode}; use log::{debug, info}; use sp_consensus_beefy::{ ecdsa_crypto::{AuthorityId, Signature}, - Commitment, EquivocationProof, SignedCommitment, ValidatorSet, ValidatorSetId, VoteMessage, + Commitment, DoubleVotingProof, SignedCommitment, ValidatorSet, ValidatorSetId, VoteMessage, }; use sp_runtime::traits::{Block, NumberFor}; use std::collections::BTreeMap; @@ -61,7 +61,7 @@ pub fn threshold(authorities: usize) -> usize { pub enum VoteImportResult { Ok, RoundConcluded(SignedCommitment, Signature>), - Equivocation(EquivocationProof, AuthorityId, Signature>), + DoubleVoting(DoubleVotingProof, AuthorityId, Signature>), Invalid, Stale, } @@ -153,7 +153,7 @@ where target: LOG_TARGET, "🥩 detected equivocated vote: 1st: {:?}, 2nd: {:?}", previous_vote, vote ); - return VoteImportResult::Equivocation(EquivocationProof { + return VoteImportResult::DoubleVoting(DoubleVotingProof { first: previous_vote.clone(), second: vote, }) @@ -207,7 +207,7 @@ mod tests { use sc_network_test::Block; use sp_consensus_beefy::{ - known_payloads::MMR_ROOT_ID, test_utils::Keyring, Commitment, EquivocationProof, Payload, + known_payloads::MMR_ROOT_ID, test_utils::Keyring, Commitment, DoubleVotingProof, Payload, SignedCommitment, ValidatorSet, VoteMessage, }; @@ -494,7 +494,7 @@ mod tests { let mut alice_vote2 = alice_vote1.clone(); alice_vote2.commitment = commitment2; - let expected_result = VoteImportResult::Equivocation(EquivocationProof { + let expected_result = VoteImportResult::DoubleVoting(DoubleVotingProof { first: alice_vote1.clone(), second: alice_vote2.clone(), }); diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs index 9b13d1da6d7..2bb145d660d 100644 --- a/substrate/client/consensus/beefy/src/tests.rs +++ b/substrate/client/consensus/beefy/src/tests.rs @@ -59,7 +59,7 @@ use sp_consensus_beefy::{ known_payloads, mmr::{find_mmr_root_digest, MmrRootProvider}, test_utils::Keyring as BeefyKeyring, - BeefyApi, Commitment, ConsensusLog, EquivocationProof, MmrRootHash, OpaqueKeyOwnershipProof, + BeefyApi, Commitment, ConsensusLog, DoubleVotingProof, MmrRootHash, OpaqueKeyOwnershipProof, Payload, SignedCommitment, ValidatorSet, ValidatorSetId, VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID, }; @@ -259,7 +259,7 @@ pub(crate) struct TestApi { pub validator_set: Option, pub mmr_root_hash: MmrRootHash, pub reported_equivocations: - Option, AuthorityId, Signature>>>>>, + Option, AuthorityId, Signature>>>>>, } impl TestApi { @@ -313,7 +313,7 @@ sp_api::mock_impl_runtime_apis! 
{ } fn submit_report_equivocation_unsigned_extrinsic( - proof: EquivocationProof, AuthorityId, Signature>, + proof: DoubleVotingProof, AuthorityId, Signature>, _dummy: OpaqueKeyOwnershipProof, ) -> Option<()> { if let Some(equivocations_buf) = self.inner.reported_equivocations.as_ref() { diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs index 05575ae01c3..cfbb3d63aea 100644 --- a/substrate/client/consensus/beefy/src/worker.rs +++ b/substrate/client/consensus/beefy/src/worker.rs @@ -23,6 +23,7 @@ use crate::{ }, error::Error, find_authorities_change, + fisherman::Fisherman, justification::BeefyVersionedFinalityProof, keystore::BeefyKeystore, metric_inc, metric_set, @@ -39,10 +40,9 @@ use sp_api::ProvideRuntimeApi; use sp_arithmetic::traits::{AtLeast32Bit, Saturating}; use sp_consensus::SyncOracle; use sp_consensus_beefy::{ - check_equivocation_proof, ecdsa_crypto::{AuthorityId, Signature}, - BeefyApi, BeefySignatureHasher, Commitment, EquivocationProof, PayloadProvider, ValidatorSet, - VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID, + BeefyApi, Commitment, DoubleVotingProof, PayloadProvider, ValidatorSet, VersionedFinalityProof, + VoteMessage, BEEFY_ENGINE_ID, }; use sp_runtime::{ generic::BlockId, @@ -377,9 +377,10 @@ pub(crate) struct BeefyWorker { // utilities pub backend: Arc, pub runtime: Arc, - pub key_store: BeefyKeystore, + pub key_store: Arc>, pub payload_provider: P, pub sync: Arc, + pub fisherman: Arc>, // communication (created once, but returned and reused if worker is restarted/reinitialized) pub comms: BeefyComms, @@ -590,9 +591,9 @@ where } metric_inc!(self.metrics, beefy_good_votes_processed); }, - VoteImportResult::Equivocation(proof) => { + VoteImportResult::DoubleVoting(proof) => { metric_inc!(self.metrics, beefy_equivocation_votes); - self.report_equivocation(proof)?; + self.report_double_voting(proof)?; }, VoteImportResult::Invalid => metric_inc!(self.metrics, beefy_invalid_votes), VoteImportResult::Stale => metric_inc!(self.metrics, beefy_stale_votes), @@ -941,64 +942,13 @@ where (error, self.comms) } - /// Report the given equivocation to the BEEFY runtime module. This method - /// generates a session membership proof of the offender and then submits an - /// extrinsic to report the equivocation. In particular, the session membership - /// proof must be generated at the block at which the given set was active which - /// isn't necessarily the best block if there are pending authority set changes. - pub(crate) fn report_equivocation( + /// Report the given equivocation to the BEEFY runtime module. 
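+	/// Proof checking, key ownership proof generation and extrinsic submission
+	/// are delegated to the [`Fisherman`].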
+ fn report_double_voting( &self, - proof: EquivocationProof, AuthorityId, Signature>, + proof: DoubleVotingProof, AuthorityId, Signature>, ) -> Result<(), Error> { let rounds = self.persisted_state.voting_oracle.active_rounds()?; - let (validators, validator_set_id) = (rounds.validators(), rounds.validator_set_id()); - let offender_id = proof.offender_id().clone(); - - if !check_equivocation_proof::<_, _, BeefySignatureHasher>(&proof) { - debug!(target: LOG_TARGET, "🥩 Skip report for bad equivocation {:?}", proof); - return Ok(()) - } else if let Some(local_id) = self.key_store.authority_id(validators) { - if offender_id == local_id { - warn!(target: LOG_TARGET, "🥩 Skip equivocation report for own equivocation"); - return Ok(()) - } - } - - let number = *proof.round_number(); - let hash = self - .backend - .blockchain() - .expect_block_hash_from_id(&BlockId::Number(number)) - .map_err(|err| { - let err_msg = format!( - "Couldn't get hash for block #{:?} (error: {:?}), skipping report for equivocation", - number, err - ); - Error::Backend(err_msg) - })?; - let runtime_api = self.runtime.runtime_api(); - // generate key ownership proof at that block - let key_owner_proof = match runtime_api - .generate_key_ownership_proof(hash, validator_set_id, offender_id) - .map_err(Error::RuntimeApi)? - { - Some(proof) => proof, - None => { - debug!( - target: LOG_TARGET, - "🥩 Equivocation offender not part of the authority set." - ); - return Ok(()) - }, - }; - - // submit equivocation report at **best** block - let best_block_hash = self.backend.blockchain().info().best_hash; - runtime_api - .submit_report_equivocation_unsigned_extrinsic(best_block_hash, proof, key_owner_proof) - .map_err(Error::RuntimeApi)?; - - Ok(()) + self.fisherman.report_double_voting(proof, rounds) } } @@ -1165,13 +1115,15 @@ pub(crate) mod tests { .unwrap(); let payload_provider = MmrRootProvider::new(api.clone()); let comms = BeefyComms { gossip_engine, gossip_validator, on_demand_justifications }; + let key_store: Arc> = Arc::new(Some(keystore).into()); BeefyWorker { - backend, - runtime: api, - key_store: Some(keystore).into(), + backend: backend.clone(), + runtime: api.clone(), + key_store: key_store.clone(), metrics, payload_provider, sync: Arc::new(sync), + fisherman: Arc::new(Fisherman::new(backend, api, key_store)), links, comms, pending_justifications: BTreeMap::new(), @@ -1590,6 +1542,11 @@ pub(crate) mod tests { let mut net = BeefyTestNet::new(1); let mut worker = create_beefy_worker(net.peer(0), &keys[0], 1, validator_set.clone()); worker.runtime = api_alice.clone(); + worker.fisherman = Arc::new(Fisherman::new( + worker.backend.clone(), + worker.runtime.clone(), + worker.key_store.clone(), + )); // let there be a block with num = 1: let _ = net.peer(0).push_blocks(1, false); @@ -1604,7 +1561,7 @@ pub(crate) mod tests { ); { // expect voter (Alice) to successfully report it - assert_eq!(worker.report_equivocation(good_proof.clone()), Ok(())); + assert_eq!(worker.report_double_voting(good_proof.clone()), Ok(())); // verify Alice reports Bob equivocation to runtime let reported = api_alice.reported_equivocations.as_ref().unwrap().lock(); assert_eq!(reported.len(), 1); @@ -1616,7 +1573,7 @@ pub(crate) mod tests { let mut bad_proof = good_proof.clone(); bad_proof.first.id = Keyring::Charlie.public(); // bad proofs are simply ignored - assert_eq!(worker.report_equivocation(bad_proof), Ok(())); + assert_eq!(worker.report_double_voting(bad_proof), Ok(())); // verify nothing reported to runtime 
assert!(api_alice.reported_equivocations.as_ref().unwrap().lock().is_empty()); @@ -1625,7 +1582,7 @@ pub(crate) mod tests { old_proof.first.commitment.validator_set_id = 0; old_proof.second.commitment.validator_set_id = 0; // old proofs are simply ignored - assert_eq!(worker.report_equivocation(old_proof), Ok(())); + assert_eq!(worker.report_double_voting(old_proof), Ok(())); // verify nothing reported to runtime assert!(api_alice.reported_equivocations.as_ref().unwrap().lock().is_empty()); @@ -1635,7 +1592,7 @@ pub(crate) mod tests { (block_num, payload2.clone(), set_id, &Keyring::Alice), ); // equivocations done by 'self' are simply ignored (not reported) - assert_eq!(worker.report_equivocation(self_proof), Ok(())); + assert_eq!(worker.report_double_voting(self_proof), Ok(())); // verify nothing reported to runtime assert!(api_alice.reported_equivocations.as_ref().unwrap().lock().is_empty()); } diff --git a/substrate/frame/beefy/src/equivocation.rs b/substrate/frame/beefy/src/equivocation.rs index bbc6eae6af2..aecc9e721d5 100644 --- a/substrate/frame/beefy/src/equivocation.rs +++ b/substrate/frame/beefy/src/equivocation.rs @@ -38,7 +38,7 @@ use codec::{self as codec, Decode, Encode}; use frame_support::traits::{Get, KeyOwnerProofSystem}; use frame_system::pallet_prelude::BlockNumberFor; use log::{error, info}; -use sp_consensus_beefy::{EquivocationProof, ValidatorSetId, KEY_TYPE as BEEFY_KEY_TYPE}; +use sp_consensus_beefy::{DoubleVotingProof, ValidatorSetId, KEY_TYPE as BEEFY_KEY_TYPE}; use sp_runtime::{ transaction_validity::{ InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, @@ -123,7 +123,7 @@ pub struct EquivocationReportSystem(sp_std::marker::PhantomData<(T, /// Equivocation evidence convenience alias. pub type EquivocationEvidenceFor = ( - EquivocationProof< + DoubleVotingProof< BlockNumberFor, ::BeefyId, <::BeefyId as RuntimeAppPublic>::Signature, diff --git a/substrate/frame/beefy/src/lib.rs b/substrate/frame/beefy/src/lib.rs index 09cd13ab70a..63f3e9bb309 100644 --- a/substrate/frame/beefy/src/lib.rs +++ b/substrate/frame/beefy/src/lib.rs @@ -41,7 +41,7 @@ use sp_staking::{offence::OffenceReportSystem, SessionIndex}; use sp_std::prelude::*; use sp_consensus_beefy::{ - AuthorityIndex, BeefyAuthorityId, ConsensusLog, EquivocationProof, OnNewValidatorSet, + AuthorityIndex, BeefyAuthorityId, ConsensusLog, DoubleVotingProof, OnNewValidatorSet, ValidatorSet, BEEFY_ENGINE_ID, GENESIS_AUTHORITY_SET_ID, }; @@ -210,7 +210,7 @@ pub mod pallet { pub fn report_equivocation( origin: OriginFor, equivocation_proof: Box< - EquivocationProof< + DoubleVotingProof< BlockNumberFor, T::BeefyId, ::Signature, @@ -245,7 +245,7 @@ pub mod pallet { pub fn report_equivocation_unsigned( origin: OriginFor, equivocation_proof: Box< - EquivocationProof< + DoubleVotingProof< BlockNumberFor, T::BeefyId, ::Signature, @@ -368,7 +368,7 @@ impl Pallet { /// an unsigned extrinsic with a call to `report_equivocation_unsigned` and /// will push the transaction to the pool. Only useful in an offchain context. 
pub fn submit_unsigned_equivocation_report( - equivocation_proof: EquivocationProof< + equivocation_proof: DoubleVotingProof< BlockNumberFor, T::BeefyId, ::Signature, diff --git a/substrate/primitives/consensus/beefy/src/lib.rs b/substrate/primitives/consensus/beefy/src/lib.rs index 6f644c5f790..390c0ff7127 100644 --- a/substrate/primitives/consensus/beefy/src/lib.rs +++ b/substrate/primitives/consensus/beefy/src/lib.rs @@ -306,14 +306,14 @@ pub struct VoteMessage { /// BEEFY happens when a voter votes on the same round/block for different payloads. /// Proving is achieved by collecting the signed commitments of conflicting votes. #[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] -pub struct EquivocationProof { +pub struct DoubleVotingProof { /// The first vote in the equivocation. pub first: VoteMessage, /// The second vote in the equivocation. pub second: VoteMessage, } -impl EquivocationProof { +impl DoubleVotingProof { /// Returns the authority id of the equivocator. pub fn offender_id(&self) -> &Id { &self.first.id @@ -347,7 +347,7 @@ where /// Verifies the equivocation proof by making sure that both votes target /// different blocks and that its signatures are valid. pub fn check_equivocation_proof( - report: &EquivocationProof::Signature>, + report: &DoubleVotingProof::Signature>, ) -> bool where Id: BeefyAuthorityId + PartialEq, @@ -437,7 +437,7 @@ sp_api::decl_runtime_apis! { /// hardcoded to return `None`). Only useful in an offchain context. fn submit_report_equivocation_unsigned_extrinsic( equivocation_proof: - EquivocationProof, AuthorityId, ::Signature>, + DoubleVotingProof, AuthorityId, ::Signature>, key_owner_proof: OpaqueKeyOwnershipProof, ) -> Option<()>; diff --git a/substrate/primitives/consensus/beefy/src/test_utils.rs b/substrate/primitives/consensus/beefy/src/test_utils.rs index ec13c9c6900..d7fd49214f1 100644 --- a/substrate/primitives/consensus/beefy/src/test_utils.rs +++ b/substrate/primitives/consensus/beefy/src/test_utils.rs @@ -18,7 +18,7 @@ #[cfg(feature = "bls-experimental")] use crate::ecdsa_bls_crypto; use crate::{ - ecdsa_crypto, AuthorityIdBound, BeefySignatureHasher, Commitment, EquivocationProof, Payload, + ecdsa_crypto, AuthorityIdBound, BeefySignatureHasher, Commitment, DoubleVotingProof, Payload, ValidatorSetId, VoteMessage, }; use sp_application_crypto::{AppCrypto, AppPair, RuntimeAppPublic, Wraps}; @@ -140,7 +140,7 @@ impl From> for ecdsa_crypto::Public { pub fn generate_equivocation_proof( vote1: (u64, Payload, ValidatorSetId, &Keyring), vote2: (u64, Payload, ValidatorSetId, &Keyring), -) -> EquivocationProof { +) -> DoubleVotingProof { let signed_vote = |block_number: u64, payload: Payload, validator_set_id: ValidatorSetId, @@ -151,5 +151,5 @@ pub fn generate_equivocation_proof( }; let first = signed_vote(vote1.0, vote1.1, vote1.2, vote1.3); let second = signed_vote(vote2.0, vote2.1, vote2.2, vote2.3); - EquivocationProof { first, second } + DoubleVotingProof { first, second } } -- GitLab From c973fe86f8c668462186c95655a58fda04508e9a Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Tue, 30 Apr 2024 16:29:14 +0200 Subject: [PATCH 096/107] Contracts: revert reverted changes from 4266 (#4277) revert some reverted changes from #4266 --- .../src/parachain/contracts_config.rs | 14 ++------ .../frame/contracts/mock-network/src/tests.rs | 25 +------------- substrate/frame/contracts/src/lib.rs | 3 ++ substrate/frame/contracts/src/wasm/runtime.rs | 33 ++----------------- 4 files changed, 9 insertions(+), 66 deletions(-) diff --git 
a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs index 20fdd9a243d..bf3c00b3ff1 100644 --- a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs +++ b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs @@ -14,8 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use super::{Balances, Runtime, RuntimeCall, RuntimeEvent, RuntimeHoldReason}; -use frame_support::{derive_impl, parameter_types, traits::Contains}; +use super::{Balances, Runtime, RuntimeCall, RuntimeEvent}; +use crate::parachain::RuntimeHoldReason; +use frame_support::{derive_impl, parameter_types}; parameter_types! { pub Schedule: pallet_contracts::Schedule = Default::default(); @@ -28,14 +29,5 @@ impl pallet_contracts::Config for Runtime { type Currency = Balances; type Schedule = Schedule; type Time = super::Timestamp; - type CallFilter = CallFilter; type Xcm = pallet_xcm::Pallet; } - -/// In this mock, we only allow other contract calls via XCM. -pub struct CallFilter; -impl Contains for CallFilter { - fn contains(call: &RuntimeCall) -> bool { - matches!(call, RuntimeCall::Contracts(pallet_contracts::Call::call { .. })) - } -} diff --git a/substrate/frame/contracts/mock-network/src/tests.rs b/substrate/frame/contracts/mock-network/src/tests.rs index e7d1f6279aa..48a94e172a0 100644 --- a/substrate/frame/contracts/mock-network/src/tests.rs +++ b/substrate/frame/contracts/mock-network/src/tests.rs @@ -22,10 +22,7 @@ use crate::{ relay_chain, MockNet, ParaA, ParachainBalances, Relay, ALICE, BOB, INITIAL_BALANCE, }; use codec::{Decode, Encode}; -use frame_support::{ - assert_err, - traits::{fungibles::Mutate, Currency}, -}; +use frame_support::traits::{fungibles::Mutate, Currency}; use pallet_contracts::{test_utils::builder::*, Code}; use pallet_contracts_fixtures::compile_module; use pallet_contracts_uapi::ReturnErrorCode; @@ -132,26 +129,6 @@ fn test_xcm_execute_incomplete() { }); } -#[test] -fn test_xcm_execute_filtered_call() { - MockNet::reset(); - - let contract_addr = instantiate_test_contract("xcm_execute"); - - ParaA::execute_with(|| { - // `remark` should be rejected, as it is not allowed by our CallFilter. - let call = parachain::RuntimeCall::System(frame_system::Call::remark { remark: vec![] }); - let message: Xcm = Xcm::builder_unsafe() - .transact(OriginKind::Native, Weight::MAX, call.encode()) - .build(); - let result = bare_call(contract_addr.clone()) - .data(VersionedXcm::V4(message).encode()) - .build() - .result; - assert_err!(result, frame_system::Error::::CallFiltered); - }); -} - #[test] fn test_xcm_execute_reentrant_call() { MockNet::reset(); diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index 0045d72141c..3e87eb9f37e 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -307,6 +307,9 @@ pub mod pallet { /// Therefore please make sure to be restrictive about which dispatchables are allowed /// in order to not introduce a new DoS vector like memory allocation patterns that can /// be exploited to drive the runtime into a panic. + /// + /// This filter does not apply to XCM transact calls. To impose restrictions on XCM transact + /// calls, you must configure them separately within the XCM pallet itself. 
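+		///
+		/// # Example
+		///
+		/// A sketch of a restrictive filter that only lets contracts dispatch balance
+		/// transfers (names such as `RuntimeCall` follow the usual `construct_runtime!`
+		/// conventions and are illustrative here):
+		///
+		/// ```ignore
+		/// pub struct AllowBalanceTransfers;
+		/// impl Contains<RuntimeCall> for AllowBalanceTransfers {
+		/// 	fn contains(call: &RuntimeCall) -> bool {
+		/// 		matches!(call, RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { .. }))
+		/// 	}
+		/// }
+		/// ```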
		#[pallet::no_default_bounds]
 		type CallFilter: Contains<<Self as frame_system::Config>::RuntimeCall>;
 
diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs
index 52ceda99edb..3212aff3126 100644
--- a/substrate/frame/contracts/src/wasm/runtime.rs
+++ b/substrate/frame/contracts/src/wasm/runtime.rs
@@ -25,12 +25,8 @@ use crate::{
 };
 use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen};
 use frame_support::{
-	dispatch::DispatchInfo,
-	ensure,
-	pallet_prelude::{DispatchResult, DispatchResultWithPostInfo},
-	parameter_types,
-	traits::Get,
-	weights::Weight,
+	dispatch::DispatchInfo, ensure, pallet_prelude::DispatchResultWithPostInfo, parameter_types,
+	traits::Get, weights::Weight,
 };
 use pallet_contracts_proc_macro::define_env;
 use pallet_contracts_uapi::{CallFlags, ReturnFlags};
@@ -41,7 +37,6 @@ use sp_runtime::{
 };
 use sp_std::{fmt, prelude::*};
 use wasmi::{core::HostError, errors::LinkerError, Linker, Memory, Store};
-use xcm::VersionedXcm;
 
 type CallOf<T> = <T as frame_system::Config>::RuntimeCall;
 
@@ -378,29 +373,6 @@ fn already_charged(_: u32) -> Option<u32> {
 	None
 }
 
-/// Ensure that the XCM program is executable, by checking that it does not contain any [`Transact`]
-/// instruction with a call that is not allowed by the CallFilter.
-fn ensure_executable<T: Config>(message: &VersionedXcm<CallOf<T>>) -> DispatchResult {
-	use frame_support::traits::Contains;
-	use xcm::prelude::{Transact, Xcm};
-
-	let mut message: Xcm<CallOf<T>> =
-		message.clone().try_into().map_err(|_| Error::<T>::XCMDecodeFailed)?;
-
-	message.iter_mut().try_for_each(|inst| -> DispatchResult {
-		let Transact { ref mut call, .. } = inst else { return Ok(()) };
-		let call = call.ensure_decoded().map_err(|_| Error::<T>::XCMDecodeFailed)?;
-
-		if !<T as Config>::CallFilter::contains(call) {
-			return Err(frame_system::Error::<T>::CallFiltered.into())
-		}
-
-		Ok(())
-	})?;
-
-	Ok(())
-}
-
 /// Can only be used for one call.
 pub struct Runtime<'a, E: Ext + 'a> {
 	ext: &'a mut E,
@@ -2117,7 +2089,6 @@ pub mod env {
 		ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?;
 		let message: VersionedXcm<CallOf<E::T>> =
 			ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?;
-		ensure_executable::<E::T>(&message)?;
 
 		let execute_weight =
 			<<E::T as Config>::Xcm as ExecuteController<_, _>>::WeightInfo::execute();
-- 
GitLab


From 6d392c7eea496e0874a9ea37f4a8ea447ebc330e Mon Sep 17 00:00:00 2001
From: Maciej
Date: Wed, 1 May 2024 18:17:55 +0100
Subject: [PATCH 097/107] Statement Distribution Per Peer Rate Limit (#3444)

- [x] Drop requests from a PeerID that is already being served by us.
- [x] Don't send requests to a PeerID if we already are requesting something
  from them at that moment (prioritise other requests or wait).
- [x] Tests
- [ ] ~~Add a small rep update for unsolicited requests (same peer
  request)~~ not included in original PR due to potential issues with nodes
  slowly updating
- [x] Add a metric to track the number of dropped requests due to peer rate
  limiting
- [x] Add a metric to track how many times a node reaches the max parallel
  requests limit in v2+

Helps with but does not close yet:
https://github.com/paritytech-secops/srlabs_findings/issues/303
---
 .gitlab/pipeline/zombienet/polkadot.yml       |   8 +
 Cargo.lock                                    |   1 +
 polkadot/node/malus/Cargo.toml                |   1 +
 polkadot/node/malus/src/malus.rs              |   7 +
 polkadot/node/malus/src/variants/mod.rs       |   2 +
 .../src/variants/spam_statement_requests.rs   | 155 ++++++++++++++
 .../network/statement-distribution/src/lib.rs |   1 +
 .../statement-distribution/src/metrics.rs     |  40 +++-
 .../statement-distribution/src/v2/mod.rs      |  75 +++++--
 .../statement-distribution/src/v2/requests.rs | 200 ++++++++++++++++--
 .../src/v2/tests/requests.rs                  |   2 +-
 ...-spam-statement-distribution-requests.toml |  43 ++++
 ...spam-statement-distribution-requests.zndsl |  27 +++
 prdoc/pr_3444.prdoc                           |  25 +++
 14 files changed, 539 insertions(+), 48 deletions(-)
 create mode 100644 polkadot/node/malus/src/variants/spam_statement_requests.rs
 create mode 100644 polkadot/zombienet_tests/functional/0012-spam-statement-distribution-requests.toml
 create mode 100644 polkadot/zombienet_tests/functional/0012-spam-statement-distribution-requests.zndsl
 create mode 100644 prdoc/pr_3444.prdoc

diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml
index 6b72075c513..38c5332f309 100644
--- a/.gitlab/pipeline/zombienet/polkadot.yml
+++ b/.gitlab/pipeline/zombienet/polkadot.yml
@@ -176,6 +176,14 @@ zombienet-polkadot-elastic-scaling-0002-elastic-scaling-doesnt-break-parachains:
       --local-dir="${LOCAL_DIR}/elastic_scaling"
       --test="0002-elastic-scaling-doesnt-break-parachains.zndsl"
 
+zombienet-polkadot-functional-0012-spam-statement-distribution-requests:
+  extends:
+    - .zombienet-polkadot-common
+  script:
+    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
+      --local-dir="${LOCAL_DIR}/functional"
+      --test="0012-spam-statement-distribution-requests.zndsl"
+
 zombienet-polkadot-smoke-0001-parachains-smoke-test:
   extends:
     - .zombienet-polkadot-common
diff --git a/Cargo.lock b/Cargo.lock
index 1fe4012070a..c3bf3f26385 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -14272,6 +14272,7 @@ dependencies = [
 "polkadot-node-core-pvf-common",
 "polkadot-node-core-pvf-execute-worker",
 "polkadot-node-core-pvf-prepare-worker",
+ "polkadot-node-network-protocol",
 "polkadot-node-primitives",
 "polkadot-node-subsystem",
 "polkadot-node-subsystem-test-helpers",
diff --git a/polkadot/node/malus/Cargo.toml b/polkadot/node/malus/Cargo.toml
index 2f63c2f0938..750074fa9b3 100644
--- a/polkadot/node/malus/Cargo.toml
+++ b/polkadot/node/malus/Cargo.toml
@@ -37,6 +37,7 @@ polkadot-node-core-dispute-coordinator = { path = "../core/dispute-coordinator" }
 polkadot-node-core-candidate-validation = { path = "../core/candidate-validation" }
 polkadot-node-core-backing = { path = "../core/backing" }
 polkadot-node-primitives = { path = "../primitives" }
+polkadot-node-network-protocol = { path = "../network/protocol" }
 polkadot-primitives = { path = "../../primitives" }
 color-eyre = { version = "0.6.1", default-features = false }
 assert_matches = "1.5"
diff --git a/polkadot/node/malus/src/malus.rs b/polkadot/node/malus/src/malus.rs
index 7a9e320e273..6257201537c 100644
--- a/polkadot/node/malus/src/malus.rs
+++ 
b/polkadot/node/malus/src/malus.rs
@@ -40,6 +40,8 @@ enum NemesisVariant {
 DisputeAncestor(DisputeAncestorOptions),
 /// Delayed disputing of finalized candidates.
 DisputeFinalizedCandidates(DisputeFinalizedCandidatesOptions),
+ /// Spam many statement requests instead of sending a single one.
+ SpamStatementRequests(SpamStatementRequestsOptions),
 }
 
 #[derive(Debug, Parser)]
@@ -98,6 +100,11 @@ impl MalusCli {
 finality_delay,
 )?
 },
+ NemesisVariant::SpamStatementRequests(opts) => {
+ let SpamStatementRequestsOptions { spam_factor, cli } = opts;
+
+ polkadot_cli::run_node(cli, SpamStatementRequests { spam_factor }, finality_delay)?
+ },
 }
 Ok(())
 }
diff --git a/polkadot/node/malus/src/variants/mod.rs b/polkadot/node/malus/src/variants/mod.rs
index 3ca1bf4b469..ec945ae1945 100644
--- a/polkadot/node/malus/src/variants/mod.rs
+++ b/polkadot/node/malus/src/variants/mod.rs
@@ -20,6 +20,7 @@ mod back_garbage_candidate;
 mod common;
 mod dispute_finalized_candidates;
 mod dispute_valid_candidates;
+mod spam_statement_requests;
 mod suggest_garbage_candidate;
 mod support_disabled;
 
@@ -27,6 +28,7 @@ pub(crate) use self::{
 back_garbage_candidate::{BackGarbageCandidateOptions, BackGarbageCandidates},
 dispute_finalized_candidates::{DisputeFinalizedCandidates, DisputeFinalizedCandidatesOptions},
 dispute_valid_candidates::{DisputeAncestorOptions, DisputeValidCandidates},
+ spam_statement_requests::{SpamStatementRequests, SpamStatementRequestsOptions},
 suggest_garbage_candidate::{SuggestGarbageCandidateOptions, SuggestGarbageCandidates},
 support_disabled::{SupportDisabled, SupportDisabledOptions},
 };
diff --git a/polkadot/node/malus/src/variants/spam_statement_requests.rs b/polkadot/node/malus/src/variants/spam_statement_requests.rs
new file mode 100644
index 00000000000..c5970c988ac
--- /dev/null
+++ b/polkadot/node/malus/src/variants/spam_statement_requests.rs
@@ -0,0 +1,155 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.

+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.

+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.

+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see .

+//! A malicious node variant that attempts to spam statement requests.
+//!
+//! This malus variant behaves honestly in everything except when propagating statement distribution
+//! requests through the network bridge subsystem. Instead of sending a single request when it needs
+//! something, it attempts to spam the peer with multiple requests.
+//!
+//! Attention: For usage with `zombienet` only!

+#![allow(missing_docs)]

+use polkadot_cli::{
+ service::{
+ AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen,
+ OverseerGenArgs, OverseerHandle,
+ },
+ validator_overseer_builder, Cli,
+};
+use polkadot_node_network_protocol::request_response::{outgoing::Requests, OutgoingRequest};
+use polkadot_node_subsystem::{messages::NetworkBridgeTxMessage, SpawnGlue};
+use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient};
+use sp_core::traits::SpawnNamed;

+// Filter wrapping related types.
+use crate::{interceptor::*, shared::MALUS};
+
+use std::sync::Arc;
+
+/// Wraps around network bridge and replaces it.
+#[derive(Clone)]
+struct RequestSpammer {
+ spam_factor: u32, // How many statement distribution requests to send.
+}
+
+impl MessageInterceptor for RequestSpammer
+where
+ Sender: overseer::NetworkBridgeTxSenderTrait + Clone + Send + 'static,
+{
+ type Message = NetworkBridgeTxMessage;
+
+ /// Intercept NetworkBridgeTxMessage::SendRequests with Requests::AttestedCandidateV2 inside and
+ /// duplicate that request.
+ fn intercept_incoming(
+ &self,
+ _subsystem_sender: &mut Sender,
+ msg: FromOrchestra,
+ ) -> Option> {
+ match msg {
+ FromOrchestra::Communication {
+ msg: NetworkBridgeTxMessage::SendRequests(requests, if_disconnected),
+ } => {
+ let mut new_requests = Vec::new();
+
+ for request in requests {
+ match request {
+ Requests::AttestedCandidateV2(ref req) => {
+ // Temporarily store peer and payload for duplication
+ let peer_to_duplicate = req.peer.clone();
+ let payload_to_duplicate = req.payload.clone();
+ // Push the original request
+ new_requests.push(request);
+
+ // Duplicate for spam purposes
+ gum::info!(
+ target: MALUS,
+ "😈 Duplicating AttestedCandidateV2 request extra {:?} times to peer: {:?}.", self.spam_factor, peer_to_duplicate,
+ );
+ new_requests.extend((0..self.spam_factor - 1).map(|_| {
+ let (new_outgoing_request, _) = OutgoingRequest::new(
+ peer_to_duplicate.clone(),
+ payload_to_duplicate.clone(),
+ );
+ Requests::AttestedCandidateV2(new_outgoing_request)
+ }));
+ },
+ _ => {
+ new_requests.push(request);
+ },
+ }
+ }
+
+ // Pass the message through with a potentially modified number of requests
+ Some(FromOrchestra::Communication {
+ msg: NetworkBridgeTxMessage::SendRequests(new_requests, if_disconnected),
+ })
+ },
+ FromOrchestra::Communication { msg } => Some(FromOrchestra::Communication { msg }),
+ FromOrchestra::Signal(signal) => Some(FromOrchestra::Signal(signal)),
+ }
+ }
+}
+
+//----------------------------------------------------------------------------------
+
+#[derive(Debug, clap::Parser)]
+#[clap(rename_all = "kebab-case")]
+#[allow(missing_docs)]
+pub struct SpamStatementRequestsOptions {
+ /// How many statement distribution requests to send.
+ #[clap(long, ignore_case = true, default_value_t = 1000, value_parser = clap::value_parser!(u32).range(0..=10000000))]
+ pub spam_factor: u32,
+
+ #[clap(flatten)]
+ pub cli: Cli,
+}
+
+/// SpamStatementRequests implementation wrapper which implements `OverseerGen` glue.
+pub(crate) struct SpamStatementRequests {
+ /// How many statement distribution requests to send.
+ pub spam_factor: u32,
+}
+
+impl OverseerGen for SpamStatementRequests {
+ fn generate(
+ &self,
+ connector: OverseerConnector,
+ args: OverseerGenArgs<'_, Spawner, RuntimeClient>,
+ ext_args: Option,
+ ) -> Result<(Overseer, Arc>, OverseerHandle), Error>
+ where
+ RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static,
+ Spawner: 'static + SpawnNamed + Clone + Unpin,
+ {
+ gum::info!(
+ target: MALUS,
+ "😈 Started Malus node that duplicates each statement distribution request spam_factor = {:?} times.",
+ &self.spam_factor,
+ );
+
+ let request_spammer = RequestSpammer { spam_factor: self.spam_factor };
+
+ validator_overseer_builder(
+ args,
+ ext_args.expect("Extended arguments required to build validator overseer are provided"),
+ )?
+ .replace_network_bridge_tx(move |cb| InterceptedSubsystem::new(cb, request_spammer))
+ .build_with_connector(connector)
+ .map_err(|e| e.into())
+ }
+}
diff --git a/polkadot/node/network/statement-distribution/src/lib.rs b/polkadot/node/network/statement-distribution/src/lib.rs
index 7e91d284912..4ca199c3378 100644
--- a/polkadot/node/network/statement-distribution/src/lib.rs
+++ b/polkadot/node/network/statement-distribution/src/lib.rs
@@ -207,6 +207,7 @@ impl StatementDistributionSubsystem {
 v2::respond_task(
 self.req_receiver.take().expect("Mandatory argument to new. qed"),
 res_sender.clone(),
+ self.metrics.clone(),
 )
 .boxed(),
 )
diff --git a/polkadot/node/network/statement-distribution/src/metrics.rs b/polkadot/node/network/statement-distribution/src/metrics.rs
index b9a51dc89d6..1bc99417426 100644
--- a/polkadot/node/network/statement-distribution/src/metrics.rs
+++ b/polkadot/node/network/statement-distribution/src/metrics.rs
@@ -24,14 +24,19 @@ const HISTOGRAM_LATENCY_BUCKETS: &[f64] = &[
 #[derive(Clone)]
 struct MetricsInner {
+ // V1
 statements_distributed: prometheus::Counter,
 sent_requests: prometheus::Counter,
 received_responses: prometheus::CounterVec,
- active_leaves_update: prometheus::Histogram,
- share: prometheus::Histogram,
 network_bridge_update: prometheus::HistogramVec,
 statements_unexpected: prometheus::CounterVec,
 created_message_size: prometheus::Gauge,
+ // V1+
+ active_leaves_update: prometheus::Histogram,
+ share: prometheus::Histogram,
+ // V2+
+ peer_rate_limit_request_drop: prometheus::Counter,
+ max_parallel_requests_reached: prometheus::Counter,
 }
 
 /// Statement Distribution metrics.
@@ -114,6 +119,23 @@ impl Metrics {
 metrics.created_message_size.set(size as u64);
 }
 }
+
+ /// Update the dropped requests counter when a request is dropped
+ /// because of the peer rate limit.
+ pub fn on_request_dropped_peer_rate_limit(&self) {
+ if let Some(metrics) = &self.0 {
+ metrics.peer_rate_limit_request_drop.inc();
+ }
+ }
+
+ /// Update the max parallel requests reached counter.
+ /// This counter is updated when the maximum number of parallel requests is reached
+ /// and we are waiting for one of the requests to finish.
+ pub fn on_max_parallel_requests_reached(&self) {
+ if let Some(metrics) = &self.0 {
+ metrics.max_parallel_requests_reached.inc();
+ }
+ }
 }
 
 impl metrics::Metrics for Metrics {
@@ -193,6 +215,20 @@ impl metrics::Metrics for Metrics {
 ))?,
 registry,
 )?,
+ peer_rate_limit_request_drop: prometheus::register(
+ prometheus::Counter::new(
+ "polkadot_parachain_statement_distribution_peer_rate_limit_request_drop_total",
+ "Number of statement distribution requests dropped because of the peer rate limiting.",
+ )?,
+ registry,
+ )?,
+ max_parallel_requests_reached: prometheus::register(
+ prometheus::Counter::new(
+ "polkadot_parachain_statement_distribution_max_parallel_requests_reached_total",
+ "Number of times the maximum number of parallel requests was reached.",
+ )?,
+ registry,
+ )?,
 };
 Ok(Metrics(Some(metrics)))
 }
diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs
index 118e34e9206..8579ac15cbc 100644
--- a/polkadot/node/network/statement-distribution/src/v2/mod.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs
@@ -59,6 +59,8 @@ use sp_keystore::KeystorePtr;
 use fatality::Nested;
 use futures::{
 channel::{mpsc, oneshot},
+ future::FutureExt,
+ select,
 stream::FuturesUnordered,
 SinkExt, StreamExt,
 };
@@ -73,6 +75,7 @@ use std::{
 use crate::{
 error::{JfyiError,
JfyiErrorResult},
+ metrics::Metrics,
 LOG_TARGET,
 };
 use candidates::{BadAdvertisement, Candidates, PostConfirmation};
@@ -3423,35 +3426,61 @@ pub(crate) struct ResponderMessage {
 pub(crate) async fn respond_task(
 mut receiver: IncomingRequestReceiver,
 mut sender: mpsc::Sender,
+ metrics: Metrics,
 ) {
 let mut pending_out = FuturesUnordered::new();
+ let mut active_peers = HashSet::new();
+
 loop {
- // Ensure we are not handling too many requests in parallel.
- if pending_out.len() >= MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS as usize {
- // Wait for one to finish:
- pending_out.next().await;
- }
+ select! {
+ // New request
+ request_result = receiver.recv(|| vec![COST_INVALID_REQUEST]).fuse() => {
+ let request = match request_result.into_nested() {
+ Ok(Ok(v)) => v,
+ Err(fatal) => {
+ gum::debug!(target: LOG_TARGET, error = ?fatal, "Shutting down request responder");
+ return
+ },
+ Ok(Err(jfyi)) => {
+ gum::debug!(target: LOG_TARGET, error = ?jfyi, "Decoding request failed");
+ continue
+ },
+ };
 
- let req = match receiver.recv(|| vec![COST_INVALID_REQUEST]).await.into_nested() {
- Ok(Ok(v)) => v,
- Err(fatal) => {
- gum::debug!(target: LOG_TARGET, error = ?fatal, "Shutting down request responder");
- return
+ // If the peer is currently being served, drop the request
+ if active_peers.contains(&request.peer) {
+ gum::trace!(target: LOG_TARGET, "Peer already being served, dropping request");
+ metrics.on_request_dropped_peer_rate_limit();
+ continue
+ }
+
+ // If we are over the parallel limit, wait for one request to finish
+ if pending_out.len() >= MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS as usize {
+ gum::trace!(target: LOG_TARGET, "Over max parallel requests, waiting for one to finish");
+ metrics.on_max_parallel_requests_reached();
+ let (_, peer) = pending_out.select_next_some().await;
+ active_peers.remove(&peer);
+ }
+
+ // Start serving the request
+ let (pending_sent_tx, pending_sent_rx) = oneshot::channel();
+ let peer = request.peer;
+ if let Err(err) = sender
+ .feed(ResponderMessage { request, sent_feedback: pending_sent_tx })
+ .await
+ {
+ gum::debug!(target: LOG_TARGET, ?err, "Shutting down responder");
+ return
+ }
+ let future_with_peer = pending_sent_rx.map(move |result| (result, peer));
+ pending_out.push(future_with_peer);
+ active_peers.insert(peer);
 },
- Ok(Err(jfyi)) => {
- gum::debug!(target: LOG_TARGET, error = ?jfyi, "Decoding request failed");
- continue
+ // Request served/finished
+ result = pending_out.select_next_some() => {
+ let (_, peer) = result;
+ active_peers.remove(&peer);
 },
- };
-
- let (pending_sent_tx, pending_sent_rx) = oneshot::channel();
- if let Err(err) = sender
- .feed(ResponderMessage { request: req, sent_feedback: pending_sent_tx })
- .await
- {
- gum::debug!(target: LOG_TARGET, ?err, "Shutting down responder");
- return
 }
- pending_out.push(pending_sent_rx);
 }
 }
diff --git a/polkadot/node/network/statement-distribution/src/v2/requests.rs b/polkadot/node/network/statement-distribution/src/v2/requests.rs
index 1ed18ffd42a..b8ed34d26c8 100644
--- a/polkadot/node/network/statement-distribution/src/v2/requests.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/requests.rs
@@ -366,6 +366,7 @@ impl RequestManager {
 id,
 &props,
 &peer_advertised,
+ &response_manager,
 ) {
 None => continue,
 Some(t) => t,
@@ -387,14 +388,17 @@
 );
 
 let stored_id = id.clone();
- response_manager.push(Box::pin(async move {
- TaggedResponse {
- identifier: stored_id,
- requested_peer: target,
- props,
- response: response_fut.await,
- }
- }));
+
response_manager.push( + Box::pin(async move { + TaggedResponse { + identifier: stored_id, + requested_peer: target, + props, + response: response_fut.await, + } + }), + target, + ); entry.in_flight = true; @@ -422,28 +426,35 @@ impl RequestManager { /// A manager for pending responses. pub struct ResponseManager { pending_responses: FuturesUnordered>, + active_peers: HashSet, } impl ResponseManager { pub fn new() -> Self { - Self { pending_responses: FuturesUnordered::new() } + Self { pending_responses: FuturesUnordered::new(), active_peers: HashSet::new() } } /// Await the next incoming response to a sent request, or immediately /// return `None` if there are no pending responses. pub async fn incoming(&mut self) -> Option { - self.pending_responses - .next() - .await - .map(|response| UnhandledResponse { response }) + self.pending_responses.next().await.map(|response| { + self.active_peers.remove(&response.requested_peer); + UnhandledResponse { response } + }) } fn len(&self) -> usize { self.pending_responses.len() } - fn push(&mut self, response: BoxFuture<'static, TaggedResponse>) { + fn push(&mut self, response: BoxFuture<'static, TaggedResponse>, target: PeerId) { self.pending_responses.push(response); + self.active_peers.insert(target); + } + + /// Returns true if we are currently sending a request to the peer. + fn is_sending_to(&self, peer: &PeerId) -> bool { + self.active_peers.contains(peer) } } @@ -471,10 +482,16 @@ fn find_request_target_with_update( candidate_identifier: &CandidateIdentifier, props: &RequestProperties, peer_advertised: impl Fn(&CandidateIdentifier, &PeerId) -> Option, + response_manager: &ResponseManager, ) -> Option { let mut prune = Vec::new(); let mut target = None; for (i, p) in known_by.iter().enumerate() { + // If we are already sending to that peer, skip for now + if response_manager.is_sending_to(p) { + continue + } + let mut filter = match peer_advertised(candidate_identifier, p) { None => { prune.push(i); @@ -1002,7 +1019,8 @@ mod tests { candidate_receipt.descriptor.persisted_validation_data_hash = persisted_validation_data.hash(); let candidate = candidate_receipt.hash(); - let requested_peer = PeerId::random(); + let requested_peer_1 = PeerId::random(); + let requested_peer_2 = PeerId::random(); let identifier1 = request_manager .get_or_insert(relay_parent, candidate, 1.into()) @@ -1010,14 +1028,14 @@ mod tests { .clone(); request_manager .get_or_insert(relay_parent, candidate, 1.into()) - .add_peer(requested_peer); + .add_peer(requested_peer_1); let identifier2 = request_manager .get_or_insert(relay_parent, candidate, 2.into()) .identifier .clone(); request_manager .get_or_insert(relay_parent, candidate, 2.into()) - .add_peer(requested_peer); + .add_peer(requested_peer_2); assert_ne!(identifier1, identifier2); assert_eq!(request_manager.requests.len(), 2); @@ -1053,7 +1071,7 @@ mod tests { let response = UnhandledResponse { response: TaggedResponse { identifier: identifier1, - requested_peer, + requested_peer: requested_peer_1, props: request_properties.clone(), response: Ok(AttestedCandidateResponse { candidate_receipt: candidate_receipt.clone(), @@ -1076,13 +1094,13 @@ mod tests { assert_eq!( output, ResponseValidationOutput { - requested_peer, + requested_peer: requested_peer_1, request_status: CandidateRequestStatus::Complete { candidate: candidate_receipt.clone(), persisted_validation_data: persisted_validation_data.clone(), statements, }, - reputation_changes: vec![(requested_peer, BENEFIT_VALID_RESPONSE)], + reputation_changes: 
vec![(requested_peer_1, BENEFIT_VALID_RESPONSE)], } ); } @@ -1093,7 +1111,7 @@ mod tests { let response = UnhandledResponse { response: TaggedResponse { identifier: identifier2, - requested_peer, + requested_peer: requested_peer_2, props: request_properties, response: Ok(AttestedCandidateResponse { candidate_receipt: candidate_receipt.clone(), @@ -1115,12 +1133,14 @@ mod tests { assert_eq!( output, ResponseValidationOutput { - requested_peer, + requested_peer: requested_peer_2, request_status: CandidateRequestStatus::Outdated, reputation_changes: vec![], } ); } + + assert_eq!(request_manager.requests.len(), 0); } // Test case where we had a request in-flight and the request entry was garbage-collected on @@ -1293,4 +1313,140 @@ mod tests { assert_eq!(request_manager.requests.len(), 0); assert_eq!(request_manager.by_priority.len(), 0); } + + // Test case where we queue 2 requests to be sent to the same peer and 1 request to another + // peer. Same peer requests should be served one at a time but they should not block the other + // peer request. + #[test] + fn rate_limit_requests_to_same_peer() { + let mut request_manager = RequestManager::new(); + let mut response_manager = ResponseManager::new(); + + let relay_parent = Hash::from_low_u64_le(1); + + // Create 3 candidates + let mut candidate_receipt_1 = test_helpers::dummy_committed_candidate_receipt(relay_parent); + let persisted_validation_data_1 = dummy_pvd(); + candidate_receipt_1.descriptor.persisted_validation_data_hash = + persisted_validation_data_1.hash(); + let candidate_1 = candidate_receipt_1.hash(); + + let mut candidate_receipt_2 = test_helpers::dummy_committed_candidate_receipt(relay_parent); + let persisted_validation_data_2 = dummy_pvd(); + candidate_receipt_2.descriptor.persisted_validation_data_hash = + persisted_validation_data_2.hash(); + let candidate_2 = candidate_receipt_2.hash(); + + let mut candidate_receipt_3 = test_helpers::dummy_committed_candidate_receipt(relay_parent); + let persisted_validation_data_3 = dummy_pvd(); + candidate_receipt_3.descriptor.persisted_validation_data_hash = + persisted_validation_data_3.hash(); + let candidate_3 = candidate_receipt_3.hash(); + + // Create 2 peers + let requested_peer_1 = PeerId::random(); + let requested_peer_2 = PeerId::random(); + + let group_size = 3; + let group = &[ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]; + let unwanted_mask = StatementFilter::blank(group_size); + let disabled_mask: BitVec = Default::default(); + let request_properties = RequestProperties { unwanted_mask, backing_threshold: None }; + let request_props = |_identifier: &CandidateIdentifier| Some((&request_properties).clone()); + let peer_advertised = + |_identifier: &CandidateIdentifier, _peer: &_| Some(StatementFilter::full(group_size)); + + // Add request for candidate 1 from peer 1 + let identifier1 = request_manager + .get_or_insert(relay_parent, candidate_1, 1.into()) + .identifier + .clone(); + request_manager + .get_or_insert(relay_parent, candidate_1, 1.into()) + .add_peer(requested_peer_1); + + // Add request for candidate 3 from peer 2 (this one can be served in parallel) + let _identifier3 = request_manager + .get_or_insert(relay_parent, candidate_3, 1.into()) + .identifier + .clone(); + request_manager + .get_or_insert(relay_parent, candidate_3, 1.into()) + .add_peer(requested_peer_2); + + // Successfully dispatch request for candidate 1 from peer 1 and candidate 3 from peer 2 + for _ in 0..2 { + let outgoing = + request_manager.next_request(&mut response_manager, 
request_props, peer_advertised); + assert!(outgoing.is_some()); + } + assert_eq!(response_manager.active_peers.len(), 2); + assert!(response_manager.is_sending_to(&requested_peer_1)); + assert!(response_manager.is_sending_to(&requested_peer_2)); + assert_eq!(request_manager.requests.len(), 2); + + // Add request for candidate 2 from peer 1 + let _identifier2 = request_manager + .get_or_insert(relay_parent, candidate_2, 1.into()) + .identifier + .clone(); + request_manager + .get_or_insert(relay_parent, candidate_2, 1.into()) + .add_peer(requested_peer_1); + + // Do not dispatch the request for the second candidate from peer 1 (already serving that + // peer) + let outgoing = + request_manager.next_request(&mut response_manager, request_props, peer_advertised); + assert!(outgoing.is_none()); + assert_eq!(response_manager.active_peers.len(), 2); + assert!(response_manager.is_sending_to(&requested_peer_1)); + assert!(response_manager.is_sending_to(&requested_peer_2)); + assert_eq!(request_manager.requests.len(), 3); + + // Manually mark response received (response future resolved) + response_manager.active_peers.remove(&requested_peer_1); + response_manager.pending_responses = FuturesUnordered::new(); + + // Validate first response (candidate 1 from peer 1) + { + let statements = vec![]; + let response = UnhandledResponse { + response: TaggedResponse { + identifier: identifier1, + requested_peer: requested_peer_1, + props: request_properties.clone(), + response: Ok(AttestedCandidateResponse { + candidate_receipt: candidate_receipt_1.clone(), + persisted_validation_data: persisted_validation_data_1.clone(), + statements, + }), + }, + }; + let validator_key_lookup = |_v| None; + let allowed_para_lookup = |_para, _g_index| true; + let _output = response.validate_response( + &mut request_manager, + group, + 0, + validator_key_lookup, + allowed_para_lookup, + disabled_mask.clone(), + ); + + // First request served successfully + assert_eq!(request_manager.requests.len(), 2); + assert_eq!(response_manager.active_peers.len(), 1); + assert!(response_manager.is_sending_to(&requested_peer_2)); + } + + // Check if the request that was ignored previously will be served now + let outgoing = + request_manager.next_request(&mut response_manager, request_props, peer_advertised); + assert!(outgoing.is_some()); + assert_eq!(response_manager.active_peers.len(), 2); + assert!(response_manager.is_sending_to(&requested_peer_1)); + assert!(response_manager.is_sending_to(&requested_peer_2)); + assert_eq!(request_manager.requests.len(), 2); + } } diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs index 8cf13980214..c9de42d2c46 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs @@ -1891,7 +1891,7 @@ fn local_node_sanity_checks_incoming_requests() { let mask = StatementFilter::blank(state.config.group_size + 1); let response = state .send_request( - peer_c, + peer_a, request_v2::AttestedCandidateRequest { candidate_hash: candidate.hash(), mask }, ) .await diff --git a/polkadot/zombienet_tests/functional/0012-spam-statement-distribution-requests.toml b/polkadot/zombienet_tests/functional/0012-spam-statement-distribution-requests.toml new file mode 100644 index 00000000000..14208425d62 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0012-spam-statement-distribution-requests.toml @@ -0,0 +1,43 @@ 
+[settings]
+timeout = 1000
+
+[relaychain.genesis.runtimeGenesis.patch.configuration.config]
+ needed_approvals = 2
+
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
+ max_validators_per_core = 5
+
+[relaychain]
+default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
+chain = "rococo-local"
+default_command = "polkadot"
+
+[relaychain.default_resources]
+limits = { memory = "4G", cpu = "2" }
+requests = { memory = "2G", cpu = "1" }
+
+ [[relaychain.node_groups]]
+ name = "honest"
+ count = 4
+ args = ["-lparachain=debug,parachain::statement-distribution=trace"]
+
+ [[relaychain.nodes]]
+ image = "{{MALUS_IMAGE}}"
+ name = "malus"
+ command = "malus spam-statement-requests"
+ args = [ "--alice", "-lparachain=debug,MALUS=trace", "--spam-factor=1000" ]
+
+{% for id in range(2000,2001) %}
+[[parachains]]
+id = {{id}}
+ [parachains.collator]
+ image = "{{COL_IMAGE}}"
+ name = "collator"
+ command = "undying-collator"
+ args = ["-lparachain=debug"]
+{% endfor %}
+
+[types.Header]
+number = "u64"
+parent_hash = "Hash"
+post_state = "Hash"
diff --git a/polkadot/zombienet_tests/functional/0012-spam-statement-distribution-requests.zndsl b/polkadot/zombienet_tests/functional/0012-spam-statement-distribution-requests.zndsl
new file mode 100644
index 00000000000..9985dd24ee3
--- /dev/null
+++ b/polkadot/zombienet_tests/functional/0012-spam-statement-distribution-requests.zndsl
@@ -0,0 +1,27 @@
+Description: Test if parachains progress when a group is getting spammed by statement distribution requests.
+Network: ./0012-spam-statement-distribution-requests.toml
+Creds: config
+
+# Check authority status and peers.
+malus: reports node_roles is 4
+honest: reports node_roles is 4
+
+# Ensure parachains are registered.
+honest: parachain 2000 is registered within 60 seconds
+
+# Ensure that malus is already attempting to DoS
+malus: log line contains "😈 Duplicating AttestedCandidateV2 request" within 90 seconds
+
+# Ensure parachains made progress.
+honest: parachain 2000 block height is at least 10 within 200 seconds
+
+# Ensure that honest nodes drop extra requests
+honest: log line contains "Peer already being served, dropping request" within 60 seconds
+
+# Check lag - approval
+honest: reports polkadot_parachain_approval_checking_finality_lag is 0
+
+# Check lag - dispute conclusion
+honest: reports polkadot_parachain_disputes_finality_lag is 0
+
+
diff --git a/prdoc/pr_3444.prdoc b/prdoc/pr_3444.prdoc
new file mode 100644
index 00000000000..3afb3810641
--- /dev/null
+++ b/prdoc/pr_3444.prdoc
@@ -0,0 +1,25 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Rate-limiting of statement distribution v2 requests to 1 per peer
+
+doc:
+ - audience: Node Dev
+ description: |
+ A new malicious node variant that sends duplicate statement
+ distribution messages to spam other peers.
+
+ - audience: Node Operator
+ description: |
+ Added rate-limiting in the statement distribution request-response
+ protocol. Requesters will not issue another request to a peer if one
+ is already pending with that peer, and receiving nodes will reject
+ requests from peers that they are currently serving.
+ This should reduce the risk of validator-validator DoS attacks and
+ better load-balance statement distribution.
+
+crates:
+ - name: polkadot-test-malus
+ bump: minor
+ - name: polkadot-statement-distribution
+ bump: minor
-- 
GitLab

From e5a93fbcd4a6acec7ab83865708e5c5df3534a7b Mon Sep 17 00:00:00 2001
From: Branislav Kontur 
Date: Wed, 1 May 2024 22:01:55 +0200
Subject: [PATCH 098/107] HRMP - set `DefaultChannelSizeAndCapacityWithSystem`
 with dynamic values according to the `ActiveConfig` (#4332)

## Summary

This PR enhances the capability to set `DefaultChannelSizeAndCapacityWithSystem` for HRMP. Currently, all testnets (Rococo, Westend) have a hard-coded value set as 'half of the maximum' determined by the live `ActiveConfig`. While this approach appears satisfactory, potential issues could arise if the live `ActiveConfig` is adjusted below these hard-coded values, necessitating a new runtime release with updated values. Additionally, hard-coded values have consequences, such as Rococo's benchmarks not functioning: https://gitlab.parity.io/parity/mirrors/polkadot-sdk/-/jobs/6082656.

## Solution

The proposed solution here is to utilize `ActiveConfigHrmpChannelSizeAndCapacityRatio`, which reads the current `ActiveConfig` and calculates `DefaultChannelSizeAndCapacityWithSystem`, for example, "half of the maximum" based on live data. This way, whenever `ActiveConfig` is modified, `ActiveConfigHrmpChannelSizeAndCapacityRatio` automatically returns adjusted values with the appropriate ratio. Thus, manual adjustments and new runtime releases become unnecessary.

Relates to a comment/discussion: https://github.com/paritytech/polkadot-sdk/pull/3721/files#r1541001420
Relates to a comment/discussion: https://github.com/paritytech/polkadot-sdk/pull/3721/files#r1549291588

---------

Co-authored-by: command-bot <>
---
 .../runtime/parachains/src/configuration.rs | 15 +-
 .../parachains/src/configuration/tests.rs | 50 +-
 polkadot/runtime/rococo/src/lib.rs | 8 +-
 .../src/weights/runtime_parachains_hrmp.rs | 429 +++++++++---------
 polkadot/runtime/test-runtime/src/lib.rs | 13 +-
 polkadot/runtime/westend/src/lib.rs | 8 +-
 .../src/weights/runtime_parachains_hrmp.rs | 412 +++++++++--------
 7 files changed, 521 insertions(+), 414 deletions(-)

diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs
index b5dad6c6e86..34923897f02 100644
--- a/polkadot/runtime/parachains/src/configuration.rs
+++ b/polkadot/runtime/parachains/src/configuration.rs
@@ -30,7 +30,7 @@ use primitives::{
 NodeFeatures, SessionIndex, LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE,
 MAX_POV_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE,
 };
-use sp_runtime::{traits::Zero, Perbill};
+use sp_runtime::{traits::Zero, Perbill, Percent};
 use sp_std::prelude::*;
 
 #[cfg(test)]
@@ -1460,3 +1460,16 @@ impl Pallet {
 Ok(())
 }
 }
+
+/// The implementation of `Get<(u32, u32)>` which reads `ActiveConfig` and returns `P` percent of
+/// `hrmp_channel_max_message_size` / `hrmp_channel_max_capacity`.
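+///
+/// Worked example (illustrative numbers, not taken from this patch): with an
+/// active config of `hrmp_channel_max_message_size = 102400` and
+/// `hrmp_channel_max_capacity = 1000`, a ratio of `Percent::from_percent(50)`
+/// yields `(51200, 500)`, i.e. the "half of the maximum" values that the
+/// testnets previously hard-coded.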
+pub struct ActiveConfigHrmpChannelSizeAndCapacityRatio(sp_std::marker::PhantomData<(T, P)>); +impl> Get<(u32, u32)> + for ActiveConfigHrmpChannelSizeAndCapacityRatio +{ + fn get() -> (u32, u32) { + let config = ActiveConfig::::get(); + let percent = P::get(); + (percent * config.hrmp_channel_max_message_size, percent * config.hrmp_channel_max_capacity) + } +} diff --git a/polkadot/runtime/parachains/src/configuration/tests.rs b/polkadot/runtime/parachains/src/configuration/tests.rs index 239b466fde3..64bbb8481fc 100644 --- a/polkadot/runtime/parachains/src/configuration/tests.rs +++ b/polkadot/runtime/parachains/src/configuration/tests.rs @@ -17,7 +17,7 @@ use super::*; use crate::{ configuration, - mock::{new_test_ext, Configuration, ParasShared, RuntimeOrigin, Test}, + mock::{new_test_ext, Configuration, MockGenesisConfig, ParasShared, RuntimeOrigin, Test}, }; use bitvec::{bitvec, prelude::Lsb0}; use frame_support::{assert_err, assert_noop, assert_ok}; @@ -547,3 +547,51 @@ fn verify_externally_accessible() { ); }); } + +#[test] +fn active_config_hrmp_channel_size_and_capacity_ratio_works() { + frame_support::parameter_types! { + pub Ratio100: Percent = Percent::from_percent(100); + pub Ratio50: Percent = Percent::from_percent(50); + } + + let mut genesis: MockGenesisConfig = Default::default(); + genesis.configuration.config.hrmp_channel_max_message_size = 1024; + genesis.configuration.config.hrmp_channel_max_capacity = 100; + + new_test_ext(genesis).execute_with(|| { + let active_config = configuration::ActiveConfig::::get(); + assert_eq!(active_config.hrmp_channel_max_message_size, 1024); + assert_eq!(active_config.hrmp_channel_max_capacity, 100); + + assert_eq!( + ActiveConfigHrmpChannelSizeAndCapacityRatio::::get(), + (1024, 100) + ); + assert_eq!(ActiveConfigHrmpChannelSizeAndCapacityRatio::::get(), (512, 50)); + + // change ActiveConfig + assert_ok!(Configuration::set_hrmp_channel_max_message_size( + RuntimeOrigin::root(), + active_config.hrmp_channel_max_message_size * 4 + )); + assert_ok!(Configuration::set_hrmp_channel_max_capacity( + RuntimeOrigin::root(), + active_config.hrmp_channel_max_capacity * 4 + )); + on_new_session(1); + on_new_session(2); + let active_config = configuration::ActiveConfig::::get(); + assert_eq!(active_config.hrmp_channel_max_message_size, 4096); + assert_eq!(active_config.hrmp_channel_max_capacity, 400); + + assert_eq!( + ActiveConfigHrmpChannelSizeAndCapacityRatio::::get(), + (4096, 400) + ); + assert_eq!( + ActiveConfigHrmpChannelSizeAndCapacityRatio::::get(), + (2048, 200) + ); + }) +} diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 287ae9937da..a52d800ad38 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -53,6 +53,7 @@ use runtime_common::{ use runtime_parachains::{ assigner_coretime as parachains_assigner_coretime, assigner_on_demand as parachains_assigner_on_demand, configuration as parachains_configuration, + configuration::ActiveConfigHrmpChannelSizeAndCapacityRatio, coretime, disputes as parachains_disputes, disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, @@ -1033,7 +1034,7 @@ impl pallet_message_queue::Config for Runtime { impl parachains_dmp::Config for Runtime {} parameter_types! 
{ - pub const DefaultChannelSizeAndCapacityWithSystem: (u32, u32) = (51200, 500); + pub const HrmpChannelSizeAndCapacityWithSystemRatio: Percent = Percent::from_percent(100); } impl parachains_hrmp::Config for Runtime { @@ -1041,7 +1042,10 @@ impl parachains_hrmp::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ChannelManager = EnsureRoot; type Currency = Balances; - type DefaultChannelSizeAndCapacityWithSystem = DefaultChannelSizeAndCapacityWithSystem; + type DefaultChannelSizeAndCapacityWithSystem = ActiveConfigHrmpChannelSizeAndCapacityRatio< + Runtime, + HrmpChannelSizeAndCapacityWithSystemRatio, + >; type WeightInfo = weights::runtime_parachains_hrmp::WeightInfo; } diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs index 97b84155b36..1d83e97ef0e 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs @@ -16,25 +16,26 @@ //! Autogenerated weights for `runtime_parachains::hrmp` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-04-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet -// --chain=rococo-dev // --steps=50 // --repeat=20 -// --pallet=runtime_parachains::hrmp // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_parachains_hrmp.rs +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=runtime_parachains::hrmp +// --chain=rococo-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,105 +48,97 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::hrmp`. 
pub struct WeightInfo(PhantomData); impl runtime_parachains::hrmp::WeightInfo for WeightInfo { - /// Storage: Paras ParaLifecycles (r:2 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannels (r:1 w:0) - /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpEgressChannelsIndex (r:1 w:0) - /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:1 w:0) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn hrmp_init_open_channel() -> Weight { // Proof Size summary in bytes: - // Measured: `704` - // Estimated: `6644` - // Minimum execution time: 41_564_000 picoseconds. - Weight::from_parts(42_048_000, 0) - .saturating_add(Weight::from_parts(0, 6644)) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `488` + // Estimated: `3953` + // Minimum execution time: 34_034_000 picoseconds. 
+ Weight::from_parts(35_191_000, 0) + .saturating_add(Weight::from_parts(0, 3953)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpIngressChannelsIndex (r:1 w:0) - /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:1 w:1) - /// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn hrmp_accept_open_channel() -> Weight { // Proof Size summary in bytes: - // Measured: `936` - // Estimated: `4401` - // Minimum execution time: 43_570_000 picoseconds. - Weight::from_parts(44_089_000, 0) - .saturating_add(Weight::from_parts(0, 4401)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `478` + // Estimated: `3943` + // Minimum execution time: 30_115_000 picoseconds. 
+ Weight::from_parts(31_060_000, 0) + .saturating_add(Weight::from_parts(0, 3943)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Hrmp HrmpChannels (r:1 w:0) - /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpCloseChannelRequests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpCloseChannelRequests (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpCloseChannelRequestsList (r:1 w:1) - /// Proof Skipped: Hrmp HrmpCloseChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Hrmp::HrmpChannels` (r:1 w:0) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpCloseChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpCloseChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpCloseChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpCloseChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn hrmp_close_channel() -> Weight { // Proof Size summary in bytes: - // Measured: `807` - // Estimated: `4272` - // Minimum execution time: 36_594_000 picoseconds. - Weight::from_parts(37_090_000, 0) - .saturating_add(Weight::from_parts(0, 4272)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `591` + // Estimated: `4056` + // Minimum execution time: 30_982_000 picoseconds. 
+ Weight::from_parts(32_034_000, 0) + .saturating_add(Weight::from_parts(0, 4056)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Hrmp HrmpIngressChannelsIndex (r:128 w:128) - /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpEgressChannelsIndex (r:128 w:128) - /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannels (r:254 w:254) - /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:0 w:1) - /// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannelContents (r:0 w:254) - /// Proof Skipped: Hrmp HrmpChannelContents (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestCount (r:0 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:254 w:254) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:0 w:1) + /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelContents` (r:0 w:254) + /// Proof: `Hrmp::HrmpChannelContents` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:0 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 127]`. /// The range of component `e` is `[0, 127]`. fn force_clean_hrmp(i: u32, e: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `264 + e * (100 ±0) + i * (100 ±0)` - // Estimated: `3726 + e * (2575 ±0) + i * (2575 ±0)` - // Minimum execution time: 1_085_140_000 picoseconds. - Weight::from_parts(1_100_901_000, 0) - .saturating_add(Weight::from_parts(0, 3726)) - // Standard Error: 98_982 - .saturating_add(Weight::from_parts(3_229_112, 0).saturating_mul(i.into())) - // Standard Error: 98_982 - .saturating_add(Weight::from_parts(3_210_944, 0).saturating_mul(e.into())) + // Measured: `297 + e * (100 ±0) + i * (100 ±0)` + // Estimated: `3759 + e * (2575 ±0) + i * (2575 ±0)` + // Minimum execution time: 1_158_665_000 picoseconds. 
+ Weight::from_parts(1_164_378_000, 0) + .saturating_add(Weight::from_parts(0, 3759)) + // Standard Error: 103_726 + .saturating_add(Weight::from_parts(3_444_855, 0).saturating_mul(i.into())) + // Standard Error: 103_726 + .saturating_add(Weight::from_parts(3_527_628, 0).saturating_mul(e.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(i.into()))) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(e.into()))) @@ -155,139 +148,139 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf .saturating_add(Weight::from_parts(0, 2575).saturating_mul(e.into())) .saturating_add(Weight::from_parts(0, 2575).saturating_mul(i.into())) } - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequests (r:128 w:128) - /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:256 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpIngressChannelsIndex (r:128 w:128) - /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpEgressChannelsIndex (r:128 w:128) - /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestCount (r:128 w:128) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:128 w:128) - /// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannels (r:0 w:128) - /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:128 w:128) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:256 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:128 w:128) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:128 w:128) + /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:0 w:128) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[0, 128]`. 
fn force_process_hrmp_open(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `779 + c * (136 ±0)` - // Estimated: `2234 + c * (5086 ±0)` - // Minimum execution time: 10_497_000 picoseconds. - Weight::from_parts(6_987_455, 0) - .saturating_add(Weight::from_parts(0, 2234)) - // Standard Error: 18_540 - .saturating_add(Weight::from_parts(18_788_534, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) + // Measured: `525 + c * (136 ±0)` + // Estimated: `1980 + c * (5086 ±0)` + // Minimum execution time: 5_870_000 picoseconds. + Weight::from_parts(2_363_864, 0) + .saturating_add(Weight::from_parts(0, 1980)) + // Standard Error: 16_657 + .saturating_add(Weight::from_parts(20_507_232, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().reads((7_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((6_u64).saturating_mul(c.into()))) .saturating_add(Weight::from_parts(0, 5086).saturating_mul(c.into())) } - /// Storage: Hrmp HrmpCloseChannelRequestsList (r:1 w:1) - /// Proof Skipped: Hrmp HrmpCloseChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannels (r:128 w:128) - /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpEgressChannelsIndex (r:128 w:128) - /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpIngressChannelsIndex (r:128 w:128) - /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpCloseChannelRequests (r:0 w:128) - /// Proof Skipped: Hrmp HrmpCloseChannelRequests (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpChannelContents (r:0 w:128) - /// Proof Skipped: Hrmp HrmpChannelContents (max_values: None, max_size: None, mode: Measured) + /// Storage: `Hrmp::HrmpCloseChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpCloseChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:128 w:128) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpCloseChannelRequests` (r:0 w:128) + /// Proof: `Hrmp::HrmpCloseChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelContents` (r:0 w:128) + /// Proof: `Hrmp::HrmpChannelContents` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[0, 128]`. fn force_process_hrmp_close(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `335 + c * (124 ±0)` - // Estimated: `1795 + c * (2600 ±0)` - // Minimum execution time: 6_575_000 picoseconds. - Weight::from_parts(1_228_642, 0) - .saturating_add(Weight::from_parts(0, 1795)) - // Standard Error: 14_826 - .saturating_add(Weight::from_parts(11_604_038, 0).saturating_mul(c.into())) + // Measured: `368 + c * (124 ±0)` + // Estimated: `1828 + c * (2600 ±0)` + // Minimum execution time: 4_766_000 picoseconds. 
+ Weight::from_parts(4_988_812, 0) + .saturating_add(Weight::from_parts(0, 1828)) + // Standard Error: 10_606 + .saturating_add(Weight::from_parts(12_579_429, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(c.into()))) .saturating_add(Weight::from_parts(0, 2600).saturating_mul(c.into())) } - /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[0, 128]`. fn hrmp_cancel_open_request(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1026 + c * (13 ±0)` - // Estimated: `4295 + c * (15 ±0)` - // Minimum execution time: 22_301_000 picoseconds. - Weight::from_parts(26_131_473, 0) - .saturating_add(Weight::from_parts(0, 4295)) - // Standard Error: 830 - .saturating_add(Weight::from_parts(49_448, 0).saturating_mul(c.into())) + // Measured: `1059 + c * (13 ±0)` + // Estimated: `4328 + c * (15 ±0)` + // Minimum execution time: 17_228_000 picoseconds. + Weight::from_parts(27_236_563, 0) + .saturating_add(Weight::from_parts(0, 4328)) + // Standard Error: 2_419 + .saturating_add(Weight::from_parts(102_107, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(Weight::from_parts(0, 15).saturating_mul(c.into())) } - /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1) - /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Hrmp HrmpOpenChannelRequests (r:128 w:128) - /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:128 w:128) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[0, 128]`. fn clean_open_channel_requests(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `243 + c * (63 ±0)` - // Estimated: `1722 + c * (2538 ±0)` - // Minimum execution time: 5_234_000 picoseconds. 
-		Weight::from_parts(7_350_270, 0)
-			.saturating_add(Weight::from_parts(0, 1722))
-			// Standard Error: 3_105
-			.saturating_add(Weight::from_parts(2_981_935, 0).saturating_mul(c.into()))
+		// Measured: `276 + c * (63 ±0)`
+		// Estimated: `1755 + c * (2538 ±0)`
+		// Minimum execution time: 3_549_000 picoseconds.
+		Weight::from_parts(5_799_542, 0)
+			.saturating_add(Weight::from_parts(0, 1755))
+			// Standard Error: 3_025
+			.saturating_add(Weight::from_parts(3_173_294, 0).saturating_mul(c.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into())))
 			.saturating_add(T::DbWeight::get().writes(1))
 			.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into())))
 			.saturating_add(Weight::from_parts(0, 2538).saturating_mul(c.into()))
 	}
-	/// Storage: Paras ParaLifecycles (r:2 w:0)
-	/// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Configuration ActiveConfig (r:1 w:0)
-	/// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpChannels (r:1 w:0)
-	/// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpEgressChannelsIndex (r:1 w:0)
-	/// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: Dmp DownwardMessageQueues (r:2 w:2)
-	/// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Dmp DownwardMessageQueueHeads (r:2 w:2)
-	/// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpIngressChannelsIndex (r:1 w:0)
-	/// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured)
-	fn force_open_hrmp_channel(_c: u32, ) -> Weight {
+	/// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::ParaLifecycles` (r:1 w:0)
+	/// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpChannels` (r:1 w:0)
+	/// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:1 w:0)
+	/// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueues` (r:2 w:2)
+	/// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueueHeads` (r:2 w:2)
+	/// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:1 w:0)
+	/// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// The range of component `c` is `[0, 1]`.
+	fn force_open_hrmp_channel(c: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `704`
-		// Estimated: `6644`
-		// Minimum execution time: 55_611_000 picoseconds.
-		Weight::from_parts(56_488_000, 0)
-			.saturating_add(Weight::from_parts(0, 6644))
-			.saturating_add(T::DbWeight::get().reads(14))
+		// Measured: `488 + c * (235 ±0)`
+		// Estimated: `6428 + c * (235 ±0)`
+		// Minimum execution time: 48_392_000 picoseconds.
+		Weight::from_parts(50_509_977, 0)
+			.saturating_add(Weight::from_parts(0, 6428))
+			// Standard Error: 133_658
+			.saturating_add(Weight::from_parts(10_215_322, 0).saturating_mul(c.into()))
+			.saturating_add(T::DbWeight::get().reads(12))
 			.saturating_add(T::DbWeight::get().writes(8))
+			.saturating_add(Weight::from_parts(0, 235).saturating_mul(c.into()))
 	}
 	/// Storage: `Paras::ParaLifecycles` (r:1 w:0)
 	/// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
@@ -311,11 +304,11 @@ impl<T: frame_system::Config> runtime_parachains::hrmp::WeightInfo for WeightInf
 	/// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn establish_system_channel() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `417`
-		// Estimated: `6357`
-		// Minimum execution time: 629_674_000 picoseconds.
-		Weight::from_parts(640_174_000, 0)
-			.saturating_add(Weight::from_parts(0, 6357))
+		// Measured: `488`
+		// Estimated: `6428`
+		// Minimum execution time: 48_465_000 picoseconds.
+		Weight::from_parts(50_433_000, 0)
+			.saturating_add(Weight::from_parts(0, 6428))
 			.saturating_add(T::DbWeight::get().reads(12))
 			.saturating_add(T::DbWeight::get().writes(8))
 	}
@@ -323,22 +316,42 @@ impl<T: frame_system::Config> runtime_parachains::hrmp::WeightInfo for WeightInf
 	/// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn poke_channel_deposits() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `263`
-		// Estimated: `3728`
-		// Minimum execution time: 173_371_000 picoseconds.
-		Weight::from_parts(175_860_000, 0)
-			.saturating_add(Weight::from_parts(0, 3728))
+		// Measured: `296`
+		// Estimated: `3761`
+		// Minimum execution time: 11_835_000 picoseconds.
+		Weight::from_parts(12_380_000, 0)
+			.saturating_add(Weight::from_parts(0, 3761))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
+	/// Storage: `Paras::ParaLifecycles` (r:2 w:0)
+	/// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpOpenChannelRequests` (r:2 w:2)
+	/// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpChannels` (r:2 w:0)
+	/// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:2 w:0)
+	/// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:2 w:2)
+	/// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueues` (r:2 w:2)
+	/// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueueHeads` (r:2 w:2)
+	/// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:2 w:0)
+	/// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:2 w:2)
+	/// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn establish_channel_with_system() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `417`
-		// Estimated: `6357`
-		// Minimum execution time: 629_674_000 picoseconds.
-		Weight::from_parts(640_174_000, 0)
-			.saturating_add(Weight::from_parts(0, 6357))
-			.saturating_add(T::DbWeight::get().reads(12))
-			.saturating_add(T::DbWeight::get().writes(8))
+		// Measured: `488`
+		// Estimated: `6428`
+		// Minimum execution time: 79_633_000 picoseconds.
+		Weight::from_parts(80_846_000, 0)
+			.saturating_add(Weight::from_parts(0, 6428))
+			.saturating_add(T::DbWeight::get().reads(19))
+			.saturating_add(T::DbWeight::get().writes(11))
 	}
 }
diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs
index 87becf73cb7..b56e2f52f5f 100644
--- a/polkadot/runtime/test-runtime/src/lib.rs
+++ b/polkadot/runtime/test-runtime/src/lib.rs
@@ -29,7 +29,9 @@ use sp_std::{
 use polkadot_runtime_parachains::{
 	assigner_parachains as parachains_assigner_parachains,
-	configuration as parachains_configuration, disputes as parachains_disputes,
+	configuration as parachains_configuration,
+	configuration::ActiveConfigHrmpChannelSizeAndCapacityRatio,
+	disputes as parachains_disputes,
 	disputes::slashing as parachains_slashing,
 	dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion,
 	initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras,
@@ -78,7 +80,7 @@ use sp_runtime::{
 		SaturatedConversion, StaticLookup, Verify,
 	},
 	transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity},
-	ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill,
+	ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent,
 };
 use sp_staking::SessionIndex;
 #[cfg(any(feature = "std", test))]
@@ -557,7 +559,7 @@ impl parachains_dmp::Config for Runtime {}
 
 parameter_types! {
 	pub const FirstMessageFactorPercent: u64 = 100;
-	pub const DefaultChannelSizeAndCapacityWithSystem: (u32, u32) = (51200, 500);
+	pub const HrmpChannelSizeAndCapacityWithSystemRatio: Percent = Percent::from_percent(100);
 }
 
 impl parachains_hrmp::Config for Runtime {
@@ -565,7 +567,10 @@ impl parachains_hrmp::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
 	type ChannelManager = frame_system::EnsureRoot<AccountId>;
 	type Currency = Balances;
-	type DefaultChannelSizeAndCapacityWithSystem = DefaultChannelSizeAndCapacityWithSystem;
+	type DefaultChannelSizeAndCapacityWithSystem = ActiveConfigHrmpChannelSizeAndCapacityRatio<
+		Runtime,
+		HrmpChannelSizeAndCapacityWithSystemRatio,
+	>;
 	type WeightInfo = parachains_hrmp::TestWeightInfo;
 }
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index 7125f5d34c4..f51233cabf0 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -68,6 +68,7 @@ use runtime_common::{
 use runtime_parachains::{
 	assigner_coretime as parachains_assigner_coretime,
 	assigner_on_demand as parachains_assigner_on_demand, configuration as parachains_configuration,
+	configuration::ActiveConfigHrmpChannelSizeAndCapacityRatio,
 	coretime, disputes as parachains_disputes,
 	disputes::slashing as parachains_slashing,
 	dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion,
@@ -1163,7 +1164,7 @@ impl pallet_message_queue::Config for Runtime {
 impl parachains_dmp::Config for Runtime {}
 
 parameter_types!
 {
-	pub const DefaultChannelSizeAndCapacityWithSystem: (u32, u32) = (4096, 4);
+	pub const HrmpChannelSizeAndCapacityWithSystemRatio: Percent = Percent::from_percent(100);
 }
 
 impl parachains_hrmp::Config for Runtime {
@@ -1171,7 +1172,10 @@ impl parachains_hrmp::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
 	type ChannelManager = EnsureRoot<AccountId>;
 	type Currency = Balances;
-	type DefaultChannelSizeAndCapacityWithSystem = DefaultChannelSizeAndCapacityWithSystem;
+	type DefaultChannelSizeAndCapacityWithSystem = ActiveConfigHrmpChannelSizeAndCapacityRatio<
+		Runtime,
+		HrmpChannelSizeAndCapacityWithSystemRatio,
+	>;
 	type WeightInfo = weights::runtime_parachains_hrmp::WeightInfo<Runtime>;
 }
diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_hrmp.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_hrmp.rs
index 3d2ab827b8f..529bdf76105 100644
--- a/polkadot/runtime/westend/src/weights/runtime_parachains_hrmp.rs
+++ b/polkadot/runtime/westend/src/weights/runtime_parachains_hrmp.rs
@@ -16,28 +16,26 @@
 
 //! Autogenerated weights for `runtime_parachains::hrmp`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-06-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-04-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/polkadot
+// target/production/polkadot
 // benchmark
 // pallet
-// --chain=westend-dev
 // --steps=50
 // --repeat=20
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --pallet=runtime_parachains::hrmp
 // --extrinsic=*
-// --execution=wasm
 // --wasm-execution=compiled
-// --header=./file_header.txt
-// --output=./runtime/westend/src/weights/runtime_parachains_hrmp.rs
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=runtime_parachains::hrmp
+// --chain=westend-dev
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/westend/src/weights/
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -50,99 +48,97 @@ use core::marker::PhantomData;
 
 /// Weight functions for `runtime_parachains::hrmp`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> runtime_parachains::hrmp::WeightInfo for WeightInfo<T> {
-	/// Storage: Paras ParaLifecycles (r:2 w:0)
-	/// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpChannels (r:1 w:0)
-	/// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpEgressChannelsIndex (r:1 w:0)
-	/// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: Dmp DownwardMessageQueues (r:1 w:1)
-	/// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1)
-	/// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured)
+	/// Storage: `Paras::ParaLifecycles` (r:1 w:0)
+	/// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpChannels` (r:1 w:0)
+	/// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:1 w:0)
+	/// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+	/// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1)
+	/// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn hrmp_init_open_channel() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `307`
-		// Estimated: `6247`
-		// Minimum execution time: 35_676_000 picoseconds.
-		Weight::from_parts(36_608_000, 0)
-			.saturating_add(Weight::from_parts(0, 6247))
-			.saturating_add(T::DbWeight::get().reads(9))
+		// Measured: `455`
+		// Estimated: `3920`
+		// Minimum execution time: 32_195_000 picoseconds.
+		Weight::from_parts(33_340_000, 0)
+			.saturating_add(Weight::from_parts(0, 3920))
+			.saturating_add(T::DbWeight::get().reads(8))
 			.saturating_add(T::DbWeight::get().writes(5))
 	}
-	/// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Paras ParaLifecycles (r:1 w:0)
-	/// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpIngressChannelsIndex (r:1 w:0)
-	/// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Dmp DownwardMessageQueues (r:1 w:1)
-	/// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1)
-	/// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured)
+	/// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:1 w:0)
+	/// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+	/// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1)
+	/// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn hrmp_accept_open_channel() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `421`
-		// Estimated: `3886`
-		// Minimum execution time: 32_773_000 picoseconds.
-		Weight::from_parts(33_563_000, 0)
-			.saturating_add(Weight::from_parts(0, 3886))
-			.saturating_add(T::DbWeight::get().reads(6))
+		// Measured: `445`
+		// Estimated: `3910`
+		// Minimum execution time: 28_644_000 picoseconds.
+		Weight::from_parts(29_581_000, 0)
+			.saturating_add(Weight::from_parts(0, 3910))
+			.saturating_add(T::DbWeight::get().reads(5))
 			.saturating_add(T::DbWeight::get().writes(4))
 	}
-	/// Storage: Hrmp HrmpChannels (r:1 w:0)
-	/// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpCloseChannelRequests (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpCloseChannelRequests (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpCloseChannelRequestsList (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpCloseChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: Dmp DownwardMessageQueues (r:1 w:1)
-	/// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1)
-	/// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured)
+	/// Storage: `Hrmp::HrmpChannels` (r:1 w:0)
+	/// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpCloseChannelRequests` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpCloseChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpCloseChannelRequestsList` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpCloseChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+	/// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1)
+	/// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn hrmp_close_channel() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `238`
-		// Estimated: `3703`
-		// Minimum execution time: 28_134_000 picoseconds.
-		Weight::from_parts(29_236_000, 0)
-			.saturating_add(Weight::from_parts(0, 3703))
+		// Measured: `558`
+		// Estimated: `4023`
+		// Minimum execution time: 31_824_000 picoseconds.
+		Weight::from_parts(33_207_000, 0)
+			.saturating_add(Weight::from_parts(0, 4023))
 			.saturating_add(T::DbWeight::get().reads(5))
 			.saturating_add(T::DbWeight::get().writes(4))
 	}
-	/// Storage: Hrmp HrmpIngressChannelsIndex (r:128 w:128)
-	/// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpEgressChannelsIndex (r:128 w:128)
-	/// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpChannels (r:254 w:254)
-	/// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:0 w:1)
-	/// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpChannelContents (r:0 w:254)
-	/// Proof Skipped: Hrmp HrmpChannelContents (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpOpenChannelRequestCount (r:0 w:1)
-	/// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured)
+	/// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:128 w:128)
+	/// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:128 w:128)
+	/// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpChannels` (r:254 w:254)
+	/// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:0 w:1)
+	/// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpChannelContents` (r:0 w:254)
+	/// Proof: `Hrmp::HrmpChannelContents` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:0 w:1)
+	/// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// The range of component `i` is `[0, 127]`.
 	/// The range of component `e` is `[0, 127]`.
 	fn force_clean_hrmp(i: u32, e: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `158 + e * (100 ±0) + i * (100 ±0)`
-		// Estimated: `3620 + e * (2575 ±0) + i * (2575 ±0)`
-		// Minimum execution time: 1_217_145_000 picoseconds.
-		Weight::from_parts(1_251_187_000, 0)
-			.saturating_add(Weight::from_parts(0, 3620))
-			// Standard Error: 118_884
-			.saturating_add(Weight::from_parts(4_002_678, 0).saturating_mul(i.into()))
-			// Standard Error: 118_884
-			.saturating_add(Weight::from_parts(3_641_596, 0).saturating_mul(e.into()))
+		// Measured: `264 + e * (100 ±0) + i * (100 ±0)`
+		// Estimated: `3726 + e * (2575 ±0) + i * (2575 ±0)`
+		// Minimum execution time: 1_213_331_000 picoseconds.
+		Weight::from_parts(1_217_120_000, 0)
+			.saturating_add(Weight::from_parts(0, 3726))
+			// Standard Error: 108_190
+			.saturating_add(Weight::from_parts(3_485_701, 0).saturating_mul(i.into()))
+			// Standard Error: 108_190
+			.saturating_add(Weight::from_parts(3_564_287, 0).saturating_mul(e.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(i.into())))
 			.saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(e.into())))
@@ -152,135 +148,139 @@ impl<T: frame_system::Config> runtime_parachains::hrmp::WeightInfo for WeightInf
 			.saturating_add(Weight::from_parts(0, 2575).saturating_mul(e.into()))
 			.saturating_add(Weight::from_parts(0, 2575).saturating_mul(i.into()))
 	}
-	/// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpOpenChannelRequests (r:128 w:128)
-	/// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Paras ParaLifecycles (r:256 w:0)
-	/// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpIngressChannelsIndex (r:128 w:128)
-	/// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpEgressChannelsIndex (r:128 w:128)
-	/// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpOpenChannelRequestCount (r:128 w:128)
-	/// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:128 w:128)
-	/// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpChannels (r:0 w:128)
-	/// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured)
+	/// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpOpenChannelRequests` (r:128 w:128)
+	/// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::ParaLifecycles` (r:256 w:0)
+	/// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:128 w:128)
+	/// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:128 w:128)
+	/// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:128 w:128)
+	/// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:128 w:128)
+	/// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpChannels` (r:0 w:128)
+	/// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// The range of component `c` is `[0, 128]`.
 	fn force_process_hrmp_open(c: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `386 + c * (136 ±0)`
-		// Estimated: `1841 + c * (5086 ±0)`
-		// Minimum execution time: 6_277_000 picoseconds.
-		Weight::from_parts(6_357_000, 0)
-			.saturating_add(Weight::from_parts(0, 1841))
-			// Standard Error: 41_189
-			.saturating_add(Weight::from_parts(22_159_709, 0).saturating_mul(c.into()))
+		// Measured: `492 + c * (136 ±0)`
+		// Estimated: `1947 + c * (5086 ±0)`
+		// Minimum execution time: 6_040_000 picoseconds.
+		Weight::from_parts(5_644_307, 0)
+			.saturating_add(Weight::from_parts(0, 1947))
+			// Standard Error: 12_852
+			.saturating_add(Weight::from_parts(21_031_626, 0).saturating_mul(c.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().reads((7_u64).saturating_mul(c.into())))
 			.saturating_add(T::DbWeight::get().writes(1))
 			.saturating_add(T::DbWeight::get().writes((6_u64).saturating_mul(c.into())))
 			.saturating_add(Weight::from_parts(0, 5086).saturating_mul(c.into()))
 	}
-	/// Storage: Hrmp HrmpCloseChannelRequestsList (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpCloseChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpChannels (r:128 w:128)
-	/// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpEgressChannelsIndex (r:128 w:128)
-	/// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpIngressChannelsIndex (r:128 w:128)
-	/// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpCloseChannelRequests (r:0 w:128)
-	/// Proof Skipped: Hrmp HrmpCloseChannelRequests (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpChannelContents (r:0 w:128)
-	/// Proof Skipped: Hrmp HrmpChannelContents (max_values: None, max_size: None, mode: Measured)
+	/// Storage: `Hrmp::HrmpCloseChannelRequestsList` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpCloseChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpChannels` (r:128 w:128)
+	/// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:128 w:128)
+	/// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:128 w:128)
+	/// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpCloseChannelRequests` (r:0 w:128)
+	/// Proof: `Hrmp::HrmpCloseChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpChannelContents` (r:0 w:128)
+	/// Proof: `Hrmp::HrmpChannelContents` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// The range of component `c` is `[0, 128]`.
 	fn force_process_hrmp_close(c: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `229 + c * (124 ±0)`
-		// Estimated: `1689 + c * (2600 ±0)`
-		// Minimum execution time: 5_070_000 picoseconds.
-		Weight::from_parts(5_225_000, 0)
-			.saturating_add(Weight::from_parts(0, 1689))
-			// Standard Error: 24_173
-			.saturating_add(Weight::from_parts(13_645_307, 0).saturating_mul(c.into()))
+		// Measured: `335 + c * (124 ±0)`
+		// Estimated: `1795 + c * (2600 ±0)`
+		// Minimum execution time: 4_950_000 picoseconds.
+		Weight::from_parts(5_215_558, 0)
+			.saturating_add(Weight::from_parts(0, 1795))
+			// Standard Error: 9_231
+			.saturating_add(Weight::from_parts(12_770_147, 0).saturating_mul(c.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(c.into())))
 			.saturating_add(T::DbWeight::get().writes(1))
 			.saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(c.into())))
 			.saturating_add(Weight::from_parts(0, 2600).saturating_mul(c.into()))
 	}
-	/// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured)
+	/// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// The range of component `c` is `[0, 128]`.
 	fn hrmp_cancel_open_request(c: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `920 + c * (13 ±0)`
-		// Estimated: `4189 + c * (15 ±0)`
-		// Minimum execution time: 20_449_000 picoseconds.
-		Weight::from_parts(30_861_799, 0)
-			.saturating_add(Weight::from_parts(0, 4189))
-			// Standard Error: 6_642
-			.saturating_add(Weight::from_parts(236_293, 0).saturating_mul(c.into()))
+		// Measured: `1026 + c * (13 ±0)`
+		// Estimated: `4295 + c * (15 ±0)`
+		// Minimum execution time: 17_550_000 picoseconds.
+		Weight::from_parts(25_522_933, 0)
+			.saturating_add(Weight::from_parts(0, 4295))
+			// Standard Error: 2_332
+			.saturating_add(Weight::from_parts(121_128, 0).saturating_mul(c.into()))
 			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().writes(3))
 			.saturating_add(Weight::from_parts(0, 15).saturating_mul(c.into()))
 	}
-	/// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpOpenChannelRequests (r:128 w:128)
-	/// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured)
+	/// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpOpenChannelRequests` (r:128 w:128)
+	/// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// The range of component `c` is `[0, 128]`.
 	fn clean_open_channel_requests(c: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `137 + c * (63 ±0)`
-		// Estimated: `1616 + c * (2538 ±0)`
-		// Minimum execution time: 3_911_000 picoseconds.
-		Weight::from_parts(5_219_837, 0)
-			.saturating_add(Weight::from_parts(0, 1616))
-			// Standard Error: 10_219
-			.saturating_add(Weight::from_parts(3_647_782, 0).saturating_mul(c.into()))
+		// Measured: `243 + c * (63 ±0)`
+		// Estimated: `1722 + c * (2538 ±0)`
+		// Minimum execution time: 3_782_000 picoseconds.
+		Weight::from_parts(5_263_610, 0)
+			.saturating_add(Weight::from_parts(0, 1722))
+			// Standard Error: 3_152
+			.saturating_add(Weight::from_parts(3_309_777, 0).saturating_mul(c.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into())))
 			.saturating_add(T::DbWeight::get().writes(1))
 			.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into())))
 			.saturating_add(Weight::from_parts(0, 2538).saturating_mul(c.into()))
 	}
-	/// Storage: Paras ParaLifecycles (r:2 w:0)
-	/// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpChannels (r:1 w:0)
-	/// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpEgressChannelsIndex (r:1 w:0)
-	/// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
-	/// Storage: Dmp DownwardMessageQueues (r:2 w:2)
-	/// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Dmp DownwardMessageQueueHeads (r:2 w:2)
-	/// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpIngressChannelsIndex (r:1 w:0)
-	/// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured)
-	/// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:1 w:1)
-	/// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured)
-	fn force_open_hrmp_channel(_c: u32, ) -> Weight {
+	/// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Paras::ParaLifecycles` (r:1 w:0)
+	/// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpChannels` (r:1 w:0)
+	/// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:1 w:0)
+	/// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueues` (r:2 w:2)
+	/// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueueHeads` (r:2 w:2)
+	/// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:1 w:0)
+	/// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// The range of component `c` is `[0, 1]`.
+	fn force_open_hrmp_channel(c: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `307`
-		// Estimated: `6247`
-		// Minimum execution time: 50_870_000 picoseconds.
-		Weight::from_parts(53_335_000, 0)
-			.saturating_add(Weight::from_parts(0, 6247))
-			.saturating_add(T::DbWeight::get().reads(13))
+		// Measured: `455 + c * (235 ±0)`
+		// Estimated: `6395 + c * (235 ±0)`
+		// Minimum execution time: 46_445_000 picoseconds.
+		Weight::from_parts(48_376_448, 0)
+			.saturating_add(Weight::from_parts(0, 6395))
+			// Standard Error: 130_148
+			.saturating_add(Weight::from_parts(13_606_551, 0).saturating_mul(c.into()))
+			.saturating_add(T::DbWeight::get().reads(12))
 			.saturating_add(T::DbWeight::get().writes(8))
+			.saturating_add(Weight::from_parts(0, 235).saturating_mul(c.into()))
 	}
 	/// Storage: `Paras::ParaLifecycles` (r:1 w:0)
 	/// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
@@ -304,11 +304,11 @@ impl<T: frame_system::Config> runtime_parachains::hrmp::WeightInfo for WeightInf
 	/// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn establish_system_channel() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `417`
-		// Estimated: `6357`
-		// Minimum execution time: 629_674_000 picoseconds.
-		Weight::from_parts(640_174_000, 0)
-			.saturating_add(Weight::from_parts(0, 6357))
+		// Measured: `455`
+		// Estimated: `6395`
+		// Minimum execution time: 46_563_000 picoseconds.
+		Weight::from_parts(48_015_000, 0)
+			.saturating_add(Weight::from_parts(0, 6395))
 			.saturating_add(T::DbWeight::get().reads(12))
 			.saturating_add(T::DbWeight::get().writes(8))
 	}
@@ -318,20 +318,40 @@ impl<T: frame_system::Config> runtime_parachains::hrmp::WeightInfo for WeightInf
 	fn poke_channel_deposits() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `263`
 		// Estimated: `3728`
-		// Minimum execution time: 173_371_000 picoseconds.
-		Weight::from_parts(175_860_000, 0)
+		// Minimum execution time: 12_252_000 picoseconds.
+		Weight::from_parts(12_550_000, 0)
 			.saturating_add(Weight::from_parts(0, 3728))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
+	/// Storage: `Paras::ParaLifecycles` (r:2 w:0)
+	/// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpOpenChannelRequests` (r:2 w:2)
+	/// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpChannels` (r:2 w:0)
+	/// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:2 w:0)
+	/// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:2 w:2)
+	/// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1)
+	/// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueues` (r:2 w:2)
+	/// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Dmp::DownwardMessageQueueHeads` (r:2 w:2)
+	/// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:2 w:0)
+	/// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:2 w:2)
+	/// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn establish_channel_with_system() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `417`
-		// Estimated: `6357`
-		// Minimum execution time: 629_674_000 picoseconds.
-		Weight::from_parts(640_174_000, 0)
-			.saturating_add(Weight::from_parts(0, 6357))
-			.saturating_add(T::DbWeight::get().reads(12))
-			.saturating_add(T::DbWeight::get().writes(8))
+		// Measured: `455`
+		// Estimated: `6395`
+		// Minimum execution time: 79_503_000 picoseconds.
+		Weight::from_parts(81_630_000, 0)
+			.saturating_add(Weight::from_parts(0, 6395))
+			.saturating_add(T::DbWeight::get().reads(19))
+			.saturating_add(T::DbWeight::get().writes(11))
 	}
 }
-- 
GitLab


From 14c4afc5382dcb18095af5c090e2aa1af65ecae6 Mon Sep 17 00:00:00 2001
From: Egor_P
Date: Thu, 2 May 2024 09:21:11 +0200
Subject: [PATCH 099/107] [Backport] Version bumps and reorg prdocs from 1.11.0
 (#4336)

This PR backports version bumps and reorganization of the `prdocs` from
`1.11.0` release branch back to master
---
 .../parachains/runtimes/assets/asset-hub-rococo/src/lib.rs   | 4 ++--
 .../parachains/runtimes/assets/asset-hub-westend/src/lib.rs  | 4 ++--
 .../runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs        | 4 ++--
 .../runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs       | 4 ++--
 .../runtimes/collectives/collectives-westend/src/lib.rs      | 4 ++--
 .../parachains/runtimes/contracts/contracts-rococo/src/lib.rs | 4 ++--
 .../parachains/runtimes/coretime/coretime-rococo/src/lib.rs  | 4 ++--
 .../parachains/runtimes/coretime/coretime-westend/src/lib.rs | 4 ++--
 .../parachains/runtimes/glutton/glutton-westend/src/lib.rs   | 2 +-
 cumulus/parachains/runtimes/people/people-rococo/src/lib.rs  | 4 ++--
 cumulus/parachains/runtimes/people/people-westend/src/lib.rs | 4 ++--
 .../parachains/runtimes/testing/rococo-parachain/src/lib.rs  | 2 +-
 polkadot/node/primitives/src/lib.rs                          | 2 +-
 polkadot/runtime/rococo/src/lib.rs                           | 4 ++--
 polkadot/runtime/westend/src/lib.rs                          | 4 ++--
 prdoc/{ => 1.11.0}/pr_2119.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_2292.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_2714.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_2944.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3250.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3251.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3455.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3485.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3512.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3630.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3659.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3660.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3695.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3708.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3721.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3789.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3801.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3813.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3852.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3875.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3889.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3915.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3930.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3934.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3953.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3959.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3976.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3979.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3983.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_3997.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4006.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4015.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4017.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4021.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4027.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4037.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4059.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4060.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4070.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4072.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4075.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4089.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4118.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4151.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4156.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4168.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4169.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4171.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4177.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4189.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4199.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4208.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4221.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4229.prdoc                             | 0
 prdoc/{ => 1.11.0}/pr_4252.prdoc                             | 0
 70 files changed, 27 insertions(+), 27 deletions(-)
 rename prdoc/{ => 1.11.0}/pr_2119.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_2292.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_2714.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_2944.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3250.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3251.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3455.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3485.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3512.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3630.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3659.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3660.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3695.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3708.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3721.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3789.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3801.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3813.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3852.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3875.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3889.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3915.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3930.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3934.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3953.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3959.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3976.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3979.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3983.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_3997.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4006.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4015.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4017.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4021.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4027.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4037.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4059.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4060.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4070.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4072.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4075.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4089.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4118.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4151.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4156.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4168.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4169.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4171.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4177.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4189.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4199.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4208.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4221.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4229.prdoc (100%)
 rename prdoc/{ => 1.11.0}/pr_4252.prdoc (100%)

diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
index 15173480463..c4f4bd4c1ee 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
@@ -112,10 +112,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: create_runtime_str!("statemine"),
 	impl_name: create_runtime_str!("statemine"),
 	authoring_version: 1,
-	spec_version: 1_010_000,
+	spec_version: 1_011_000,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
-	transaction_version: 14,
+	transaction_version: 15,
 	state_version: 1,
 };
 
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
index 64127c80b6d..a3593732f31 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
@@ -110,10 +110,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: create_runtime_str!("westmint"),
 	impl_name: create_runtime_str!("westmint"),
 	authoring_version: 1,
-	spec_version: 1_010_000,
+	spec_version: 1_011_000,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
-	transaction_version: 14,
+	transaction_version: 15,
 	state_version: 0,
 };
 
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
index 7c2aa490886..2a7f46feee6 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
@@ -209,10 +209,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: create_runtime_str!("bridge-hub-rococo"),
 	impl_name: create_runtime_str!("bridge-hub-rococo"),
 	authoring_version: 1,
-	spec_version: 1_010_000,
+	spec_version: 1_011_000,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
-	transaction_version: 4,
+	transaction_version: 5,
 	state_version: 1,
 };
 
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
index 640eaf881a5..4c467010c7c 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
@@ -183,10 +183,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: create_runtime_str!("bridge-hub-westend"),
 	impl_name: create_runtime_str!("bridge-hub-westend"),
 	authoring_version: 1,
-	spec_version: 1_010_000,
+	spec_version: 1_011_000,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
-	transaction_version: 4,
+	transaction_version: 5,
 	state_version: 1,
 };
 
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
index 7274e9acdcd..d9839060408 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
@@ -117,10 +117,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: create_runtime_str!("collectives-westend"),
 	impl_name: create_runtime_str!("collectives-westend"),
 	authoring_version: 1,
-	spec_version: 1_010_000,
+	spec_version: 1_011_000,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
-	transaction_version: 5,
+	transaction_version: 6,
 	state_version: 0,
 };
 
diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
index 988195d88d8..df39cd811d1 100644
--- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
@@ -136,10 +136,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: create_runtime_str!("contracts-rococo"),
 	impl_name: create_runtime_str!("contracts-rococo"),
 	authoring_version: 1,
-	spec_version: 1_010_000,
+	spec_version: 1_011_000,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
-	transaction_version: 6,
+	transaction_version: 7,
 	state_version: 1,
 };
 
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
index 895890da7dd..ab925b04eb7 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
@@ -136,10 +136,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: create_runtime_str!("coretime-rococo"),
 	impl_name: create_runtime_str!("coretime-rococo"),
 	authoring_version: 1,
-	spec_version: 1_010_000,
+	spec_version: 1_011_000,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
-	transaction_version: 0,
+	transaction_version: 1,
 	state_version: 1,
 };
 
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
index 9d080087d5d..61c7b6e4958 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
@@ -136,10 +136,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: create_runtime_str!("coretime-westend"),
 	impl_name: create_runtime_str!("coretime-westend"),
 	authoring_version: 1,
-	spec_version: 1_010_000,
+	spec_version: 1_011_000,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
-	transaction_version: 0,
+	transaction_version: 1,
 	state_version: 1,
 };
 
diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs
index 996f7655c03..424fa9cb7e7 100644
--- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs
@@ -100,7 +100,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: create_runtime_str!("glutton-westend"),
 	impl_name: create_runtime_str!("glutton-westend"),
 	authoring_version: 1,
-	spec_version: 1_010_000,
+	spec_version: 1_011_000,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
 	transaction_version: 1,
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
index 4a57bad01c8..544b2e78a46 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
@@ -128,10 +128,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: create_runtime_str!("people-rococo"),
 	impl_name: create_runtime_str!("people-rococo"),
 	authoring_version: 1,
-	spec_version: 1_010_000,
+	spec_version: 1_011_000,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
-	transaction_version: 0,
+	transaction_version: 1,
 	state_version: 1,
 };
 
diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs
index 22e8fd57d3c..50c818a2022 100644
--- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs
@@ -128,10 +128,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: create_runtime_str!("people-westend"),
 	impl_name: create_runtime_str!("people-westend"),
 	authoring_version: 1,
-	spec_version: 1_010_000,
+	spec_version: 1_011_000,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
-	transaction_version: 0,
+	transaction_version: 1,
 	state_version: 1,
 };
 
diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs
index 11da6adb819..e762cec9093 100644
--- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs
+++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs
@@ -107,7 +107,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: create_runtime_str!("test-parachain"),
create_runtime_str!("test-parachain"), impl_name: create_runtime_str!("test-parachain"), authoring_version: 1, - spec_version: 1_010_000, + spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index 375aacd5832..0f97250a934 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -58,7 +58,7 @@ pub use disputes::{ /// relatively rare. /// /// The associated worker binaries should use the same version as the node that spawns them. -pub const NODE_VERSION: &'static str = "1.10.0"; +pub const NODE_VERSION: &'static str = "1.11.0"; // For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node // plus some overhead: diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index a52d800ad38..56f4aa40ef1 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -162,10 +162,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("rococo"), impl_name: create_runtime_str!("parity-rococo-v2.0"), authoring_version: 0, - spec_version: 1_010_000, + spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 24, + transaction_version: 25, state_version: 1, }; diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index f51233cabf0..0a5312b538b 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -154,10 +154,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westend"), impl_name: create_runtime_str!("parity-westend"), authoring_version: 2, - spec_version: 1_010_000, + spec_version: 1_011_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 24, + transaction_version: 25, state_version: 1, }; diff --git a/prdoc/pr_2119.prdoc b/prdoc/1.11.0/pr_2119.prdoc similarity index 100% rename from prdoc/pr_2119.prdoc rename to prdoc/1.11.0/pr_2119.prdoc diff --git a/prdoc/pr_2292.prdoc b/prdoc/1.11.0/pr_2292.prdoc similarity index 100% rename from prdoc/pr_2292.prdoc rename to prdoc/1.11.0/pr_2292.prdoc diff --git a/prdoc/pr_2714.prdoc b/prdoc/1.11.0/pr_2714.prdoc similarity index 100% rename from prdoc/pr_2714.prdoc rename to prdoc/1.11.0/pr_2714.prdoc diff --git a/prdoc/pr_2944.prdoc b/prdoc/1.11.0/pr_2944.prdoc similarity index 100% rename from prdoc/pr_2944.prdoc rename to prdoc/1.11.0/pr_2944.prdoc diff --git a/prdoc/pr_3250.prdoc b/prdoc/1.11.0/pr_3250.prdoc similarity index 100% rename from prdoc/pr_3250.prdoc rename to prdoc/1.11.0/pr_3250.prdoc diff --git a/prdoc/pr_3251.prdoc b/prdoc/1.11.0/pr_3251.prdoc similarity index 100% rename from prdoc/pr_3251.prdoc rename to prdoc/1.11.0/pr_3251.prdoc diff --git a/prdoc/pr_3455.prdoc b/prdoc/1.11.0/pr_3455.prdoc similarity index 100% rename from prdoc/pr_3455.prdoc rename to prdoc/1.11.0/pr_3455.prdoc diff --git a/prdoc/pr_3485.prdoc b/prdoc/1.11.0/pr_3485.prdoc similarity index 100% rename from prdoc/pr_3485.prdoc rename to prdoc/1.11.0/pr_3485.prdoc diff --git a/prdoc/pr_3512.prdoc b/prdoc/1.11.0/pr_3512.prdoc similarity index 100% rename from prdoc/pr_3512.prdoc rename to prdoc/1.11.0/pr_3512.prdoc diff --git a/prdoc/pr_3630.prdoc b/prdoc/1.11.0/pr_3630.prdoc similarity index 100% rename from prdoc/pr_3630.prdoc rename to prdoc/1.11.0/pr_3630.prdoc diff --git a/prdoc/pr_3659.prdoc b/prdoc/1.11.0/pr_3659.prdoc similarity 
index 100% rename from prdoc/pr_3659.prdoc rename to prdoc/1.11.0/pr_3659.prdoc diff --git a/prdoc/pr_3660.prdoc b/prdoc/1.11.0/pr_3660.prdoc similarity index 100% rename from prdoc/pr_3660.prdoc rename to prdoc/1.11.0/pr_3660.prdoc diff --git a/prdoc/pr_3695.prdoc b/prdoc/1.11.0/pr_3695.prdoc similarity index 100% rename from prdoc/pr_3695.prdoc rename to prdoc/1.11.0/pr_3695.prdoc diff --git a/prdoc/pr_3708.prdoc b/prdoc/1.11.0/pr_3708.prdoc similarity index 100% rename from prdoc/pr_3708.prdoc rename to prdoc/1.11.0/pr_3708.prdoc diff --git a/prdoc/pr_3721.prdoc b/prdoc/1.11.0/pr_3721.prdoc similarity index 100% rename from prdoc/pr_3721.prdoc rename to prdoc/1.11.0/pr_3721.prdoc diff --git a/prdoc/pr_3789.prdoc b/prdoc/1.11.0/pr_3789.prdoc similarity index 100% rename from prdoc/pr_3789.prdoc rename to prdoc/1.11.0/pr_3789.prdoc diff --git a/prdoc/pr_3801.prdoc b/prdoc/1.11.0/pr_3801.prdoc similarity index 100% rename from prdoc/pr_3801.prdoc rename to prdoc/1.11.0/pr_3801.prdoc diff --git a/prdoc/pr_3813.prdoc b/prdoc/1.11.0/pr_3813.prdoc similarity index 100% rename from prdoc/pr_3813.prdoc rename to prdoc/1.11.0/pr_3813.prdoc diff --git a/prdoc/pr_3852.prdoc b/prdoc/1.11.0/pr_3852.prdoc similarity index 100% rename from prdoc/pr_3852.prdoc rename to prdoc/1.11.0/pr_3852.prdoc diff --git a/prdoc/pr_3875.prdoc b/prdoc/1.11.0/pr_3875.prdoc similarity index 100% rename from prdoc/pr_3875.prdoc rename to prdoc/1.11.0/pr_3875.prdoc diff --git a/prdoc/pr_3889.prdoc b/prdoc/1.11.0/pr_3889.prdoc similarity index 100% rename from prdoc/pr_3889.prdoc rename to prdoc/1.11.0/pr_3889.prdoc diff --git a/prdoc/pr_3915.prdoc b/prdoc/1.11.0/pr_3915.prdoc similarity index 100% rename from prdoc/pr_3915.prdoc rename to prdoc/1.11.0/pr_3915.prdoc diff --git a/prdoc/pr_3930.prdoc b/prdoc/1.11.0/pr_3930.prdoc similarity index 100% rename from prdoc/pr_3930.prdoc rename to prdoc/1.11.0/pr_3930.prdoc diff --git a/prdoc/pr_3934.prdoc b/prdoc/1.11.0/pr_3934.prdoc similarity index 100% rename from prdoc/pr_3934.prdoc rename to prdoc/1.11.0/pr_3934.prdoc diff --git a/prdoc/pr_3953.prdoc b/prdoc/1.11.0/pr_3953.prdoc similarity index 100% rename from prdoc/pr_3953.prdoc rename to prdoc/1.11.0/pr_3953.prdoc diff --git a/prdoc/pr_3959.prdoc b/prdoc/1.11.0/pr_3959.prdoc similarity index 100% rename from prdoc/pr_3959.prdoc rename to prdoc/1.11.0/pr_3959.prdoc diff --git a/prdoc/pr_3976.prdoc b/prdoc/1.11.0/pr_3976.prdoc similarity index 100% rename from prdoc/pr_3976.prdoc rename to prdoc/1.11.0/pr_3976.prdoc diff --git a/prdoc/pr_3979.prdoc b/prdoc/1.11.0/pr_3979.prdoc similarity index 100% rename from prdoc/pr_3979.prdoc rename to prdoc/1.11.0/pr_3979.prdoc diff --git a/prdoc/pr_3983.prdoc b/prdoc/1.11.0/pr_3983.prdoc similarity index 100% rename from prdoc/pr_3983.prdoc rename to prdoc/1.11.0/pr_3983.prdoc diff --git a/prdoc/pr_3997.prdoc b/prdoc/1.11.0/pr_3997.prdoc similarity index 100% rename from prdoc/pr_3997.prdoc rename to prdoc/1.11.0/pr_3997.prdoc diff --git a/prdoc/pr_4006.prdoc b/prdoc/1.11.0/pr_4006.prdoc similarity index 100% rename from prdoc/pr_4006.prdoc rename to prdoc/1.11.0/pr_4006.prdoc diff --git a/prdoc/pr_4015.prdoc b/prdoc/1.11.0/pr_4015.prdoc similarity index 100% rename from prdoc/pr_4015.prdoc rename to prdoc/1.11.0/pr_4015.prdoc diff --git a/prdoc/pr_4017.prdoc b/prdoc/1.11.0/pr_4017.prdoc similarity index 100% rename from prdoc/pr_4017.prdoc rename to prdoc/1.11.0/pr_4017.prdoc diff --git a/prdoc/pr_4021.prdoc b/prdoc/1.11.0/pr_4021.prdoc similarity index 100% rename from 
prdoc/pr_4021.prdoc rename to prdoc/1.11.0/pr_4021.prdoc diff --git a/prdoc/pr_4027.prdoc b/prdoc/1.11.0/pr_4027.prdoc similarity index 100% rename from prdoc/pr_4027.prdoc rename to prdoc/1.11.0/pr_4027.prdoc diff --git a/prdoc/pr_4037.prdoc b/prdoc/1.11.0/pr_4037.prdoc similarity index 100% rename from prdoc/pr_4037.prdoc rename to prdoc/1.11.0/pr_4037.prdoc diff --git a/prdoc/pr_4059.prdoc b/prdoc/1.11.0/pr_4059.prdoc similarity index 100% rename from prdoc/pr_4059.prdoc rename to prdoc/1.11.0/pr_4059.prdoc diff --git a/prdoc/pr_4060.prdoc b/prdoc/1.11.0/pr_4060.prdoc similarity index 100% rename from prdoc/pr_4060.prdoc rename to prdoc/1.11.0/pr_4060.prdoc diff --git a/prdoc/pr_4070.prdoc b/prdoc/1.11.0/pr_4070.prdoc similarity index 100% rename from prdoc/pr_4070.prdoc rename to prdoc/1.11.0/pr_4070.prdoc diff --git a/prdoc/pr_4072.prdoc b/prdoc/1.11.0/pr_4072.prdoc similarity index 100% rename from prdoc/pr_4072.prdoc rename to prdoc/1.11.0/pr_4072.prdoc diff --git a/prdoc/pr_4075.prdoc b/prdoc/1.11.0/pr_4075.prdoc similarity index 100% rename from prdoc/pr_4075.prdoc rename to prdoc/1.11.0/pr_4075.prdoc diff --git a/prdoc/pr_4089.prdoc b/prdoc/1.11.0/pr_4089.prdoc similarity index 100% rename from prdoc/pr_4089.prdoc rename to prdoc/1.11.0/pr_4089.prdoc diff --git a/prdoc/pr_4118.prdoc b/prdoc/1.11.0/pr_4118.prdoc similarity index 100% rename from prdoc/pr_4118.prdoc rename to prdoc/1.11.0/pr_4118.prdoc diff --git a/prdoc/pr_4151.prdoc b/prdoc/1.11.0/pr_4151.prdoc similarity index 100% rename from prdoc/pr_4151.prdoc rename to prdoc/1.11.0/pr_4151.prdoc diff --git a/prdoc/pr_4156.prdoc b/prdoc/1.11.0/pr_4156.prdoc similarity index 100% rename from prdoc/pr_4156.prdoc rename to prdoc/1.11.0/pr_4156.prdoc diff --git a/prdoc/pr_4168.prdoc b/prdoc/1.11.0/pr_4168.prdoc similarity index 100% rename from prdoc/pr_4168.prdoc rename to prdoc/1.11.0/pr_4168.prdoc diff --git a/prdoc/pr_4169.prdoc b/prdoc/1.11.0/pr_4169.prdoc similarity index 100% rename from prdoc/pr_4169.prdoc rename to prdoc/1.11.0/pr_4169.prdoc diff --git a/prdoc/pr_4171.prdoc b/prdoc/1.11.0/pr_4171.prdoc similarity index 100% rename from prdoc/pr_4171.prdoc rename to prdoc/1.11.0/pr_4171.prdoc diff --git a/prdoc/pr_4177.prdoc b/prdoc/1.11.0/pr_4177.prdoc similarity index 100% rename from prdoc/pr_4177.prdoc rename to prdoc/1.11.0/pr_4177.prdoc diff --git a/prdoc/pr_4189.prdoc b/prdoc/1.11.0/pr_4189.prdoc similarity index 100% rename from prdoc/pr_4189.prdoc rename to prdoc/1.11.0/pr_4189.prdoc diff --git a/prdoc/pr_4199.prdoc b/prdoc/1.11.0/pr_4199.prdoc similarity index 100% rename from prdoc/pr_4199.prdoc rename to prdoc/1.11.0/pr_4199.prdoc diff --git a/prdoc/pr_4208.prdoc b/prdoc/1.11.0/pr_4208.prdoc similarity index 100% rename from prdoc/pr_4208.prdoc rename to prdoc/1.11.0/pr_4208.prdoc diff --git a/prdoc/pr_4221.prdoc b/prdoc/1.11.0/pr_4221.prdoc similarity index 100% rename from prdoc/pr_4221.prdoc rename to prdoc/1.11.0/pr_4221.prdoc diff --git a/prdoc/pr_4229.prdoc b/prdoc/1.11.0/pr_4229.prdoc similarity index 100% rename from prdoc/pr_4229.prdoc rename to prdoc/1.11.0/pr_4229.prdoc diff --git a/prdoc/pr_4252.prdoc b/prdoc/1.11.0/pr_4252.prdoc similarity index 100% rename from prdoc/pr_4252.prdoc rename to prdoc/1.11.0/pr_4252.prdoc -- GitLab From 16d8205770e5fcb334a8324247a36f5737544ed1 Mon Sep 17 00:00:00 2001 From: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> Date: Thu, 2 May 2024 11:30:27 +0200 Subject: [PATCH 100/107] Make parachain template great again (and async backing ready) 
(#4295) Closes #4272. It turned out that not only the node part but also the runtime wasn't async backing ready. Now, both of them are using proper APIs and producing 6-second blocks out of the box. --- Cargo.lock | 1 + prdoc/pr_4295.prdoc | 14 ++++++++ templates/parachain/node/src/service.rs | 35 +++++++++++-------- templates/parachain/runtime/Cargo.toml | 2 ++ templates/parachain/runtime/src/apis.rs | 16 +++++++-- .../parachain/runtime/src/configs/mod.rs | 28 ++++++--------- templates/parachain/runtime/src/lib.rs | 16 ++++++--- 7 files changed, 73 insertions(+), 39 deletions(-) create mode 100644 prdoc/pr_4295.prdoc diff --git a/Cargo.lock b/Cargo.lock index c3bf3f26385..14a91049921 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11838,6 +11838,7 @@ dependencies = [ "cumulus-pallet-session-benchmarking", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", + "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", diff --git a/prdoc/pr_4295.prdoc b/prdoc/pr_4295.prdoc new file mode 100644 index 00000000000..e8b2010a849 --- /dev/null +++ b/prdoc/pr_4295.prdoc @@ -0,0 +1,14 @@ +title: "Make parachain template async backing ready" + +doc: + - audience: Node Dev + description: | + Promotes the parachain template (both node and runtime) to use async backing APIs so that + developers starting a new project from the template could get async backing integrated out + of the box. + +crates: + - name: parachain-template-node + bump: major + - name: parachain-template-runtime + bump: major diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs index 373df01b0c4..20e5b37b41b 100644 --- a/templates/parachain/node/src/service.rs +++ b/templates/parachain/node/src/service.rs @@ -12,6 +12,7 @@ use parachain_template_runtime::{ // Cumulus Imports use cumulus_client_collator::service::CollatorService; +use cumulus_client_consensus_aura::collators::lookahead::{self as aura, Params as AuraParams}; use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport; use cumulus_client_consensus_proposer::Proposer; use cumulus_client_service::{ @@ -19,7 +20,10 @@ use cumulus_client_service::{ BuildNetworkParams, CollatorSybilResistance, DARecoveryProfile, ParachainHostFunctions, StartRelayChainTasksParams, }; -use cumulus_primitives_core::{relay_chain::CollatorPair, ParaId}; +use cumulus_primitives_core::{ + relay_chain::{CollatorPair, ValidationCode}, + ParaId, +}; use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; // Substrate Imports @@ -156,6 +160,7 @@ fn build_import_queue( fn start_consensus( client: Arc, + backend: Arc, block_import: ParachainBlockImport, prometheus_registry: Option<&Registry>, telemetry: Option, @@ -170,10 +175,6 @@ fn start_consensus( overseer_handle: OverseerHandle, announce_block: Arc>) + Send + Sync>, ) -> Result<(), sc_service::Error> { - use cumulus_client_consensus_aura::collators::basic::{ - self as basic_aura, Params as BasicAuraParams, - }; - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( task_manager.spawn_handle(), client.clone(), @@ -191,11 +192,15 @@ fn start_consensus( client.clone(), ); - let params = BasicAuraParams { + let params = AuraParams { create_inherent_data_providers: move |_, ()| async move { Ok(()) }, block_import, - para_client: client, + para_client: client.clone(), + para_backend: backend, relay_client: relay_chain_interface, + code_hash_provider: move |block_hash| { + 
client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) + }, sync_oracle, keystore, collator_key, @@ -204,13 +209,12 @@ fn start_consensus( relay_chain_slot_duration, proposer, collator_service, - // Very limited proposal time. - authoring_duration: Duration::from_millis(500), - collation_request_receiver: None, + authoring_duration: Duration::from_millis(2000), + reinitialize: false, }; let fut = - basic_aura::run::( + aura::run::( params, ); task_manager.spawn_essential_handle().spawn("aura", None, fut); @@ -318,8 +322,8 @@ pub async fn start_parachain_node( task_manager: &mut task_manager, config: parachain_config, keystore: params.keystore_container.keystore(), - backend, - network: network.clone(), + backend: backend.clone(), + network, sync_service: sync_service.clone(), system_rpc_tx, tx_handler_controller, @@ -382,13 +386,14 @@ pub async fn start_parachain_node( if validator { start_consensus( client.clone(), + backend, block_import, prometheus_registry.as_ref(), telemetry.as_ref().map(|t| t.handle()), &task_manager, - relay_chain_interface.clone(), + relay_chain_interface, transaction_pool, - sync_service.clone(), + sync_service, params.keystore_container.keystore(), relay_chain_slot_duration, para_id, diff --git a/templates/parachain/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml index 0d985796a11..9cd5b7ca599 100644 --- a/templates/parachain/runtime/Cargo.toml +++ b/templates/parachain/runtime/Cargo.toml @@ -82,6 +82,7 @@ cumulus-pallet-parachain-system = { path = "../../../cumulus/pallets/parachain-s cumulus-pallet-session-benchmarking = { path = "../../../cumulus/pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../cumulus/pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../cumulus/pallets/xcmp-queue", default-features = false } +cumulus-primitives-aura = { path = "../../../cumulus/primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../cumulus/primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../cumulus/primitives/utility", default-features = false } cumulus-primitives-storage-weight-reclaim = { path = "../../../cumulus/primitives/storage-weight-reclaim", default-features = false } @@ -98,6 +99,7 @@ std = [ "cumulus-pallet-session-benchmarking/std", "cumulus-pallet-xcm/std", "cumulus-pallet-xcmp-queue/std", + "cumulus-primitives-aura/std", "cumulus-primitives-core/std", "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", diff --git a/templates/parachain/runtime/src/apis.rs b/templates/parachain/runtime/src/apis.rs index b13ba278fae..107956ded41 100644 --- a/templates/parachain/runtime/src/apis.rs +++ b/templates/parachain/runtime/src/apis.rs @@ -42,14 +42,15 @@ use sp_version::RuntimeVersion; // Local module imports use super::{ - AccountId, Aura, Balance, Block, Executive, InherentDataExt, Nonce, ParachainSystem, Runtime, - RuntimeCall, RuntimeGenesisConfig, SessionKeys, System, TransactionPayment, VERSION, + AccountId, Balance, Block, ConsensusHook, Executive, InherentDataExt, Nonce, ParachainSystem, + Runtime, RuntimeCall, RuntimeGenesisConfig, SessionKeys, System, TransactionPayment, + SLOT_DURATION, VERSION, }; impl_runtime_apis! 
{ impl sp_consensus_aura::AuraApi<Block, AuraId> for Runtime { fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + sp_consensus_aura::SlotDuration::from_millis(SLOT_DURATION) } fn authorities() -> Vec<AuraId> { @@ -57,6 +58,15 @@ impl_runtime_apis! { } } + impl cumulus_primitives_aura::AuraUnincludedSegmentApi<Block> for Runtime { + fn can_build_upon( + included_hash: <Block as BlockT>::Hash, + slot: cumulus_primitives_aura::Slot, + ) -> bool { + ConsensusHook::can_build_upon(included_hash, slot) + } + } + impl sp_api::Core for Runtime { fn version() -> RuntimeVersion { VERSION diff --git a/templates/parachain/runtime/src/configs/mod.rs b/templates/parachain/runtime/src/configs/mod.rs index f1aea481ee2..0aec332feaf 100644 --- a/templates/parachain/runtime/src/configs/mod.rs +++ b/templates/parachain/runtime/src/configs/mod.rs @@ -26,7 +26,7 @@ mod xcm_config; // Substrate and Polkadot dependencies -use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ derive_impl, @@ -53,12 +53,11 @@ use xcm::latest::prelude::BodyId; // Local module imports use super::{ weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}, - AccountId, Aura, Balance, Balances, Block, BlockNumber, CollatorSelection, Hash, MessageQueue, - Nonce, PalletInfo, ParachainSystem, Runtime, RuntimeCall, RuntimeEvent, RuntimeFreezeReason, - RuntimeHoldReason, RuntimeOrigin, RuntimeTask, Session, SessionKeys, System, WeightToFee, - XcmpQueue, AVERAGE_ON_INITIALIZE_RATIO, BLOCK_PROCESSING_VELOCITY, EXISTENTIAL_DEPOSIT, HOURS, - MAXIMUM_BLOCK_WEIGHT, MICROUNIT, NORMAL_DISPATCH_RATIO, RELAY_CHAIN_SLOT_DURATION_MILLIS, - SLOT_DURATION, UNINCLUDED_SEGMENT_CAPACITY, VERSION, + AccountId, Aura, Balance, Balances, Block, BlockNumber, CollatorSelection, ConsensusHook, Hash, + MessageQueue, Nonce, PalletInfo, ParachainSystem, Runtime, RuntimeCall, RuntimeEvent, + RuntimeFreezeReason, RuntimeHoldReason, RuntimeOrigin, RuntimeTask, Session, SessionKeys, + System, WeightToFee, XcmpQueue, AVERAGE_ON_INITIALIZE_RATIO, EXISTENTIAL_DEPOSIT, HOURS, + MAXIMUM_BLOCK_WEIGHT, MICROUNIT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, VERSION, }; use xcm_config::{RelayLocation, XcmOriginToTransactDispatchOrigin}; @@ -128,7 +127,7 @@ impl pallet_timestamp::Config for Runtime { /// A timestamp: milliseconds since the unix epoch.
type Moment = u64; type OnTimestampSet = Aura; - type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; + type MinimumPeriod = ConstU64<0>; type WeightInfo = (); } @@ -195,13 +194,8 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedDmpWeight = ReservedDmpWeight; type XcmpMessageHandler = XcmpQueue; type ReservedXcmpWeight = ReservedXcmpWeight; - type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; - type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< - Runtime, - RELAY_CHAIN_SLOT_DURATION_MILLIS, - BLOCK_PROCESSING_VELOCITY, - UNINCLUDED_SEGMENT_CAPACITY, - >; + type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; + type ConsensusHook = ConsensusHook; } impl parachain_info::Config for Runtime {} @@ -271,8 +265,8 @@ impl pallet_aura::Config for Runtime { type AuthorityId = AuraId; type DisabledValidators = (); type MaxAuthorities = ConstU32<100_000>; - type AllowMultipleBlocksPerSlot = ConstBool<false>; - type SlotDuration = pallet_aura::MinimumPeriodTimesTwo<Runtime>; + type AllowMultipleBlocksPerSlot = ConstBool<true>; + type SlotDuration = ConstU64<SLOT_DURATION>; } parameter_types! { diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs index 5bfd6f290c1..14af00f9b2c 100644 --- a/templates/parachain/runtime/src/lib.rs +++ b/templates/parachain/runtime/src/lib.rs @@ -173,7 +173,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { /// up by `pallet_aura` to implement `fn slot_duration()`. /// /// Change this to adjust the block time. -pub const MILLISECS_PER_BLOCK: u64 = 12000; +pub const MILLISECS_PER_BLOCK: u64 = 6000; // NOTE: Currently it is not possible to change the slot duration after the chain has started. // Attempting to do so will brick block production. @@ -200,21 +200,29 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(5); /// `Operational` extrinsics. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -/// We allow for 0.5 of a second of compute with a 12 second average block time. +/// We allow for 2 seconds of compute with a 6 second average block time. const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts( - WEIGHT_REF_TIME_PER_SECOND.saturating_div(2), + WEIGHT_REF_TIME_PER_SECOND.saturating_mul(2), cumulus_primitives_core::relay_chain::MAX_POV_SIZE as u64, ); /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included /// into the relay chain. -const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; +const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3; /// How many parachain blocks are processed by the relay chain per parent. Limits the /// number of blocks authored per slot. const BLOCK_PROCESSING_VELOCITY: u32 = 1; /// Relay chain slot duration, in milliseconds. const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; +/// Aura consensus hook +type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + RELAY_CHAIN_SLOT_DURATION_MILLIS, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, +>; + /// The version information used to identify this runtime when compiled natively.
#[cfg(feature = "std")] pub fn native_version() -> NativeVersion { -- GitLab From 171bedc2b319e18d51a7b510d8bd4cfd2e645c31 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 2 May 2024 13:02:59 +0300 Subject: [PATCH 101/107] Bridge: ignore client errors when calling recently added `*_free_headers_interval` methods (#4350) see https://github.com/paritytech/parity-bridges-common/issues/2974 : we still need to support unupgraded chains (BHK and BHP) in relay We may need to revert this change when all chains are upgraded --- .../lib-substrate-relay/src/finality/target.rs | 13 ++++++++++++- .../lib-substrate-relay/src/parachains/target.rs | 13 ++++++++++++- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/bridges/relays/lib-substrate-relay/src/finality/target.rs b/bridges/relays/lib-substrate-relay/src/finality/target.rs index adbcfe0096d..0874fa53549 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/target.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/target.rs @@ -107,13 +107,24 @@ impl TargetClient Result>, Self::Error> { - self.client + Ok(self + .client .typed_state_call( P::SourceChain::FREE_HEADERS_INTERVAL_METHOD.into(), (), Some(self.client.best_header().await?.hash()), ) .await + .unwrap_or_else(|e| { + log::info!( + target: "bridge", + "Call of {} at {} has failed with an error: {:?}. Treating as `None`", + P::SourceChain::FREE_HEADERS_INTERVAL_METHOD, + P::TargetChain::NAME, + e, + ); + None + })) } async fn submit_finality_proof( diff --git a/bridges/relays/lib-substrate-relay/src/parachains/target.rs b/bridges/relays/lib-substrate-relay/src/parachains/target.rs index e10d15b6edf..531d55b5322 100644 --- a/bridges/relays/lib-substrate-relay/src/parachains/target.rs +++ b/bridges/relays/lib-substrate-relay/src/parachains/target.rs @@ -122,9 +122,20 @@ where async fn free_source_relay_headers_interval( &self, ) -> Result>, Self::Error> { - self.target_client + Ok(self + .target_client .typed_state_call(P::SourceRelayChain::FREE_HEADERS_INTERVAL_METHOD.into(), (), None) .await + .unwrap_or_else(|e| { + log::info!( + target: "bridge", + "Call of {} at {} has failed with an error: {:?}. Treating as `None`", + P::SourceRelayChain::FREE_HEADERS_INTERVAL_METHOD, + P::TargetChain::NAME, + e, + ); + None + })) } async fn parachain_head( -- GitLab From 877617c44629e84ee36ac7194f4fe00fe3fa0b71 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Thu, 2 May 2024 13:54:57 +0300 Subject: [PATCH 102/107] cargo: Update experimental litep2p to latest version (#4344) This PR updates the litep2p crate to the latest version. This fixes the build for developers that want to perform `cargo update` on all their dependencies: https://github.com/paritytech/polkadot-sdk/pull/4343, by porting the latest changes. The peer records were introduced to litep2p to be able to distinguish and update peers with outdated records. It is going to be properly used in substrate via: https://github.com/paritytech/polkadot-sdk/pull/3786, however that is pending the commit to merge on litep2p master: https://github.com/paritytech/litep2p/pull/96. 
Closes: https://github.com/paritytech/polkadot-sdk/pull/4343 --------- Signed-off-by: Alexandru Vasile --- Cargo.lock | 52 +++++++++---------- substrate/client/network/Cargo.toml | 2 +- .../client/network/src/litep2p/discovery.rs | 5 +- substrate/client/network/types/Cargo.toml | 2 +- 4 files changed, 32 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 14a91049921..f5b2ee855f5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1358,7 +1358,7 @@ dependencies = [ "rand_chacha 0.3.1", "rand_core 0.6.4", "ring 0.1.0", - "sha2 0.10.7", + "sha2 0.10.8", "sp-ark-bls12-381", "sp-ark-ed-on-bls12-381-bandersnatch", "zeroize", @@ -5009,7 +5009,7 @@ dependencies = [ "ed25519 2.2.2", "rand_core 0.6.4", "serde", - "sha2 0.10.7", + "sha2 0.10.8", "subtle 2.5.0", "zeroize", ] @@ -5039,7 +5039,7 @@ dependencies = [ "hashbrown 0.14.3", "hex", "rand_core 0.6.4", - "sha2 0.10.7", + "sha2 0.10.8", "zeroize", ] @@ -7267,7 +7267,7 @@ dependencies = [ "elliptic-curve", "once_cell", "serdect", - "sha2 0.10.7", + "sha2 0.10.8", ] [[package]] @@ -7698,7 +7698,7 @@ dependencies = [ "multihash 0.17.0", "quick-protobuf", "rand 0.8.5", - "sha2 0.10.7", + "sha2 0.10.8", "thiserror", "zeroize", ] @@ -7723,7 +7723,7 @@ dependencies = [ "log", "quick-protobuf", "rand 0.8.5", - "sha2 0.10.7", + "sha2 0.10.8", "smallvec", "thiserror", "uint", @@ -7781,7 +7781,7 @@ dependencies = [ "once_cell", "quick-protobuf", "rand 0.8.5", - "sha2 0.10.7", + "sha2 0.10.8", "snow", "static_assertions", "thiserror", @@ -8116,7 +8116,7 @@ dependencies = [ [[package]] name = "litep2p" version = "0.3.0" -source = "git+https://github.com/paritytech/litep2p?branch=master#b142c9eb611fb2fe78d2830266a3675b37299ceb" +source = "git+https://github.com/paritytech/litep2p?rev=e03a6023882db111beeb24d8c0ceaac0721d3f0f#e03a6023882db111beeb24d8c0ceaac0721d3f0f" dependencies = [ "async-trait", "bs58 0.4.0", @@ -8143,7 +8143,7 @@ dependencies = [ "ring 0.16.20", "rustls 0.20.8", "serde", - "sha2 0.10.7", + "sha2 0.10.8", "simple-dns", "smallvec", "snow", @@ -8692,7 +8692,7 @@ dependencies = [ "core2", "digest 0.10.7", "multihash-derive 0.8.0", - "sha2 0.10.7", + "sha2 0.10.8", "sha3", "unsigned-varint", ] @@ -8709,7 +8709,7 @@ dependencies = [ "core2", "digest 0.10.7", "multihash-derive 0.8.0", - "sha2 0.10.7", + "sha2 0.10.8", "sha3", "unsigned-varint", ] @@ -8739,7 +8739,7 @@ dependencies = [ "ripemd", "serde", "sha1", - "sha2 0.10.7", + "sha2 0.10.8", "sha3", "strobe-rs", ] @@ -11972,7 +11972,7 @@ checksum = "4e69bf016dc406eff7d53a7d3f7cf1c2e72c82b9088aac1118591e36dd2cd3e9" dependencies = [ "bitcoin_hashes 0.13.0", "rand 0.8.5", - "rand_core 0.5.1", + "rand_core 0.6.4", "serde", "unicode-normalization", ] @@ -12494,7 +12494,7 @@ checksum = "56af0a30af74d0445c0bf6d9d051c979b516a1a5af790d251daee76005420a48" dependencies = [ "once_cell", "pest", - "sha2 0.10.7", + "sha2 0.10.8", ] [[package]] @@ -17816,7 +17816,7 @@ dependencies = [ "merlin", "rand_core 0.6.4", "serde_bytes", - "sha2 0.10.7", + "sha2 0.10.8", "subtle 2.5.0", "zeroize", ] @@ -18229,9 +18229,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", @@ -18489,7 +18489,7 @@ dependencies = [ "schnorrkel 0.10.2", "serde", "serde_json", - "sha2 0.10.7", + "sha2 0.10.8", "sha3", "siphasher", 
"slab", @@ -18556,7 +18556,7 @@ dependencies = [ "rand_core 0.6.4", "ring 0.17.7", "rustc_version 0.4.0", - "sha2 0.10.7", + "sha2 0.10.8", "subtle 2.5.0", ] @@ -19404,7 +19404,7 @@ dependencies = [ "byteorder", "criterion 0.4.0", "digest 0.10.7", - "sha2 0.10.7", + "sha2 0.10.8", "sha3", "sp-crypto-hashing-proc-macro", "twox-hash", @@ -19825,7 +19825,7 @@ dependencies = [ "parity-scale-codec", "rand 0.8.5", "scale-info", - "sha2 0.10.7", + "sha2 0.10.8", "sp-api", "sp-application-crypto", "sp-core", @@ -20369,9 +20369,9 @@ dependencies = [ [[package]] name = "str0m" -version = "0.2.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee48572247f422dcbe68630c973f8296fbd5157119cd36a3223e48bf83d47727" +checksum = "d3f10d3f68e60168d81110410428a435dbde28cc5525f5f7c6fdec92dbdc2800" dependencies = [ "combine", "crc", @@ -20522,7 +20522,7 @@ dependencies = [ "pbkdf2", "rustc-hex", "schnorrkel 0.11.4", - "sha2 0.10.7", + "sha2 0.10.8", "zeroize", ] @@ -22214,7 +22214,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rand_core 0.6.4", - "sha2 0.10.7", + "sha2 0.10.8", "sha3", "thiserror", "zeroize", @@ -22532,7 +22532,7 @@ dependencies = [ "log", "rustix 0.36.15", "serde", - "sha2 0.10.7", + "sha2 0.10.8", "toml 0.5.11", "windows-sys 0.45.0", "zstd 0.11.2+zstd.1.5.2", diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml index 0879481a419..cda652d4f65 100644 --- a/substrate/client/network/Cargo.toml +++ b/substrate/client/network/Cargo.toml @@ -59,7 +59,7 @@ sp-blockchain = { path = "../../primitives/blockchain" } sp-core = { path = "../../primitives/core" } sp-runtime = { path = "../../primitives/runtime" } wasm-timer = "0.2" -litep2p = { git = "https://github.com/paritytech/litep2p", branch = "master" } +litep2p = { git = "https://github.com/paritytech/litep2p", rev = "e03a6023882db111beeb24d8c0ceaac0721d3f0f" } once_cell = "1.18.0" void = "1.0.2" schnellru = "0.2.1" diff --git a/substrate/client/network/src/litep2p/discovery.rs b/substrate/client/network/src/litep2p/discovery.rs index 27f4d547372..47a620db132 100644 --- a/substrate/client/network/src/litep2p/discovery.rs +++ b/substrate/client/network/src/litep2p/discovery.rs @@ -462,7 +462,10 @@ impl Stream for Discovery { "`GET_RECORD` succeeded for {query_id:?}: {record:?}", ); - return Poll::Ready(Some(DiscoveryEvent::GetRecordSuccess { query_id, record })); + return Poll::Ready(Some(DiscoveryEvent::GetRecordSuccess { + query_id, + record: record.record, + })); }, Poll::Ready(Some(KademliaEvent::PutRecordSucess { query_id, key: _ })) => return Poll::Ready(Some(DiscoveryEvent::PutRecordSuccess { query_id })), diff --git a/substrate/client/network/types/Cargo.toml b/substrate/client/network/types/Cargo.toml index d8f03939ab9..e36d34b86fc 100644 --- a/substrate/client/network/types/Cargo.toml +++ b/substrate/client/network/types/Cargo.toml @@ -12,7 +12,7 @@ documentation = "https://docs.rs/sc-network-types" [dependencies] bs58 = "0.4.0" libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } -litep2p = { git = "https://github.com/paritytech/litep2p", branch = "master" } +litep2p = { git = "https://github.com/paritytech/litep2p", rev = "e03a6023882db111beeb24d8c0ceaac0721d3f0f" } multiaddr = "0.17.0" multihash = { version = "0.17.0", default-features = false, features = ["identity", "multihash-impl", "sha2", "std"] } rand = "0.8.5" -- GitLab From b85c5a0df7e53358167c7ce6a3aa3b9900e18476 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: 
Thu, 2 May 2024 14:20:51 +0200 Subject: [PATCH 103/107] Add PoV-reclaim enablement guide to polkadot-sdk-docs (#4244) This adds a small guide on how to prepare your parachain for pov-reclaim usage. --- Cargo.lock | 7 ++ docs/sdk/Cargo.toml | 7 +- docs/sdk/src/guides/enable_pov_reclaim.rs | 84 +++++++++++++++++++++++ docs/sdk/src/guides/mod.rs | 3 + docs/sdk/src/meta_contributing.rs | 2 +- templates/parachain/node/Cargo.toml | 1 + templates/parachain/node/src/service.rs | 2 + templates/parachain/runtime/Cargo.toml | 1 + templates/parachain/runtime/src/lib.rs | 1 + 9 files changed, 106 insertions(+), 2 deletions(-) create mode 100644 docs/sdk/src/guides/enable_pov_reclaim.rs diff --git a/Cargo.lock b/Cargo.lock index f5b2ee855f5..d72d6229e1a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11786,6 +11786,7 @@ dependencies = [ "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-relay-chain-interface", + "docify", "frame-benchmarking", "frame-benchmarking-cli", "futures", @@ -11842,6 +11843,7 @@ dependencies = [ "cumulus-primitives-core", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", + "docify", "frame-benchmarking", "frame-executive", "frame-support", @@ -13897,8 +13899,11 @@ dependencies = [ name = "polkadot-sdk-docs" version = "0.0.1" dependencies = [ + "cumulus-client-service", "cumulus-pallet-aura-ext", "cumulus-pallet-parachain-system", + "cumulus-primitives-proof-size-hostfunction", + "cumulus-primitives-storage-weight-reclaim", "docify", "frame-executive", "frame-support", @@ -13936,9 +13941,11 @@ dependencies = [ "sc-consensus-grandpa", "sc-consensus-manual-seal", "sc-consensus-pow", + "sc-executor", "sc-network", "sc-rpc", "sc-rpc-api", + "sc-service", "scale-info", "simple-mermaid", "sp-api", diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index 426c5d9de4a..fe53845d849 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -51,6 +51,8 @@ sc-consensus-grandpa = { path = "../../substrate/client/consensus/grandpa" } sc-consensus-beefy = { path = "../../substrate/client/consensus/beefy" } sc-consensus-manual-seal = { path = "../../substrate/client/consensus/manual-seal" } sc-consensus-pow = { path = "../../substrate/client/consensus/pow" } +sc-executor = { path = "../../substrate/client/executor" } +sc-service = { path = "../../substrate/client/service" } substrate-wasm-builder = { path = "../../substrate/utils/wasm-builder" } @@ -60,9 +62,12 @@ cumulus-pallet-parachain-system = { path = "../../cumulus/pallets/parachain-syst "parameterized-consensus-hook", ] } parachain-info = { package = "staging-parachain-info", path = "../../cumulus/parachains/pallets/parachain-info" } -pallet-aura = { path = "../../substrate/frame/aura", default-features = false } +cumulus-primitives-proof-size-hostfunction = { path = "../../cumulus/primitives/proof-size-hostfunction" } +cumulus-client-service = { path = "../../cumulus/client/service" } +cumulus-primitives-storage-weight-reclaim = { path = "../../cumulus/primitives/storage-weight-reclaim" } # Pallets and FRAME internals +pallet-aura = { path = "../../substrate/frame/aura" } pallet-timestamp = { path = "../../substrate/frame/timestamp" } pallet-balances = { path = "../../substrate/frame/balances" } pallet-assets = { path = "../../substrate/frame/assets" } diff --git a/docs/sdk/src/guides/enable_pov_reclaim.rs b/docs/sdk/src/guides/enable_pov_reclaim.rs new file mode 100644 index 00000000000..3c0c5fba215 --- /dev/null +++ b/docs/sdk/src/guides/enable_pov_reclaim.rs @@ -0,0 +1,84 @@ +//! 
This guide will teach you how to enable storage weight reclaiming for a parachain. The +//! explanations in this guide assume a project structure similar to the one detailed in +//! the [substrate documentation](crate::polkadot_sdk::substrate#anatomy-of-a-binary-crate). Full +//! technical details are available in the original [pull request](https://github.com/paritytech/polkadot-sdk/pull/3002). +//! +//! # What is PoV reclaim? +//! When a parachain submits a block to a relay chain like Polkadot or Kusama, it sends the block +//! itself and a storage proof. Together they form the Proof-of-Validity (PoV). The PoV allows the +//! relay chain to validate the parachain block by re-executing it. Relay chain +//! validators distribute this PoV among themselves over the network. This distribution is costly +//! and limits the size of the storage proof. The storage weight dimension of FRAME weights reflects +//! this cost and limits the size of the storage proof. However, the storage weight determined +//! during [benchmarking](crate::reference_docs::frame_benchmarking_weight) represents the worst +//! case. In reality, runtime operations often consume less space in the storage proof. PoV reclaim +//! offers a mechanism to reclaim the difference between the benchmarked worst-case and the real +//! proof-size consumption. +//! +//! +//! # How to enable PoV reclaim +//! ## 1. Add the host function to your node +//! +//! To reclaim excess storage weight, a parachain runtime needs the +//! ability to fetch the size of the storage proof from the node. The reclaim +//! mechanism uses the +//! [`storage_proof_size`](cumulus_primitives_proof_size_hostfunction::storage_proof_size) +//! host function for this purpose. For convenience, cumulus provides +//! [`ParachainHostFunctions`](cumulus_client_service::ParachainHostFunctions), a set of +//! host functions typically used by cumulus-based parachains. In the binary crate of your +//! parachain, find the instantiation of the [`WasmExecutor`](sc_executor::WasmExecutor) and set the +//! correct generic type. +//! +//! This example from the parachain-template shows a type definition that includes the correct +//! host functions. +#![doc = docify::embed!("../../templates/parachain/node/src/service.rs", wasm_executor)] +//! +//! > **Note:** +//! > +//! > If you see error `runtime requires function imports which are not present on the host: +//! > 'env:ext_storage_proof_size_storage_proof_size_version_1'`, it is likely +//! > that this step in the guide was not set up correctly. +//! +//! ## 2. Enable storage proof recording during import +//! +//! The reclaim mechanism reads the size of the currently recorded storage proof multiple times +//! during block authoring and block import. Proof recording during authoring is already enabled on +//! parachains. You must also ensure that storage proof recording is enabled during block import. +//! Find where your node builds the fundamental substrate components by calling +//! [`new_full_parts`](sc_service::new_full_parts). Replace this +//! with [`new_full_parts_record_import`](sc_service::new_full_parts_record_import) and +//! pass `true` as the last parameter to enable import recording. +#![doc = docify::embed!("../../templates/parachain/node/src/service.rs", component_instantiation)] +//! +//! > **Note:** +//! > +//! > If you see error `Storage root must match that calculated.` during block import, it is likely +//! > that this step in the guide was not +//! > set up correctly. +//! +//! ## 3. 
Add the SignedExtension to your runtime +//! +//! In your runtime, you will find a list of SignedExtensions. +//! To enable the reclaiming, +//! add [`StorageWeightReclaim`](cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim) +//! to that list. For maximum efficiency, make sure that `StorageWeightReclaim` is last in the list. +//! The extension will check the size of the storage proof before and after an extrinsic execution. +//! It reclaims the difference between the calculated size and the benchmarked size. +#![doc = docify::embed!("../../templates/parachain/runtime/src/lib.rs", template_signed_extra)] +//! +//! ## Optional: Verify that reclaim works +//! +//! Start your node with the log target `runtime::storage_reclaim` set to `trace` to enable full +//! logging for `StorageWeightReclaim`. The following log is an example from a local testnet. To +//! trigger the log, execute any extrinsic on the network. +//! +//! ```ignore +//! ... +//! 2024-04-22 17:31:48.014 TRACE runtime::storage_reclaim: [ferdie] Reclaiming storage weight. benchmarked: 3593, consumed: 265 unspent: 0 +//! ... +//! ``` +//! +//! In the above example we see a benchmarked size of 3593 bytes, while the extrinsic only consumed +//! 265 bytes of proof size. This results in 3328 bytes of reclaim. +#![deny(rustdoc::broken_intra_doc_links)] +#![deny(rustdoc::private_intra_doc_links)] diff --git a/docs/sdk/src/guides/mod.rs b/docs/sdk/src/guides/mod.rs index 3120f253310..2dc807af8ea 100644 --- a/docs/sdk/src/guides/mod.rs +++ b/docs/sdk/src/guides/mod.rs @@ -23,3 +23,6 @@ pub mod cumulus_enabled_parachain; /// How to make a given runtime XCM-enabled, capable of sending messages (`Transact`) between itself /// and the relay chain to which it is connected. pub mod xcm_enabled_parachain; + +/// How to enable storage weight reclaiming in a parachain node and runtime. +pub mod enable_pov_reclaim; diff --git a/docs/sdk/src/meta_contributing.rs b/docs/sdk/src/meta_contributing.rs index fcdcea9934b..a029595254c 100644 --- a/docs/sdk/src/meta_contributing.rs +++ b/docs/sdk/src/meta_contributing.rs @@ -139,7 +139,7 @@ //! //! ```sh //! SKIP_WASM_BUILD=1 \ -//! RUSTDOCFLAGS="--html-in-header $(pwd)/docs/sdk/headers/header.html --extend-css $(pwd)/docs/sdk/headers/theme.css --default-theme=ayu" \ +//! RUSTDOCFLAGS="--html-in-header $(pwd)/docs/sdk/assets/header.html --extend-css $(pwd)/docs/sdk/assets/theme.css --default-theme=ayu" \ //! cargo doc -p polkadot-sdk-docs --no-deps --open //! ``` //! 
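For orientation, the two template definitions that the new guide embeds via `docify` look roughly like the sketch below. This is a minimal sketch assuming the cumulus-based parachain template; `Runtime` and the exact set of extensions are the template's and may differ in other runtimes.

```rust
// Step 1 (node side): the executor must include the parachain host functions,
// which expose `storage_proof_size` to the runtime.
type ParachainExecutor =
    sc_executor::WasmExecutor<cumulus_client_service::ParachainHostFunctions>;

// Step 3 (runtime side): `StorageWeightReclaim` sits last in the tuple so that
// it observes the proof size consumed by the whole extrinsic pipeline.
pub type SignedExtra = (
    frame_system::CheckNonZeroSender<Runtime>,
    frame_system::CheckSpecVersion<Runtime>,
    frame_system::CheckTxVersion<Runtime>,
    frame_system::CheckGenesis<Runtime>,
    frame_system::CheckEra<Runtime>,
    frame_system::CheckNonce<Runtime>,
    frame_system::CheckWeight<Runtime>,
    pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
    cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
);
```

Placing `StorageWeightReclaim` last keeps the before/after proof-size measurement as close to the actual extrinsic execution as possible, which is what makes the reclaimed difference accurate.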
diff --git a/templates/parachain/node/Cargo.toml b/templates/parachain/node/Cargo.toml index 63267acdbca..ed857b4e4b9 100644 --- a/templates/parachain/node/Cargo.toml +++ b/templates/parachain/node/Cargo.toml @@ -24,6 +24,7 @@ serde = { features = ["derive"], workspace = true, default-features = true } jsonrpsee = { version = "0.22", features = ["server"] } futures = "0.3.28" serde_json = { workspace = true, default-features = true } +docify = "0.2.8" # Local parachain-template-runtime = { path = "../runtime" } diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs index 20e5b37b41b..ad4689c6e55 100644 --- a/templates/parachain/node/src/service.rs +++ b/templates/parachain/node/src/service.rs @@ -39,6 +39,7 @@ use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_keystore::KeystorePtr; use substrate_prometheus_endpoint::Registry; +#[docify::export(wasm_executor)] type ParachainExecutor = WasmExecutor; type ParachainClient = TFullClient; @@ -61,6 +62,7 @@ pub type Service = PartialComponents< /// /// Use this macro if you don't actually need the full service, but just the builder in order to /// be able to perform chain operations. +#[docify::export(component_instantiation)] pub fn new_partial(config: &Configuration) -> Result { let telemetry = config .telemetry_endpoints diff --git a/templates/parachain/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml index 9cd5b7ca599..d15ff2807a6 100644 --- a/templates/parachain/runtime/Cargo.toml +++ b/templates/parachain/runtime/Cargo.toml @@ -28,6 +28,7 @@ scale-info = { version = "2.11.1", default-features = false, features = [ "derive", ] } smallvec = "1.11.0" +docify = "0.2.8" # Local pallet-parachain-template = { path = "../pallets/template", default-features = false } diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs index 14af00f9b2c..179a425ca04 100644 --- a/templates/parachain/runtime/src/lib.rs +++ b/templates/parachain/runtime/src/lib.rs @@ -75,6 +75,7 @@ pub type SignedBlock = generic::SignedBlock; pub type BlockId = generic::BlockId; /// The SignedExtension to the basic transaction logic. 
+#[docify::export(template_signed_extra)] pub type SignedExtra = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, -- GitLab From 4e0b3abbd696c809aebd8d5e64a671abf843087f Mon Sep 17 00:00:00 2001 From: Qinxuan Chen Date: Thu, 2 May 2024 20:25:08 +0800 Subject: [PATCH 104/107] deps: update jsonrpsee to v0.22.5 (#4330) use `server-core` feature instead of `server` feature when defining the rpc api --- Cargo.lock | 97 ++++++------------- .../client/consensus/babe/rpc/Cargo.toml | 2 +- .../client/consensus/beefy/rpc/Cargo.toml | 2 +- .../client/consensus/grandpa/rpc/Cargo.toml | 2 +- .../client/consensus/manual-seal/Cargo.toml | 2 +- .../merkle-mountain-range/rpc/Cargo.toml | 2 +- substrate/client/rpc-api/Cargo.toml | 2 +- substrate/client/rpc-spec-v2/Cargo.toml | 2 +- substrate/client/sync-state-rpc/Cargo.toml | 2 +- .../frame/transaction-payment/rpc/Cargo.toml | 2 +- .../rpc/state-trie-migration-rpc/Cargo.toml | 2 +- substrate/utils/frame/rpc/system/Cargo.toml | 2 +- 12 files changed, 43 insertions(+), 76 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d72d6229e1a..30a4d62900f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1080,7 +1080,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" dependencies = [ "concurrent-queue", - "event-listener 2.5.3", + "event-listener", "futures-core", ] @@ -1090,7 +1090,7 @@ version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fa3dc5f2a8564f07759c008b9109dc0d39de92a88d5588b8a5036d286383afb" dependencies = [ - "async-lock 2.8.0", + "async-lock", "async-task", "concurrent-queue", "fastrand 1.9.0", @@ -1104,7 +1104,7 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "279cf904654eeebfa37ac9bb1598880884924aab82e290aa65c9e77a0e142e06" dependencies = [ - "async-lock 2.8.0", + "async-lock", "autocfg", "blocking", "futures-lite", @@ -1119,7 +1119,7 @@ dependencies = [ "async-channel", "async-executor", "async-io", - "async-lock 2.8.0", + "async-lock", "blocking", "futures-lite", "once_cell", @@ -1131,7 +1131,7 @@ version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ - "async-lock 2.8.0", + "async-lock", "autocfg", "cfg-if", "concurrent-queue", @@ -1151,18 +1151,7 @@ version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" dependencies = [ - "event-listener 2.5.3", -] - -[[package]] -name = "async-lock" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" -dependencies = [ - "event-listener 4.0.3", - "event-listener-strategy", - "pin-project-lite 0.2.12", + "event-listener", ] [[package]] @@ -1184,11 +1173,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a9d28b1d97e08915212e2e45310d47854eafa69600756fc735fb788f75199c9" dependencies = [ "async-io", - "async-lock 2.8.0", + "async-lock", "autocfg", "blocking", "cfg-if", - "event-listener 2.5.3", + "event-listener", "futures-lite", "rustix 0.37.23", "signal-hook", @@ -1205,7 +1194,7 @@ dependencies = [ "async-channel", "async-global-executor", "async-io", - "async-lock 2.8.0", + "async-lock", "crossbeam-utils", "futures-channel", "futures-core", @@ 
-1618,7 +1607,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77231a1c8f801696fc0123ec6150ce92cffb8e164a02afb9c8ddee0e9b65ad65" dependencies = [ "async-channel", - "async-lock 2.8.0", + "async-lock", "async-task", "atomic-waker", "fastrand 1.9.0", @@ -5331,27 +5320,6 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" -[[package]] -name = "event-listener" -version = "4.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite 0.2.12", -] - -[[package]] -name = "event-listener-strategy" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" -dependencies = [ - "event-listener 4.0.3", - "pin-project-lite 0.2.12", -] - [[package]] name = "exit-future" version = "0.2.0" @@ -7036,7 +7004,7 @@ dependencies = [ "curl", "curl-sys", "encoding_rs", - "event-listener 2.5.3", + "event-listener", "futures-lite", "http", "log", @@ -7112,9 +7080,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.22.2" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f3ae45a64cfc0882934f963be9431b2a165d667f53140358181f262aca0702" +checksum = "cfdb12a2381ea5b2e68c3469ec604a007b367778cdb14d09612c8069ebd616ad" dependencies = [ "jsonrpsee-core", "jsonrpsee-http-client", @@ -7128,9 +7096,9 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.22.2" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "455fc882e56f58228df2aee36b88a1340eafd707c76af2fa68cf94b37d461131" +checksum = "4978087a58c3ab02efc5b07c5e5e2803024536106fd5506f558db172c889b3aa" dependencies = [ "futures-util", "http", @@ -7149,12 +7117,11 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.22.2" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b75568f4f9696e3a47426e1985b548e1a9fcb13372a5e320372acaf04aca30d1" +checksum = "b4b257e1ec385e07b0255dde0b933f948b5c8b8c28d42afda9587c3a967b896d" dependencies = [ "anyhow", - "async-lock 3.3.0", "async-trait", "beef", "futures-timer", @@ -7175,9 +7142,9 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.22.2" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e7a95e346f55df84fb167b7e06470e196e7d5b9488a21d69c5d9732043ba7ba" +checksum = "1ccf93fc4a0bfe05d851d37d7c32b7f370fe94336b52a2f0efc5f1981895c2e5" dependencies = [ "async-trait", "hyper", @@ -7195,9 +7162,9 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.22.2" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ca066e73dd70294aebc5c2675d8ffae43be944af027c857ce0d4c51785f014" +checksum = "7d0bb047e79a143b32ea03974a6bf59b62c2a4c5f5d42a381c907a8bbb3f75c0" dependencies = [ "heck 0.4.1", "proc-macro-crate 3.0.0", @@ -7208,9 +7175,9 @@ dependencies = [ [[package]] name = "jsonrpsee-server" -version = "0.22.2" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e29c1bd1f9bba83c864977c73404e505f74f730fa0db89dd490ec174e36d7f0" +checksum = 
"12d8b6a9674422a8572e0b0abb12feeb3f2aeda86528c80d0350c2bd0923ab41" dependencies = [ "futures-util", "http", @@ -7232,9 +7199,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.22.2" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3467fd35feeee179f71ab294516bdf3a81139e7aeebdd860e46897c12e1a3368" +checksum = "150d6168405890a7a3231a3c74843f58b8959471f6df76078db2619ddee1d07d" dependencies = [ "anyhow", "beef", @@ -7245,9 +7212,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.22.2" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68ca71e74983f624c0cb67828e480a981586074da8ad3a2f214c6a3f884edab9" +checksum = "58b9db2dfd5bb1194b0ce921504df9ceae210a345bc2f6c5a61432089bbab070" dependencies = [ "http", "jsonrpsee-client-transport", @@ -18438,7 +18405,7 @@ dependencies = [ "async-executor", "async-fs", "async-io", - "async-lock 2.8.0", + "async-lock", "async-net", "async-process", "blocking", @@ -18461,7 +18428,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0bb30cf57b7b5f6109ce17c3164445e2d6f270af2cb48f6e4d31c2967c9a9f5" dependencies = [ "arrayvec 0.7.4", - "async-lock 2.8.0", + "async-lock", "atomic-take", "base64 0.21.2", "bip39", @@ -18472,7 +18439,7 @@ dependencies = [ "derive_more", "ed25519-zebra 4.0.3", "either", - "event-listener 2.5.3", + "event-listener", "fnv", "futures-lite", "futures-util", @@ -18515,12 +18482,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "256b5bad1d6b49045e95fe87492ce73d5af81545d8b4d8318a872d2007024c33" dependencies = [ "async-channel", - "async-lock 2.8.0", + "async-lock", "base64 0.21.2", "blake2-rfc", "derive_more", "either", - "event-listener 2.5.3", + "event-listener", "fnv", "futures-channel", "futures-lite", diff --git a/substrate/client/consensus/babe/rpc/Cargo.toml b/substrate/client/consensus/babe/rpc/Cargo.toml index b2661bbde27..4c755df541d 100644 --- a/substrate/client/consensus/babe/rpc/Cargo.toml +++ b/substrate/client/consensus/babe/rpc/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } +jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } futures = "0.3.30" serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } diff --git a/substrate/client/consensus/beefy/rpc/Cargo.toml b/substrate/client/consensus/beefy/rpc/Cargo.toml index e46fc4f4410..0959424ba86 100644 --- a/substrate/client/consensus/beefy/rpc/Cargo.toml +++ b/substrate/client/consensus/beefy/rpc/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } futures = "0.3.30" -jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } +jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } log = { workspace = true, default-features = true } parking_lot = "0.12.1" serde = { features = ["derive"], workspace = true, default-features = true } diff --git a/substrate/client/consensus/grandpa/rpc/Cargo.toml b/substrate/client/consensus/grandpa/rpc/Cargo.toml index 0789a429ac4..9b73418c958 100644 --- a/substrate/client/consensus/grandpa/rpc/Cargo.toml +++ b/substrate/client/consensus/grandpa/rpc/Cargo.toml @@ -15,7 +15,7 @@ workspace = 
true [dependencies] finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } futures = "0.3.30" -jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } +jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } log = { workspace = true, default-features = true } parity-scale-codec = { version = "3.6.1", features = ["derive"] } serde = { features = ["derive"], workspace = true, default-features = true } diff --git a/substrate/client/consensus/manual-seal/Cargo.toml b/substrate/client/consensus/manual-seal/Cargo.toml index 1422d46105b..7aa8df248b7 100644 --- a/substrate/client/consensus/manual-seal/Cargo.toml +++ b/substrate/client/consensus/manual-seal/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } +jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } assert_matches = "1.3.0" async-trait = "0.1.79" codec = { package = "parity-scale-codec", version = "3.6.1" } diff --git a/substrate/client/merkle-mountain-range/rpc/Cargo.toml b/substrate/client/merkle-mountain-range/rpc/Cargo.toml index 9b391b76ea0..ec790790678 100644 --- a/substrate/client/merkle-mountain-range/rpc/Cargo.toml +++ b/substrate/client/merkle-mountain-range/rpc/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } +jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } serde = { features = ["derive"], workspace = true, default-features = true } sp-api = { path = "../../../primitives/api" } sp-blockchain = { path = "../../../primitives/blockchain" } diff --git a/substrate/client/rpc-api/Cargo.toml b/substrate/client/rpc-api/Cargo.toml index 169714d2245..c5613662b9f 100644 --- a/substrate/client/rpc-api/Cargo.toml +++ b/substrate/client/rpc-api/Cargo.toml @@ -28,4 +28,4 @@ sp-core = { path = "../../primitives/core" } sp-rpc = { path = "../../primitives/rpc" } sp-runtime = { path = "../../primitives/runtime" } sp-version = { path = "../../primitives/version" } -jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } +jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index f2fc7bee6e2..1b5696f657c 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } +jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } # Internal chain structures for "chain_spec". 
sc-chain-spec = { path = "../chain-spec" } # Pool for submitting extrinsics required by "transaction" diff --git a/substrate/client/sync-state-rpc/Cargo.toml b/substrate/client/sync-state-rpc/Cargo.toml index 09dc611caa0..fd053d326e9 100644 --- a/substrate/client/sync-state-rpc/Cargo.toml +++ b/substrate/client/sync-state-rpc/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } +jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } diff --git a/substrate/frame/transaction-payment/rpc/Cargo.toml b/substrate/frame/transaction-payment/rpc/Cargo.toml index 6d7f632af82..7f5e0d0b466 100644 --- a/substrate/frame/transaction-payment/rpc/Cargo.toml +++ b/substrate/frame/transaction-payment/rpc/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } +jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } pallet-transaction-payment-rpc-runtime-api = { path = "runtime-api" } sp-api = { path = "../../../primitives/api" } sp-blockchain = { path = "../../../primitives/blockchain" } diff --git a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml index 98a87b7d5b2..3673b2790c5 100644 --- a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml +++ b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml @@ -24,7 +24,7 @@ sp-state-machine = { path = "../../../../primitives/state-machine" } sp-trie = { path = "../../../../primitives/trie" } trie-db = "0.29.0" -jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } +jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } # Substrate Dependencies sc-client-api = { path = "../../../../client/api" } diff --git a/substrate/utils/frame/rpc/system/Cargo.toml b/substrate/utils/frame/rpc/system/Cargo.toml index 9e571bef9d0..3e623daa14b 100644 --- a/substrate/utils/frame/rpc/system/Cargo.toml +++ b/substrate/utils/frame/rpc/system/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] } +jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } futures = "0.3.30" log = { workspace = true, default-features = true } frame-system-rpc-runtime-api = { path = "../../../../frame/system/rpc/runtime-api" } -- GitLab From df84ea789f3fd0de20bd801e344ffa30172ffb55 Mon Sep 17 00:00:00 2001 From: Lulu Date: Thu, 2 May 2024 14:34:48 +0100 Subject: [PATCH 105/107] sc-tracing: enable env-filter feature (#4357) This crate uses this feature; however, it appears to still work without this feature enabled. I believe this is due to feature unification across the workspace: some other crate enables this feature, so it also ends up enabled here. But when this crate is pushed to crates.io and compiled individually, it fails to compile.
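For illustration, the failing pattern looks roughly like the hypothetical downstream snippet below (not sc-tracing's actual call site): `tracing_subscriber::EnvFilter` and `with_env_filter` only exist when tracing-subscriber's `env-filter` feature is enabled, so a crate that names them fails to resolve the imports when compiled in isolation without the feature.

```rust
// Hypothetical snippet, assuming `tracing` and `tracing-subscriber` (with the
// `env-filter` feature) as dependencies; sc-tracing's real call sites differ.
// With the feature disabled, `EnvFilter` is not compiled at all, so this
// fails with an unresolved-import error.
use tracing_subscriber::EnvFilter;

fn main() {
    // Read filter directives from `RUST_LOG`, falling back to `info`.
    let filter =
        EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info"));
    tracing_subscriber::fmt().with_env_filter(filter).init();
    tracing::info!("logging initialized");
}
```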
--- substrate/client/tracing/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/client/tracing/Cargo.toml b/substrate/client/tracing/Cargo.toml index ba1a7c51ab8..a8f0676a82b 100644 --- a/substrate/client/tracing/Cargo.toml +++ b/substrate/client/tracing/Cargo.toml @@ -30,7 +30,7 @@ serde = { workspace = true, default-features = true } thiserror = { workspace = true } tracing = "0.1.29" tracing-log = "0.1.3" -tracing-subscriber = { workspace = true, features = ["parking_lot"] } +tracing-subscriber = { workspace = true, features = ["env-filter", "parking_lot"] } sc-client-api = { path = "../api" } sc-tracing-proc-macro = { path = "proc-macro" } sp-api = { path = "../../primitives/api" } -- GitLab From 30a1972ee5608afa22cd5b72339acb59bb51b0f3 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Thu, 2 May 2024 16:08:24 +0200 Subject: [PATCH 106/107] More `xcm::v4` cleanup and `xcm_fee_payment_runtime_api::XcmPaymentApi` nits (#4355) This PR: - changes `xcm::v4` to `xcm::prelude` imports for coretime stuff - changes `query_acceptable_payment_assets` / `query_weight_to_asset_fee` implementations to be more resilient to XCM version changes - adds `xcm_fee_payment_runtime_api::XcmPaymentApi` to the AssetHubRococo/Westend runtimes, exposing the native token as an acceptable payment asset (a client-side usage sketch is appended at the end of this series) Continuation of: https://github.com/paritytech/polkadot-sdk/pull/3607 Closes: https://github.com/paritytech/polkadot-sdk/issues/4297 ## Possible follow-ups - [ ] add all sufficient assets (`Assets`, `ForeignAssets`) as acceptable payment assets? --- Cargo.lock | 2 + .../assets/asset-hub-rococo/Cargo.toml | 2 + .../assets/asset-hub-rococo/src/lib.rs | 48 +++++++++++++++- .../assets/asset-hub-westend/Cargo.toml | 2 + .../assets/asset-hub-westend/src/lib.rs | 56 ++++++++++++++++--- .../parachains/src/coretime/migration.rs | 2 +- .../runtime/parachains/src/coretime/mod.rs | 4 +- polkadot/runtime/rococo/src/lib.rs | 32 +++++++---- polkadot/runtime/westend/src/lib.rs | 32 +++++++---- .../xcm-fee-payment-runtime-api/src/lib.rs | 2 +- 10 files changed, 145 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 30a4d62900f..634a98f82f8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -887,6 +887,7 @@ dependencies = [ "staging-xcm-executor", "substrate-wasm-builder", "testnet-parachains-constants", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -1009,6 +1010,7 @@ dependencies = [ "substrate-wasm-builder", "testnet-parachains-constants", "westend-runtime-constants", + "xcm-fee-payment-runtime-api", ] [[package]] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index 47574783810..64abedbaac7 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -69,6 +69,7 @@ polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", def xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features
= false } @@ -246,6 +247,7 @@ std = [ "testnet-parachains-constants/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index c4f4bd4c1ee..2a8c5549f61 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -62,7 +62,7 @@ use frame_support::{ ConstU128, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Equals, InstanceFilter, TransformOrigin, }, - weights::{ConstantMultiplier, Weight}, + weights::{ConstantMultiplier, Weight, WeightToFee as _}, BoundedVec, PalletId, }; use frame_system::{ @@ -91,13 +91,16 @@ pub use sp_runtime::BuildStorage; // Polkadot imports use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; -// We exclude `Assets` since it's the name of a pallet #[cfg(feature = "runtime-benchmarks")] use xcm::latest::prelude::{ Asset, Fungible, Here, InteriorLocation, Junction, Junction::*, Location, NetworkId, NonFungible, Parent, ParentThen, Response, XCM_VERSION, }; -use xcm::latest::prelude::{AssetId, BodyId}; +use xcm::{ + latest::prelude::{AssetId, BodyId}, + IntoVersion, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, +}; +use xcm_fee_payment_runtime_api::Error as XcmPaymentApiError; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; @@ -1278,6 +1281,45 @@ impl_runtime_apis! { } } + impl xcm_fee_payment_runtime_api::XcmPaymentApi for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { + let acceptable = vec![ + // native token + VersionedAssetId::from(AssetId(xcm_config::TokenLocation::get())) + ]; + + Ok(acceptable + .into_iter() + .filter_map(|asset| asset.into_version(xcm_version).ok()) + .collect()) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { + match asset.try_as::() { + Ok(asset_id) if asset_id.0 == xcm_config::TokenLocation::get() => { + // for native token + Ok(WeightToFee::weight_to_fee(&weight)) + }, + Ok(asset_id) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + }, + Err(_) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_delivery_fees(destination, message) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index 554659415a0..a767ac5af6c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -66,6 +66,7 @@ westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/co xcm = { package = "staging-xcm", 
path = "../../../../../polkadot/xcm", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } @@ -239,6 +240,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index a3593732f31..f9ab2145ffc 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -45,7 +45,7 @@ use frame_support::{ AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, Equals, InstanceFilter, TransformOrigin, }, - weights::{ConstantMultiplier, Weight}, + weights::{ConstantMultiplier, Weight, WeightToFee as _}, BoundedVec, PalletId, }; use frame_system::{ @@ -74,9 +74,10 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use testnet_parachains_constants::westend::{consensus::*, currency::*, fee::WeightToFee, time::*}; use xcm_config::{ - ForeignAssetsConvertedConcreteId, PoolAssetsConvertedConcreteId, - TrustBackedAssetsConvertedConcreteId, TrustBackedAssetsPalletLocationV3, WestendLocation, - WestendLocationV3, XcmOriginToTransactDispatchOrigin, + ForeignAssetsConvertedConcreteId, ForeignCreatorsSovereignAccountOf, + PoolAssetsConvertedConcreteId, TrustBackedAssetsConvertedConcreteId, + TrustBackedAssetsPalletLocationV3, WestendLocation, WestendLocationV3, + XcmOriginToTransactDispatchOrigin, }; #[cfg(any(feature = "std", test))] @@ -84,16 +85,18 @@ pub use sp_runtime::BuildStorage; use assets_common::{foreign_creators::ForeignCreators, matching::FromSiblingParachain}; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; -// We exclude `Assets` since it's the name of a pallet -use xcm::latest::prelude::AssetId; #[cfg(feature = "runtime-benchmarks")] use xcm::latest::prelude::{ Asset, Fungible, Here, InteriorLocation, Junction, Junction::*, Location, NetworkId, NonFungible, Parent, ParentThen, Response, XCM_VERSION, }; +use xcm::{ + latest::prelude::AssetId, IntoVersion, VersionedAssetId, VersionedAssets, VersionedLocation, + VersionedXcm, +}; +use xcm_fee_payment_runtime_api::Error as XcmPaymentApiError; -use crate::xcm_config::ForeignCreatorsSovereignAccountOf; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; impl_opaque_keys! { @@ -1369,6 +1372,45 @@ impl_runtime_apis! 
{ } } + impl xcm_fee_payment_runtime_api::XcmPaymentApi for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { + let acceptable = vec![ + // native token + VersionedAssetId::from(AssetId(xcm_config::WestendLocation::get())) + ]; + + Ok(acceptable + .into_iter() + .filter_map(|asset| asset.into_version(xcm_version).ok()) + .collect()) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { + match asset.try_as::() { + Ok(asset_id) if asset_id.0 == xcm_config::WestendLocation::get() => { + // for native token + Ok(WeightToFee::weight_to_fee(&weight)) + }, + Ok(asset_id) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + }, + Err(_) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_delivery_fees(destination, message) + } + } + impl cumulus_primitives_core::CollectCollationInfo for Runtime { fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/polkadot/runtime/parachains/src/coretime/migration.rs b/polkadot/runtime/parachains/src/coretime/migration.rs index 4f52fc99ec3..6c8ddaa8aab 100644 --- a/polkadot/runtime/parachains/src/coretime/migration.rs +++ b/polkadot/runtime/parachains/src/coretime/migration.rs @@ -46,7 +46,7 @@ mod v_coretime { #[cfg(feature = "try-runtime")] use sp_std::vec::Vec; use sp_std::{iter, prelude::*, result}; - use xcm::v4::{send_xcm, Instruction, Junction, Location, SendError, WeightLimit, Xcm}; + use xcm::prelude::{send_xcm, Instruction, Junction, Location, SendError, WeightLimit, Xcm}; /// Return information about a legacy lease of a parachain. pub trait GetLegacyLease { diff --git a/polkadot/runtime/parachains/src/coretime/mod.rs b/polkadot/runtime/parachains/src/coretime/mod.rs index a30f7336f69..94bce4c83e6 100644 --- a/polkadot/runtime/parachains/src/coretime/mod.rs +++ b/polkadot/runtime/parachains/src/coretime/mod.rs @@ -26,7 +26,9 @@ pub use pallet::*; use pallet_broker::{CoreAssignment, CoreIndex as BrokerCoreIndex}; use primitives::{CoreIndex, Id as ParaId}; use sp_arithmetic::traits::SaturatedConversion; -use xcm::v4::{send_xcm, Instruction, Junction, Location, OriginKind, SendXcm, WeightLimit, Xcm}; +use xcm::prelude::{ + send_xcm, Instruction, Junction, Location, OriginKind, SendXcm, WeightLimit, Xcm, +}; use crate::{ assigner_coretime::{self, PartsOf57600}, diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 56f4aa40ef1..90a0fe41d4a 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1765,24 +1765,32 @@ sp_api::impl_runtime_apis! 
{ impl xcm_fee_payment_runtime_api::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { - if !matches!(xcm_version, 3 | 4) { - return Err(XcmPaymentApiError::UnhandledXcmVersion); - } - Ok([VersionedAssetId::V4(xcm_config::TokenLocation::get().into())] + let acceptable = vec![ + // native token + VersionedAssetId::from(AssetId(xcm_config::TokenLocation::get())) + ]; + + Ok(acceptable .into_iter() .filter_map(|asset| asset.into_version(xcm_version).ok()) .collect()) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - let local_asset = VersionedAssetId::V4(xcm_config::TokenLocation::get().into()); - let asset = asset - .into_version(4) - .map_err(|_| XcmPaymentApiError::VersionedConversionFailed)?; - - if asset != local_asset { return Err(XcmPaymentApiError::AssetNotFound); } - - Ok(WeightToFee::weight_to_fee(&weight)) + match asset.try_as::() { + Ok(asset_id) if asset_id.0 == xcm_config::TokenLocation::get() => { + // for native token + Ok(WeightToFee::weight_to_fee(&weight)) + }, + Ok(asset_id) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + }, + Err(_) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } } fn query_xcm_weight(message: VersionedXcm<()>) -> Result { diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 0a5312b538b..05a417e17f9 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -2199,24 +2199,32 @@ sp_api::impl_runtime_apis! 
{ impl xcm_fee_payment_runtime_api::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { - if !matches!(xcm_version, 3 | 4) { - return Err(XcmPaymentApiError::UnhandledXcmVersion); - } - Ok([VersionedAssetId::V4(xcm_config::TokenLocation::get().into())] + let acceptable = vec![ + // native token + VersionedAssetId::from(AssetId(xcm_config::TokenLocation::get())) + ]; + + Ok(acceptable .into_iter() .filter_map(|asset| asset.into_version(xcm_version).ok()) .collect()) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - let local_asset = VersionedAssetId::V4(xcm_config::TokenLocation::get().into()); - let asset = asset - .into_version(4) - .map_err(|_| XcmPaymentApiError::VersionedConversionFailed)?; - - if asset != local_asset { return Err(XcmPaymentApiError::AssetNotFound); } - - Ok(WeightToFee::weight_to_fee(&weight)) + match asset.try_as::() { + Ok(asset_id) if asset_id.0 == xcm_config::TokenLocation::get() => { + // for native token + Ok(WeightToFee::weight_to_fee(&weight)) + }, + Ok(asset_id) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + }, + Err(_) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } } fn query_xcm_weight(message: VersionedXcm<()>) -> Result { diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/src/lib.rs b/polkadot/xcm/xcm-fee-payment-runtime-api/src/lib.rs index 20bf9236f1f..50fd4692cb0 100644 --- a/polkadot/xcm/xcm-fee-payment-runtime-api/src/lib.rs +++ b/polkadot/xcm/xcm-fee-payment-runtime-api/src/lib.rs @@ -41,7 +41,7 @@ sp_api::decl_runtime_apis! { /// /// # Arguments /// - /// * `xcm_version`: Version. + /// * `xcm_version`: desired XCM `Version` of `VersionedAssetId`. fn query_acceptable_payment_assets(xcm_version: Version) -> Result, Error>; /// Returns a weight needed to execute a XCM. -- GitLab From 6580101ef3d5c36e1d84a820136fb87f398b04a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 2 May 2024 17:19:38 +0200 Subject: [PATCH 107/107] Deprecate `NativeElseWasmExecutor` (#4329) The native executor is deprecated and downstream users should stop using it. 
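For downstream code, the migration is mechanical. A sketch using the builder options that appear in the diffs below (`sp_io::SubstrateHostFunctions` stands in for whatever host functions a node already registers):

```rust
use sc_executor::WasmExecutor;

// Before (deprecated, slated for removal at the end of 2024):
//
//     let executor = NativeElseWasmExecutor::<ExecutorDispatch>::new_with_wasm_executor(
//         WasmExecutor::builder().build(),
//     );
//
// After: construct the wasm-only executor directly.
fn new_executor() -> WasmExecutor<sp_io::SubstrateHostFunctions> {
    WasmExecutor::builder()
        .with_max_runtime_instances(1)
        .with_runtime_cache_size(2)
        .build()
}
```

Where no custom options are needed, `WasmExecutor::default()` suffices; a `Default` implementation is added as part of this patch.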
--- cumulus/test/client/src/lib.rs | 33 ++++--------- prdoc/pr_4329.prdoc | 14 ++++++ substrate/client/executor/src/executor.rs | 30 +++++++++--- substrate/client/executor/src/lib.rs | 11 ++--- .../src/chain_head/subscription/inner.rs | 2 +- .../rpc-spec-v2/src/chain_head/tests.rs | 6 +-- substrate/client/service/src/builder.rs | 14 ++++-- .../service/src/client/call_executor.rs | 17 ++----- .../service/src/client/wasm_override.rs | 23 ++++----- substrate/client/service/src/lib.rs | 9 ++-- .../client/service/test/src/client/mod.rs | 21 +++----- substrate/frame/system/src/tests.rs | 12 ++--- .../api/test/tests/runtime_calls.rs | 4 +- substrate/test-utils/client/src/lib.rs | 23 +++------ .../test-utils/runtime/client/src/lib.rs | 49 ++++--------------- 15 files changed, 115 insertions(+), 153 deletions(-) create mode 100644 prdoc/pr_4329.prdoc diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index a39a662553b..d233ad26917 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -45,33 +45,18 @@ pub use substrate_test_client::*; pub type ParachainBlockData = cumulus_primitives_core::ParachainBlockData; -mod local_executor { - /// Native executor instance. - pub struct LocalExecutor; - - impl sc_executor::NativeExecutionDispatch for LocalExecutor { - type ExtendHostFunctions = - cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions; - - fn dispatch(method: &str, data: &[u8]) -> Option> { - cumulus_test_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - cumulus_test_runtime::native_version() - } - } -} - -/// Native executor used for tests. -pub use local_executor::LocalExecutor; - /// Test client database backend. pub type Backend = substrate_test_client::Backend; /// Test client executor. -pub type Executor = - client::LocalCallExecutor>; +pub type Executor = client::LocalCallExecutor< + Block, + Backend, + WasmExecutor<( + sp_io::SubstrateHostFunctions, + cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions, + )>, +>; /// Test client builder for Cumulus pub type TestClientBuilder = @@ -100,7 +85,7 @@ impl substrate_test_client::GenesisInit for GenesisParameters { } } -/// A `test-runtime` extensions to `TestClientBuilder`. +/// A `test-runtime` extensions to [`TestClientBuilder`]. pub trait TestClientBuilderExt: Sized { /// Build the test client. fn build(self) -> Client { diff --git a/prdoc/pr_4329.prdoc b/prdoc/pr_4329.prdoc new file mode 100644 index 00000000000..2fe11a60f4b --- /dev/null +++ b/prdoc/pr_4329.prdoc @@ -0,0 +1,14 @@ +title: "Deprecate `NativeElseWasmExecutor`" + +doc: + - audience: Node Dev + description: | + Deprecates the `NativeElseWasmExecutor` as native execution is already + discouraged and should be removed entirely. The executor should be + replaced by `WasmExecutor` which can be found in `sc-executor`. + + The `NativeElseWasmExecutor` will be removed at the end of 2024. + +crates: + - name: sc-executor + bump: minor diff --git a/substrate/client/executor/src/executor.rs b/substrate/client/executor/src/executor.rs index d56a3b389ef..913bcdfcfe5 100644 --- a/substrate/client/executor/src/executor.rs +++ b/substrate/client/executor/src/executor.rs @@ -83,7 +83,7 @@ fn unwrap_heap_pages(pages: Option) -> HeapAllocStrategy { } /// Builder for creating a [`WasmExecutor`] instance. 
-pub struct WasmExecutorBuilder { +pub struct WasmExecutorBuilder { _phantom: PhantomData, method: WasmExecutionMethod, onchain_heap_alloc_strategy: Option, @@ -218,7 +218,7 @@ impl WasmExecutorBuilder { /// An abstraction over Wasm code executor. Supports selecting execution backend and /// manages runtime cache. -pub struct WasmExecutor { +pub struct WasmExecutor { /// Method used to execute fallback Wasm code. method: WasmExecutionMethod, /// The heap allocation strategy for onchain Wasm calls. @@ -252,10 +252,13 @@ impl Clone for WasmExecutor { } } -impl WasmExecutor -where - H: HostFunctions, -{ +impl Default for WasmExecutor { + fn default() -> Self { + WasmExecutorBuilder::new().build() + } +} + +impl WasmExecutor { /// Create new instance. /// /// # Parameters @@ -312,7 +315,12 @@ where pub fn allow_missing_host_functions(&mut self, allow_missing_host_functions: bool) { self.allow_missing_host_functions = allow_missing_host_functions } +} +impl WasmExecutor +where + H: HostFunctions, +{ /// Execute the given closure `f` with the latest runtime (based on `runtime_code`). /// /// The closure `f` is expected to return `Err(_)` when there happened a `panic!` in native code @@ -558,6 +566,9 @@ where /// A generic `CodeExecutor` implementation that uses a delegate to determine wasm code equivalence /// and dispatch to native code when possible, falling back on `WasmExecutor` when not. +#[deprecated( + note = "Native execution will be deprecated, please replace with `WasmExecutor`. Will be removed at end of 2024." +)] pub struct NativeElseWasmExecutor { /// Native runtime version info. native_version: NativeVersion, @@ -568,6 +579,7 @@ pub struct NativeElseWasmExecutor { use_native: bool, } +#[allow(deprecated)] impl NativeElseWasmExecutor { /// /// Create new instance. 
@@ -628,6 +640,7 @@ impl NativeElseWasmExecutor { } } +#[allow(deprecated)] impl RuntimeVersionOf for NativeElseWasmExecutor { fn runtime_version( &self, @@ -638,12 +651,14 @@ impl RuntimeVersionOf for NativeElseWasmExecutor } } +#[allow(deprecated)] impl GetNativeVersion for NativeElseWasmExecutor { fn native_version(&self) -> &NativeVersion { &self.native_version } } +#[allow(deprecated)] impl CodeExecutor for NativeElseWasmExecutor { type Error = Error; @@ -718,6 +733,7 @@ impl CodeExecutor for NativeElseWasmExecut } } +#[allow(deprecated)] impl Clone for NativeElseWasmExecutor { fn clone(&self) -> Self { NativeElseWasmExecutor { @@ -728,6 +744,7 @@ impl Clone for NativeElseWasmExecutor { } } +#[allow(deprecated)] impl sp_core::traits::ReadRuntimeVersion for NativeElseWasmExecutor { fn read_runtime_version( &self, @@ -765,6 +782,7 @@ mod tests { } #[test] + #[allow(deprecated)] fn native_executor_registers_custom_interface() { let executor = NativeElseWasmExecutor::::new_with_wasm_executor( WasmExecutor::builder().build(), diff --git a/substrate/client/executor/src/lib.rs b/substrate/client/executor/src/lib.rs index 6b99f0a6ee0..204f1ff22d7 100644 --- a/substrate/client/executor/src/lib.rs +++ b/substrate/client/executor/src/lib.rs @@ -36,18 +36,17 @@ mod executor; mod integration_tests; mod wasm_runtime; -pub use self::{ - executor::{ - with_externalities_safe, NativeElseWasmExecutor, NativeExecutionDispatch, WasmExecutor, - }, - wasm_runtime::{read_embedded_version, WasmExecutionMethod}, -}; pub use codec::Codec; +#[allow(deprecated)] +pub use executor::NativeElseWasmExecutor; +pub use executor::{with_externalities_safe, NativeExecutionDispatch, WasmExecutor}; #[doc(hidden)] pub use sp_core::traits::Externalities; pub use sp_version::{NativeVersion, RuntimeVersion}; #[doc(hidden)] pub use sp_wasm_interface; +pub use sp_wasm_interface::HostFunctions; +pub use wasm_runtime::{read_embedded_version, WasmExecutionMethod}; pub use sc_executor_common::{ error, diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs index 3495d9e5449..a6edc344bc6 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs @@ -864,7 +864,7 @@ mod tests { Arc>>, ) { let backend = Arc::new(sc_client_api::in_mem::Backend::new()); - let executor = substrate_test_runtime_client::new_native_or_wasm_executor(); + let executor = substrate_test_runtime_client::WasmExecutor::default(); let client_config = sc_service::ClientConfig::default(); let genesis_block_builder = sc_service::GenesisBlockBuilder::new( &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 4bab2194e08..363d11235dd 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -16,13 +16,12 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use super::*; use crate::{ chain_head::{api::ChainHeadApiClient, event::MethodResponse, test_utils::ChainHeadMockClient}, common::events::{StorageQuery, StorageQueryType, StorageResultType}, hex_string, }; - -use super::*; use assert_matches::assert_matches; use codec::{Decode, Encode}; use futures::Future; @@ -32,7 +31,6 @@ use jsonrpsee::{ }, rpc_params, MethodsError as Error, RpcModule, }; - use sc_block_builder::BlockBuilderBuilder; use sc_client_api::ChildInfo; use sc_service::client::new_in_mem; @@ -2550,7 +2548,7 @@ async fn follow_report_multiple_pruned_block() { async fn pin_block_references() { // Manually construct an in-memory backend and client. let backend = Arc::new(sc_client_api::in_mem::Backend::new()); - let executor = substrate_test_runtime_client::new_native_or_wasm_executor(); + let executor = substrate_test_runtime_client::WasmExecutor::default(); let client_config = sc_service::ClientConfig::default(); let genesis_block_builder = sc_service::GenesisBlockBuilder::new( diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index d0d7cba3862..06fc2ea3b30 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -37,8 +37,8 @@ use sc_client_api::{ use sc_client_db::{Backend, DatabaseSettings}; use sc_consensus::import_queue::ImportQueue; use sc_executor::{ - sp_wasm_interface::HostFunctions, HeapAllocStrategy, NativeElseWasmExecutor, - NativeExecutionDispatch, RuntimeVersionOf, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY, + sp_wasm_interface::HostFunctions, HeapAllocStrategy, NativeExecutionDispatch, RuntimeVersionOf, + WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY, }; use sc_keystore::LocalKeystore; use sc_network::{ @@ -262,11 +262,15 @@ where Ok((client, backend, keystore_container, task_manager)) } -/// Creates a [`NativeElseWasmExecutor`] according to [`Configuration`]. +/// Creates a [`NativeElseWasmExecutor`](sc_executor::NativeElseWasmExecutor) according to +/// [`Configuration`]. +#[deprecated(note = "Please switch to `new_wasm_executor`. Will be removed at end of 2024.")] +#[allow(deprecated)] pub fn new_native_or_wasm_executor( config: &Configuration, -) -> NativeElseWasmExecutor { - NativeElseWasmExecutor::new_with_wasm_executor(new_wasm_executor(config)) +) -> sc_executor::NativeElseWasmExecutor { + #[allow(deprecated)] + sc_executor::NativeElseWasmExecutor::new_with_wasm_executor(new_wasm_executor(config)) } /// Creates a [`WasmExecutor`] according to [`Configuration`]. 
diff --git a/substrate/client/service/src/client/call_executor.rs b/substrate/client/service/src/client/call_executor.rs index 86b5c7c61fc..9da4d219257 100644 --- a/substrate/client/service/src/client/call_executor.rs +++ b/substrate/client/service/src/client/call_executor.rs @@ -337,26 +337,17 @@ mod tests { use super::*; use backend::Backend; use sc_client_api::in_mem; - use sc_executor::{NativeElseWasmExecutor, WasmExecutor}; + use sc_executor::WasmExecutor; use sp_core::{ testing::TaskExecutor, traits::{FetchRuntimeCode, WrappedRuntimeCode}, }; use std::collections::HashMap; - use substrate_test_runtime_client::{runtime, GenesisInit, LocalExecutorDispatch}; - - fn executor() -> NativeElseWasmExecutor { - NativeElseWasmExecutor::new_with_wasm_executor( - WasmExecutor::builder() - .with_max_runtime_instances(1) - .with_runtime_cache_size(2) - .build(), - ) - } + use substrate_test_runtime_client::{runtime, GenesisInit}; #[test] fn should_get_override_if_exists() { - let executor = executor(); + let executor = WasmExecutor::default(); let overrides = crate::client::wasm_override::dummy_overrides(); let onchain_code = WrappedRuntimeCode(substrate_test_runtime::wasm_binary_unwrap().into()); @@ -425,7 +416,7 @@ mod tests { fn returns_runtime_version_from_substitute() { const SUBSTITUTE_SPEC_NAME: &str = "substitute-spec-name-cool"; - let executor = executor(); + let executor = WasmExecutor::default(); let backend = Arc::new(in_mem::Backend::::new()); diff --git a/substrate/client/service/src/client/wasm_override.rs b/substrate/client/service/src/client/wasm_override.rs index 725c8ab9429..678ae22bec1 100644 --- a/substrate/client/service/src/client/wasm_override.rs +++ b/substrate/client/service/src/client/wasm_override.rs @@ -264,24 +264,21 @@ pub fn dummy_overrides() -> WasmOverride { #[cfg(test)] mod tests { use super::*; - use sc_executor::{HeapAllocStrategy, NativeElseWasmExecutor, WasmExecutor}; + use sc_executor::{HeapAllocStrategy, WasmExecutor}; use std::fs::{self, File}; - use substrate_test_runtime_client::LocalExecutorDispatch; - - fn executor() -> NativeElseWasmExecutor { - NativeElseWasmExecutor::::new_with_wasm_executor( - WasmExecutor::builder() - .with_onchain_heap_alloc_strategy(HeapAllocStrategy::Static {extra_pages: 128}) - .with_offchain_heap_alloc_strategy(HeapAllocStrategy::Static {extra_pages: 128}) - .with_max_runtime_instances(1) - .with_runtime_cache_size(2) - .build() - ) + + fn executor() -> WasmExecutor { + WasmExecutor::builder() + .with_onchain_heap_alloc_strategy(HeapAllocStrategy::Static { extra_pages: 128 }) + .with_offchain_heap_alloc_strategy(HeapAllocStrategy::Static { extra_pages: 128 }) + .with_max_runtime_instances(1) + .with_runtime_cache_size(2) + .build() } fn wasm_test(fun: F) where - F: Fn(&Path, &[u8], &NativeElseWasmExecutor), + F: Fn(&Path, &[u8], &WasmExecutor), { let exec = executor(); let bytes = substrate_test_runtime::wasm_binary_unwrap(); diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index e46cfab50a3..d0f315c30c8 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -55,14 +55,15 @@ use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; pub use self::{ builder::{ build_network, new_client, new_db_backend, new_full_client, new_full_parts, - new_full_parts_record_import, new_full_parts_with_genesis_builder, - new_native_or_wasm_executor, new_wasm_executor, spawn_tasks, BuildNetworkParams, - KeystoreContainer, NetworkStarter, SpawnTasksParams, 
TFullBackend, TFullCallExecutor, - TFullClient, + new_full_parts_record_import, new_full_parts_with_genesis_builder, new_wasm_executor, + spawn_tasks, BuildNetworkParams, KeystoreContainer, NetworkStarter, SpawnTasksParams, + TFullBackend, TFullCallExecutor, TFullClient, }, client::{ClientConfig, LocalCallExecutor}, error::Error, }; +#[allow(deprecated)] +pub use builder::new_native_or_wasm_executor; pub use sc_chain_spec::{ construct_genesis_block, resolve_state_version_from_wasm, BuildGenesisBlock, diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs index 51dcc4966e5..4fcb7c160cb 100644 --- a/substrate/client/service/test/src/client/mod.rs +++ b/substrate/client/service/test/src/client/mod.rs @@ -28,6 +28,7 @@ use sc_client_db::{Backend, BlocksPruning, DatabaseSettings, DatabaseSource, Pru use sc_consensus::{ BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, }; +use sc_executor::WasmExecutor; use sc_service::client::{new_in_mem, Client, LocalCallExecutor}; use sp_api::ProvideRuntimeApi; use sp_consensus::{BlockOrigin, Error as ConsensusError, SelectChain}; @@ -42,8 +43,6 @@ use sp_storage::{ChildInfo, StorageKey}; use std::{collections::HashSet, sync::Arc}; use substrate_test_runtime::TestAPI; use substrate_test_runtime_client::{ - new_native_or_wasm_executor, - prelude::*, runtime::{ currency::DOLLARS, genesismap::{insert_genesis_block, GenesisStorageBuilder}, @@ -79,7 +78,7 @@ fn construct_block( StateMachine::new( backend, &mut overlay, - &new_native_or_wasm_executor(), + &WasmExecutor::default(), "Core_initialize_block", &header.encode(), &mut Default::default(), @@ -93,7 +92,7 @@ fn construct_block( StateMachine::new( backend, &mut overlay, - &new_native_or_wasm_executor(), + &WasmExecutor::default(), "BlockBuilder_apply_extrinsic", &tx.encode(), &mut Default::default(), @@ -107,7 +106,7 @@ fn construct_block( let ret_data = StateMachine::new( backend, &mut overlay, - &new_native_or_wasm_executor(), + &WasmExecutor::default(), "BlockBuilder_finalize_block", &[], &mut Default::default(), @@ -175,7 +174,7 @@ fn construct_genesis_should_work_with_native() { let _ = StateMachine::new( &backend, &mut overlay, - &new_native_or_wasm_executor(), + &WasmExecutor::default(), "Core_execute_block", &b1data, &mut Default::default(), @@ -206,7 +205,7 @@ fn construct_genesis_should_work_with_wasm() { let _ = StateMachine::new( &backend, &mut overlay, - &new_native_or_wasm_executor(), + &WasmExecutor::default(), "Core_execute_block", &b1data, &mut Default::default(), @@ -2072,7 +2071,7 @@ fn cleans_up_closed_notification_sinks_on_block_import() { use substrate_test_runtime_client::GenesisInit; let backend = Arc::new(sc_client_api::in_mem::Backend::new()); - let executor = new_native_or_wasm_executor(); + let executor = WasmExecutor::default(); let client_config = sc_service::ClientConfig::default(); let genesis_block_builder = sc_service::GenesisBlockBuilder::new( @@ -2099,11 +2098,7 @@ fn cleans_up_closed_notification_sinks_on_block_import() { type TestClient = Client< in_mem::Backend, - LocalCallExecutor< - Block, - in_mem::Backend, - sc_executor::NativeElseWasmExecutor, - >, + LocalCallExecutor, WasmExecutor>, Block, RuntimeApi, >; diff --git a/substrate/frame/system/src/tests.rs b/substrate/frame/system/src/tests.rs index b889b5ca046..22b98014db4 100644 --- a/substrate/frame/system/src/tests.rs +++ b/substrate/frame/system/src/tests.rs @@ -21,14 +21,14 @@ use frame_support::{ dispatch::{Pays, 
PostDispatchInfo, WithPostDispatchInfo}, traits::{OnRuntimeUpgrade, WhitelistedStorageKeys}, }; -use std::collections::BTreeSet; - use mock::{RuntimeOrigin, *}; use sp_core::{hexdisplay::HexDisplay, H256}; use sp_runtime::{ traits::{BlakeTwo256, Header}, DispatchError, DispatchErrorWithPostInfo, }; +use std::collections::BTreeSet; +use substrate_test_runtime_client::WasmExecutor; #[test] fn check_whitelist() { @@ -653,7 +653,7 @@ fn assert_runtime_updated_digest(num: usize) { #[test] fn set_code_with_real_wasm_blob() { - let executor = substrate_test_runtime_client::new_native_or_wasm_executor(); + let executor = WasmExecutor::default(); let mut ext = new_test_ext(); ext.register_extension(sp_core::traits::ReadRuntimeVersionExt::new(executor)); ext.execute_with(|| { @@ -679,7 +679,7 @@ fn set_code_with_real_wasm_blob() { fn set_code_rejects_during_mbm() { Ongoing::set(true); - let executor = substrate_test_runtime_client::new_native_or_wasm_executor(); + let executor = substrate_test_runtime_client::WasmExecutor::default(); let mut ext = new_test_ext(); ext.register_extension(sp_core::traits::ReadRuntimeVersionExt::new(executor)); ext.execute_with(|| { @@ -699,7 +699,7 @@ fn set_code_rejects_during_mbm() { #[test] fn set_code_via_authorization_works() { - let executor = substrate_test_runtime_client::new_native_or_wasm_executor(); + let executor = substrate_test_runtime_client::WasmExecutor::default(); let mut ext = new_test_ext(); ext.register_extension(sp_core::traits::ReadRuntimeVersionExt::new(executor)); ext.execute_with(|| { @@ -739,7 +739,7 @@ fn set_code_via_authorization_works() { #[test] fn runtime_upgraded_with_set_storage() { - let executor = substrate_test_runtime_client::new_native_or_wasm_executor(); + let executor = substrate_test_runtime_client::WasmExecutor::default(); let mut ext = new_test_ext(); ext.register_extension(sp_core::traits::ReadRuntimeVersionExt::new(executor)); ext.execute_with(|| { diff --git a/substrate/primitives/api/test/tests/runtime_calls.rs b/substrate/primitives/api/test/tests/runtime_calls.rs index e66be7f9bf1..5a524d1c7f4 100644 --- a/substrate/primitives/api/test/tests/runtime_calls.rs +++ b/substrate/primitives/api/test/tests/runtime_calls.rs @@ -122,9 +122,7 @@ fn record_proof_works() { // Use the proof backend to execute `execute_block`. 
let mut overlay = Default::default(); - let executor = NativeElseWasmExecutor::::new_with_wasm_executor( - WasmExecutor::builder().build(), - ); + let executor: WasmExecutor = WasmExecutor::builder().build(); execution_proof_check_on_trie_backend( &backend, &mut overlay, diff --git a/substrate/test-utils/client/src/lib.rs b/substrate/test-utils/client/src/lib.rs index d283b24f286..c07640653d5 100644 --- a/substrate/test-utils/client/src/lib.rs +++ b/substrate/test-utils/client/src/lib.rs @@ -24,7 +24,7 @@ pub mod client_ext; pub use self::client_ext::{BlockOrigin, ClientBlockImportExt, ClientExt}; pub use sc_client_api::{execution_extensions::ExecutionExtensions, BadBlocks, ForkBlocks}; pub use sc_client_db::{self, Backend, BlocksPruning}; -pub use sc_executor::{self, NativeElseWasmExecutor, WasmExecutionMethod, WasmExecutor}; +pub use sc_executor::{self, WasmExecutionMethod, WasmExecutor}; pub use sc_service::{client, RpcHandlers}; pub use sp_consensus; pub use sp_keyring::{ @@ -245,14 +245,8 @@ impl } } -impl - TestClientBuilder< - Block, - client::LocalCallExecutor>, - Backend, - G, - > where - D: sc_executor::NativeExecutionDispatch, +impl + TestClientBuilder>, Backend, G> { /// Build the test client with the given native executor. pub fn build_with_native_executor( @@ -261,21 +255,18 @@ impl ) -> ( client::Client< Backend, - client::LocalCallExecutor>, + client::LocalCallExecutor>, Block, RuntimeApi, >, sc_consensus::LongestChain, ) where - I: Into>>, - D: sc_executor::NativeExecutionDispatch + 'static, + I: Into>>, Backend: sc_client_api::backend::Backend + 'static, + H: sc_executor::HostFunctions, { - let mut executor = executor.into().unwrap_or_else(|| { - NativeElseWasmExecutor::new_with_wasm_executor(WasmExecutor::builder().build()) - }); - executor.disable_use_native(); + let executor = executor.into().unwrap_or_else(|| WasmExecutor::::builder().build()); let executor = LocalCallExecutor::new( self.backend.clone(), executor.clone(), diff --git a/substrate/test-utils/runtime/client/src/lib.rs b/substrate/test-utils/runtime/client/src/lib.rs index 7428a7de3a0..435f3f5ebac 100644 --- a/substrate/test-utils/runtime/client/src/lib.rs +++ b/substrate/test-utils/runtime/client/src/lib.rs @@ -42,38 +42,18 @@ pub mod prelude { }; // Client structs pub use super::{ - Backend, ExecutorDispatch, LocalExecutorDispatch, NativeElseWasmExecutor, TestClient, - TestClientBuilder, WasmExecutionMethod, + Backend, ExecutorDispatch, TestClient, TestClientBuilder, WasmExecutionMethod, }; // Keyring pub use super::{AccountKeyring, Sr25519Keyring}; } -/// A unit struct which implements `NativeExecutionDispatch` feeding in the -/// hard-coded runtime. -pub struct LocalExecutorDispatch; - -impl sc_executor::NativeExecutionDispatch for LocalExecutorDispatch { - type ExtendHostFunctions = (); - - fn dispatch(method: &str, data: &[u8]) -> Option> { - substrate_test_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - substrate_test_runtime::native_version() - } -} - /// Test client database backend. pub type Backend = substrate_test_client::Backend; /// Test client executor. -pub type ExecutorDispatch = client::LocalCallExecutor< - substrate_test_runtime::Block, - Backend, - NativeElseWasmExecutor, ->; +pub type ExecutorDispatch = + client::LocalCallExecutor; /// Parameters of test-client builder with test-runtime. 
#[derive(Default)] @@ -113,14 +93,10 @@ pub type TestClientBuilder = substrate_test_client::TestClientBuilder< GenesisParameters, >; -/// Test client type with `LocalExecutorDispatch` and generic Backend. +/// Test client type with `WasmExecutor` and generic Backend. pub type Client = client::Client< B, - client::LocalCallExecutor< - substrate_test_runtime::Block, - B, - NativeElseWasmExecutor, - >, + client::LocalCallExecutor, substrate_test_runtime::Block, substrate_test_runtime::RuntimeApi, >; @@ -206,14 +182,8 @@ pub trait TestClientBuilderExt: Sized { } impl TestClientBuilderExt - for TestClientBuilder< - client::LocalCallExecutor< - substrate_test_runtime::Block, - B, - NativeElseWasmExecutor, - >, - B, - > where + for TestClientBuilder, B> +where B: sc_client_api::backend::Backend + 'static, { fn genesis_init_mut(&mut self) -> &mut GenesisParameters { @@ -238,6 +208,7 @@ pub fn new() -> Client { } /// Create a new native executor. -pub fn new_native_or_wasm_executor() -> NativeElseWasmExecutor { - NativeElseWasmExecutor::new_with_wasm_executor(WasmExecutor::builder().build()) +#[deprecated(note = "Switch to `WasmExecutor:default()`.")] +pub fn new_native_or_wasm_executor() -> WasmExecutor { + WasmExecutor::default() } -- GitLab
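As referenced in the message of patch 106, a client-side sketch of querying the new `XcmPaymentApi` to estimate an execution fee in the native token. The generic plumbing is illustrative; only the API methods themselves come from the patch:

```rust
use sp_api::ProvideRuntimeApi;
use sp_runtime::traits::Block as BlockT;
use xcm::{prelude::XCM_VERSION, VersionedXcm};
use xcm_fee_payment_runtime_api::XcmPaymentApi;

/// Estimate the fee, in the first acceptable asset (the native token in the
/// runtimes above), for executing `message` at block `at`.
fn estimate_native_fee<Block, Client>(
    client: &Client,
    at: Block::Hash,
    message: VersionedXcm<()>,
) -> Option<u128>
where
    Block: BlockT,
    Client: ProvideRuntimeApi<Block>,
    Client::Api: XcmPaymentApi<Block>,
{
    let api = client.runtime_api();
    // Ask the runtime which assets it accepts for fee payment, in our XCM version.
    let assets = api.query_acceptable_payment_assets(at, XCM_VERSION).ok()?.ok()?;
    let asset = assets.into_iter().next()?;
    // Weigh the message, then convert that weight into a fee for the chosen asset.
    let weight = api.query_xcm_weight(at, message).ok()?.ok()?;
    api.query_weight_to_asset_fee(at, weight, asset).ok()?.ok()
}
```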