Commits on Source (36)
......@@ -15,7 +15,7 @@ jobs:
os: ["ubuntu-latest"]
runs-on: ${{ matrix.os }}
container:
image: docker.io/paritytech/ci-unified:bullseye-1.75.0-2024-01-22-v20240109
image: docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
......
......@@ -42,7 +42,6 @@ jobs:
URL=https://github.com/chevdor/tera-cli/releases/download/v0.2.4/tera-cli_linux_amd64.deb
wget $URL -O tera.deb
sudo dpkg -i tera.deb
tera --version
- name: Download artifacts
uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
......@@ -70,7 +69,7 @@ jobs:
export REF1=$(get_latest_release_tag)
if [[ -z "${{ inputs.version }}" ]]; then
export REF2="${{ github.ref }}"
export REF2="${{ github.ref_name }}"
else
export REF2="${{ inputs.version }}"
fi
......@@ -79,10 +78,6 @@ jobs:
./scripts/release/build-changelogs.sh
echo "Checking the folder state"
pwd
ls -la scripts/release
- name: Archive artifact context.json
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
with:
......@@ -151,5 +146,5 @@ jobs:
access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }}
server: m.parity.io
message: |
**New version of polkadot tagged**: ${{ github.ref }}<br/>
**New version of polkadot tagged**: ${{ github.ref_name }}<br/>
Draft release created: ${{ needs.publish-release-draft.outputs.release_url }}
......@@ -21,25 +21,30 @@ jobs:
- name: Skip merge queue
if: ${{ contains(github.ref, 'gh-readonly-queue') }}
run: exit 0
- name: Get comments
- name: Get PR data
id: comments
run: echo "bodies=$(gh pr view ${{ github.event.number }} --repo ${{ github.repository }} --json comments --jq '[.comments[].body]')" >> "$GITHUB_OUTPUT"
run: |
echo "bodies=$(gh pr view ${{ github.event.pull_request.number }} --repo ${{ github.repository }} --json comments --jq '[.comments[].body]')" >> "$GITHUB_OUTPUT"
echo "reviews=$(gh api repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/reviews --jq '[.[].state]')" >> "$GITHUB_OUTPUT"
env:
GH_TOKEN: ${{ github.token }}
- name: Fail when author pushes new code
# Require new reviews when the author pushes new code and they are not a member
if: |
contains(fromJson(steps.comments.outputs.reviews), 'APPROVED') &&
github.event_name == 'pull_request_target' &&
github.event.action == 'synchronize' &&
github.event.sender.login == github.event.pull_request.user.login &&
github.event.pull_request.author_association != 'CONTRIBUTOR' &&
github.event.pull_request.author_association != 'MEMBER'
run: |
echo "User's association is ${{ github.event.pull_request.author_association }}"
# We get the list of reviewers who approved the PR
REVIEWERS=$(gh api repos/${{ github.repository }}/pulls/${{ github.event.number }}/reviews \
REVIEWERS=$(gh api repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/reviews \
--jq '{reviewers: [.[] | select(.state == "APPROVED") | .user.login]}')
# We request them to review again
echo $REVIEWERS | gh api --method POST repos/${{ github.repository }}/pulls/${{ github.event.number }}/requested_reviewers --input -
echo $REVIEWERS | gh api --method POST repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/requested_reviewers --input -
echo "::error::Project needs to be reviewed again"
exit 1
......@@ -49,7 +54,7 @@ jobs:
# If the previous step failed and github-actions hasn't commented yet, we comment instructions
if: failure() && !contains(fromJson(steps.comments.outputs.bodies), 'Review required! Latest push from author must always be reviewed')
run: |
gh pr comment ${{ github.event.number }} --repo ${{ github.repository }} --body "Review required! Latest push from author must always be reviewed"
gh pr comment ${{ github.event.pull_request.number }} --repo ${{ github.repository }} --body "Review required! Latest push from author must always be reviewed"
env:
GH_TOKEN: ${{ github.token }}
COMMENTS: ${{ steps.comments.outputs.users }}
......
name: test-github-actions
on:
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
merge_group:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
env:
CARGO_NET_GIT_FETCH_WITH_CLI: true
jobs:
test-linux-stable-int:
runs-on: arc-runners-polkadot-sdk
timeout-minutes: 30
container:
image: "docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408"
env:
RUSTFLAGS: "-C debug-assertions -D warnings"
RUST_BACKTRACE: 1
WASM_BUILD_NO_COLOR: 1
WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings"
# Ensure we run the UI tests.
RUN_UI_TESTS: 1
steps:
- name: Checkout
uses: actions/checkout@v4
- name: script
run: WASM_BUILD_NO_COLOR=1 time cargo test -p staging-node-cli --release --locked -- --ignored
quick-benchmarks:
runs-on: arc-runners-polkadot-sdk
timeout-minutes: 30
container:
image: "docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408"
env:
RUSTFLAGS: "-C debug-assertions -D warnings"
RUST_BACKTRACE: "full"
WASM_BUILD_NO_COLOR: 1
WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings"
steps:
- name: Checkout
uses: actions/checkout@v4
- name: script
run: time cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks --quiet -- benchmark pallet --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 --quiet
......@@ -820,17 +820,22 @@ dependencies = [
"assert_matches",
"asset-hub-rococo-runtime",
"asset-test-utils",
"cumulus-pallet-parachain-system",
"emulated-integration-tests-common",
"frame-support",
"pallet-asset-conversion",
"pallet-assets",
"pallet-balances",
"pallet-message-queue",
"pallet-treasury",
"pallet-utility",
"pallet-xcm",
"parachains-common",
"parity-scale-codec",
"penpal-runtime",
"polkadot-runtime-common",
"rococo-runtime",
"rococo-runtime-constants",
"rococo-system-emulated-network",
"sp-runtime",
"staging-xcm",
......@@ -866,6 +871,7 @@ dependencies = [
"hex-literal",
"log",
"pallet-asset-conversion",
"pallet-asset-conversion-ops",
"pallet-asset-conversion-tx-payment",
"pallet-assets",
"pallet-aura",
......@@ -990,6 +996,7 @@ dependencies = [
"hex-literal",
"log",
"pallet-asset-conversion",
"pallet-asset-conversion-ops",
"pallet-asset-conversion-tx-payment",
"pallet-assets",
"pallet-aura",
......@@ -2830,6 +2837,36 @@ dependencies = [
"testnet-parachains-constants",
]
[[package]]
name = "collectives-westend-integration-tests"
version = "1.0.0"
dependencies = [
"assert_matches",
"asset-hub-westend-runtime",
"collectives-westend-runtime",
"cumulus-pallet-parachain-system",
"cumulus-pallet-xcmp-queue",
"emulated-integration-tests-common",
"frame-support",
"pallet-asset-rate",
"pallet-assets",
"pallet-balances",
"pallet-message-queue",
"pallet-treasury",
"pallet-utility",
"pallet-xcm",
"parachains-common",
"parity-scale-codec",
"polkadot-runtime-common",
"sp-runtime",
"staging-xcm",
"staging-xcm-executor",
"testnet-parachains-constants",
"westend-runtime",
"westend-runtime-constants",
"westend-system-emulated-network",
]
[[package]]
name = "collectives-westend-runtime"
version = "3.0.0"
......@@ -7348,6 +7385,7 @@ dependencies = [
"node-primitives",
"pallet-alliance",
"pallet-asset-conversion",
"pallet-asset-conversion-ops",
"pallet-asset-conversion-tx-payment",
"pallet-asset-rate",
"pallet-asset-tx-payment",
......@@ -7537,9 +7575,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67"
[[package]]
name = "libc"
version = "0.2.152"
version = "0.2.153"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7"
checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
[[package]]
name = "libflate"
......@@ -9499,6 +9537,7 @@ dependencies = [
"frame-benchmarking",
"frame-support",
"frame-system",
"log",
"pallet-assets",
"pallet-balances",
"parity-scale-codec",
......@@ -9512,6 +9551,27 @@ dependencies = [
"sp-std 14.0.0",
]
[[package]]
name = "pallet-asset-conversion-ops"
version = "0.1.0"
dependencies = [
"frame-benchmarking",
"frame-support",
"frame-system",
"log",
"pallet-asset-conversion",
"pallet-assets",
"pallet-balances",
"parity-scale-codec",
"primitive-types",
"scale-info",
"sp-arithmetic",
"sp-core",
"sp-io",
"sp-runtime",
"sp-std 14.0.0",
]
[[package]]
name = "pallet-asset-conversion-tx-payment"
version = "10.0.0"
......@@ -9936,6 +9996,7 @@ dependencies = [
"frame-benchmarking",
"frame-support",
"frame-system",
"log",
"parity-scale-codec",
"pretty_assertions",
"scale-info",
......@@ -13242,7 +13303,6 @@ dependencies = [
"slotmap",
"sp-core",
"sp-maybe-compressed-blob",
"sp-wasm-interface 20.0.0",
"tempfile",
"test-parachain-adder",
"test-parachain-halt",
......@@ -13279,7 +13339,6 @@ name = "polkadot-node-core-pvf-common"
version = "7.0.0"
dependencies = [
"assert_matches",
"cfg-if",
"cpu-time",
"futures",
"landlock",
......@@ -17404,6 +17463,7 @@ dependencies = [
"sc-transaction-pool",
"sc-transaction-pool-api",
"sc-utils",
"schnellru",
"serde",
"serde_json",
"sp-api",
......
......@@ -103,6 +103,7 @@ members = [
"cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend",
"cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo",
"cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend",
"cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend",
"cumulus/parachains/integration-tests/emulated/tests/people/people-rococo",
"cumulus/parachains/integration-tests/emulated/tests/people/people-westend",
"cumulus/parachains/pallets/collective-content",
......@@ -300,6 +301,7 @@ members = [
"substrate/frame",
"substrate/frame/alliance",
"substrate/frame/asset-conversion",
"substrate/frame/asset-conversion/ops",
"substrate/frame/asset-rate",
"substrate/frame/assets",
"substrate/frame/atomic-swap",
......
......@@ -328,40 +328,58 @@ impl<T: Config<I>, I: 'static> SendXcm for Pallet<T, I> {
xcm: &mut Option<Xcm<()>>,
) -> SendResult<Self::Ticket> {
log::trace!(target: LOG_TARGET, "validate - msg: {xcm:?}, destination: {dest:?}");
// `dest` and `xcm` are required here
let dest_ref = dest.as_ref().ok_or(SendError::MissingArgument)?;
let xcm_ref = xcm.as_ref().ok_or(SendError::MissingArgument)?;
// we won't have an access to `dest` and `xcm` in the `deliver` method, so precompute
// everything required here
let message_size = xcm_ref.encoded_size() as _;
// bridge doesn't support oversized/overweight messages now. So it is better to drop such
// messages here than at the bridge hub. Let's check the message size.
if message_size > HARD_MESSAGE_SIZE_LIMIT {
return Err(SendError::ExceedsMaxMessageSize)
}
// We need to ensure that the known `dest`'s XCM version can comprehend the current `xcm`
// program. This may seem like an additional, unnecessary check, but it is not. A similar
// check is probably performed by the `ViaBridgeHubExporter`, which attempts to send a
// versioned message to the sibling bridge hub. However, the local bridge hub may have a
// higher XCM version than the remote `dest`. Once again, it is better to discard such
// messages here than at the bridge hub (e.g., to avoid losing funds).
let destination_version = T::DestinationVersion::get_version_for(dest_ref)
.ok_or(SendError::DestinationUnsupported)?;
let _ = VersionedXcm::from(xcm_ref.clone())
.into_version(destination_version)
.map_err(|()| SendError::DestinationUnsupported)?;
// just use exporter to validate destination and insert instructions to pay message fee
// at the sibling/child bridge hub
//
// the cost will include both cost of: (1) to-sibling bridge hub delivery (returned by
// the `Config::ToBridgeHubSender`) and (2) to-bridged bridge hub delivery (returned by
// `Self::exporter_for`)
ViaBridgeHubExporter::<T, I>::validate(dest, xcm)
.map(|(ticket, cost)| ((message_size, ticket), cost))
// In case of success, `ViaBridgeHubExporter` can modify the XCM instructions and consume
// `dest` / `xcm`, so we retain a clone of the original message and destination for the
// later `DestinationVersion` validation.
let xcm_to_dest_clone = xcm.clone();
let dest_clone = dest.clone();
// First, use the inner exporter to validate the destination to determine if it is even
// routable. If it is not, return an error. If it is, then the XCM is extended with
// instructions to pay the message fee at the sibling/child bridge hub. The cost will
// include both the cost of (1) delivery to the sibling bridge hub (returned by
// `Config::ToBridgeHubSender`) and (2) delivery to the bridged bridge hub (returned by
// `Self::exporter_for`).
match ViaBridgeHubExporter::<T, I>::validate(dest, xcm) {
Ok((ticket, cost)) => {
// If the ticket is ok, it means we are routing with this router, so we need to
// apply the remaining validations to the cloned `dest` and `xcm`.
let xcm_to_dest_clone = xcm_to_dest_clone.ok_or(SendError::MissingArgument)?;
let dest_clone = dest_clone.ok_or(SendError::MissingArgument)?;
// We won't have access to `dest` and `xcm` in the `deliver` method, so we need to
// precompute everything required here. However, `dest` and `xcm` were consumed by
// `ViaBridgeHubExporter`, so we need to use their clones.
let message_size = xcm_to_dest_clone.encoded_size() as _;
// The bridge doesn't support oversized or overweight messages. Therefore, it's
// better to drop such messages here rather than at the bridge hub. Let's check the
// message size.
if message_size > HARD_MESSAGE_SIZE_LIMIT {
return Err(SendError::ExceedsMaxMessageSize)
}
// We need to ensure that the known `dest`'s XCM version can comprehend the current
// `xcm` program. This may seem like an additional, unnecessary check, but it is
// not. A similar check is probably performed by the `ViaBridgeHubExporter`, which
// attempts to send a versioned message to the sibling bridge hub. However, the
// local bridge hub may have a higher XCM version than the remote `dest`. Once
// again, it is better to discard such messages here than at the bridge hub (e.g.,
// to avoid losing funds).
let destination_version = T::DestinationVersion::get_version_for(&dest_clone)
.ok_or(SendError::DestinationUnsupported)?;
let _ = VersionedXcm::from(xcm_to_dest_clone)
.into_version(destination_version)
.map_err(|()| SendError::DestinationUnsupported)?;
Ok(((message_size, ticket), cost))
},
Err(e) => {
log::trace!(target: LOG_TARGET, "validate - ViaBridgeHubExporter - error: {e:?}");
Err(e)
},
}
}
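// A caller-side sketch (using the mock router name from the tests below) of the
// two-phase `SendXcm` contract implemented above: `validate` may consume `dest`
// and `xcm` on success, which is why they are cloned before calling the inner
// exporter, and the returned ticket is then fed to `deliver`:
//
//     let mut dest = Some(dest);
//     let mut msg = Some(xcm);
//     let (ticket, price) = XcmBridgeHubRouter::validate(&mut dest, &mut msg)?;
//     // ... charge `price` (delivery fees for both bridge-hub hops) ...
//     let hash = XcmBridgeHubRouter::deliver(ticket)?;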
fn deliver(ticket: Self::Ticket) -> Result<XcmHash, SendError> {
......@@ -452,24 +470,51 @@ mod tests {
#[test]
fn not_applicable_if_destination_is_within_other_network() {
run_test(|| {
// unroutable dest
let dest = Location::new(2, [GlobalConsensus(ByGenesis([0; 32])), Parachain(1000)]);
let xcm: Xcm<()> = vec![ClearOrigin].into();
// check that the router does not consume the message when it returns `NotApplicable`
let mut xcm_wrapper = Some(xcm.clone());
assert_eq!(
send_xcm::<XcmBridgeHubRouter>(
Location::new(2, [GlobalConsensus(Rococo), Parachain(1000)]),
vec![].into(),
),
XcmBridgeHubRouter::validate(&mut Some(dest.clone()), &mut xcm_wrapper),
Err(SendError::NotApplicable),
);
// XCM is NOT consumed and remains untouched
assert_eq!(Some(xcm.clone()), xcm_wrapper);
// check the full `send_xcm`
assert_eq!(send_xcm::<XcmBridgeHubRouter>(dest, xcm), Err(SendError::NotApplicable));
});
}
#[test]
fn exceeds_max_message_size_if_size_is_above_hard_limit() {
run_test(|| {
// routable dest with a known XCM version
let dest =
Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)]);
// oversized XCM
let xcm: Xcm<()> = vec![ClearOrigin; HARD_MESSAGE_SIZE_LIMIT as usize].into();
// dest is routable with the inner router
assert_ok!(ViaBridgeHubExporter::<TestRuntime, ()>::validate(
&mut Some(dest.clone()),
&mut Some(xcm.clone())
));
// check for oversized message
let mut xcm_wrapper = Some(xcm.clone());
assert_eq!(
XcmBridgeHubRouter::validate(&mut Some(dest.clone()), &mut xcm_wrapper),
Err(SendError::ExceedsMaxMessageSize),
);
// XCM is consumed by the inner router
assert!(xcm_wrapper.is_none());
// check the full `send_xcm`
assert_eq!(
send_xcm::<XcmBridgeHubRouter>(
Location::new(2, [GlobalConsensus(Rococo), Parachain(1000)]),
vec![ClearOrigin; HARD_MESSAGE_SIZE_LIMIT as usize].into(),
),
send_xcm::<XcmBridgeHubRouter>(dest, xcm),
Err(SendError::ExceedsMaxMessageSize),
);
});
......@@ -478,11 +523,28 @@ mod tests {
#[test]
fn destination_unsupported_if_wrap_version_fails() {
run_test(|| {
// routable dest, but its XCM version is unknown
let dest = UnknownXcmVersionForRoutableLocation::get();
let xcm: Xcm<()> = vec![ClearOrigin].into();
// dest is routable with the inner router
assert_ok!(ViaBridgeHubExporter::<TestRuntime, ()>::validate(
&mut Some(dest.clone()),
&mut Some(xcm.clone())
));
// check that it does not pass the XCM version check
let mut xcm_wrapper = Some(xcm.clone());
assert_eq!(
XcmBridgeHubRouter::validate(&mut Some(dest.clone()), &mut xcm_wrapper),
Err(SendError::DestinationUnsupported),
);
// XCM is consumed by the inner router
assert!(xcm_wrapper.is_none());
// check the full `send_xcm`
assert_eq!(
send_xcm::<XcmBridgeHubRouter>(
UnknownXcmVersionLocation::get(),
vec![ClearOrigin].into(),
),
send_xcm::<XcmBridgeHubRouter>(dest, xcm),
Err(SendError::DestinationUnsupported),
);
});
......
......@@ -61,7 +61,7 @@ parameter_types! {
Some((BridgeFeeAsset::get(), BASE_FEE).into())
)
];
pub UnknownXcmVersionLocation: Location = Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(9999)]);
pub UnknownXcmVersionForRoutableLocation: Location = Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(9999)]);
}
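// A minimal sketch of the version checker used below; this is an assumption mirroring
// the mock's intent (the trait and signature are inferred from the `validate` call
// above), not the actual implementation. It reports no known XCM version for the
// matched location and the latest version for everything else, which is what drives
// `SendError::DestinationUnsupported` in the tests:
//
//     pub struct LatestOrNoneForLocationVersionChecker<M>(sp_std::marker::PhantomData<M>);
//     impl<M: Contains<Location>> GetVersion for LatestOrNoneForLocationVersionChecker<M> {
//         fn get_version_for(dest: &Location) -> Option<XcmVersion> {
//             if M::contains(dest) { None } else { Some(XCM_VERSION) }
//         }
//     }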
#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
......@@ -76,7 +76,7 @@ impl pallet_xcm_bridge_hub_router::Config<()> for TestRuntime {
type BridgedNetworkId = BridgedNetworkId;
type Bridges = NetworkExportTable<BridgeTable>;
type DestinationVersion =
LatestOrNoneForLocationVersionChecker<Equals<UnknownXcmVersionLocation>>;
LatestOrNoneForLocationVersionChecker<Equals<UnknownXcmVersionForRoutableLocation>>;
type BridgeHubOrigin = EnsureRoot<AccountId>;
type ToBridgeHubSender = TestToBridgeHubSender;
......
......@@ -32,7 +32,9 @@ use core::{clone::Clone, cmp::Eq, default::Default, fmt::Debug};
use frame_support::PalletError;
use scale_info::TypeInfo;
use serde::{Deserialize, Serialize};
use sp_consensus_grandpa::{AuthorityList, ConsensusLog, SetId, GRANDPA_ENGINE_ID};
use sp_consensus_grandpa::{
AuthorityList, ConsensusLog, ScheduledChange, SetId, GRANDPA_ENGINE_ID,
};
use sp_runtime::{traits::Header as HeaderT, Digest, RuntimeDebug};
use sp_std::{boxed::Box, vec::Vec};
......@@ -147,24 +149,23 @@ pub struct GrandpaConsensusLogReader<Number>(sp_std::marker::PhantomData<Number>
impl<Number: Codec> GrandpaConsensusLogReader<Number> {
/// Find and return scheduled (regular) change digest item.
pub fn find_scheduled_change(
digest: &Digest,
) -> Option<sp_consensus_grandpa::ScheduledChange<Number>> {
pub fn find_scheduled_change(digest: &Digest) -> Option<ScheduledChange<Number>> {
use sp_runtime::generic::OpaqueDigestItemId;
let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID);
let filter_log = |log: ConsensusLog<Number>| match log {
ConsensusLog::ScheduledChange(change) => Some(change),
_ => None,
};
// find the first consensus digest with the right ID which converts to
// the right kind of consensus log.
digest
.convert_first(|log| log.consensus_try_to(&GRANDPA_ENGINE_ID))
.and_then(|log| match log {
ConsensusLog::ScheduledChange(change) => Some(change),
_ => None,
})
digest.convert_first(|l| l.try_to(id).and_then(filter_log))
}
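// A rough sketch of the equivalent explicit loop, assuming the `sp_runtime` digest
// API: `try_to` decodes the payload as `ConsensusLog<Number>` only when the engine
// ID matches `GRANDPA_ENGINE_ID`, and `filter_log` keeps scheduled changes:
//
//     digest.logs().iter().find_map(|item| {
//         item.try_to::<ConsensusLog<Number>>(id).and_then(filter_log)
//     })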
/// Find and return the forced change digest item. Our light client can't do anything
/// with forced changes, so we can't accept a header with a forced change digest.
pub fn find_forced_change(
digest: &Digest,
) -> Option<(Number, sp_consensus_grandpa::ScheduledChange<Number>)> {
pub fn find_forced_change(digest: &Digest) -> Option<(Number, ScheduledChange<Number>)> {
// find the first consensus digest with the right ID which converts to
// the right kind of consensus log.
digest
......@@ -346,7 +347,7 @@ mod tests {
use super::*;
use bp_runtime::ChainId;
use frame_support::weights::Weight;
use sp_runtime::{testing::H256, traits::BlakeTwo256, MultiSignature};
use sp_runtime::{testing::H256, traits::BlakeTwo256, DigestItem, MultiSignature};
struct TestChain;
......@@ -385,4 +386,35 @@ mod tests {
max_expected_submit_finality_proof_arguments_size::<TestChain>(false, 100),
);
}
#[test]
fn find_scheduled_change_works() {
let scheduled_change = ScheduledChange { next_authorities: vec![], delay: 0 };
// first
let mut digest = Digest::default();
digest.push(DigestItem::Consensus(
GRANDPA_ENGINE_ID,
ConsensusLog::ScheduledChange(scheduled_change.clone()).encode(),
));
assert_eq!(
GrandpaConsensusLogReader::find_scheduled_change(&digest),
Some(scheduled_change.clone())
);
// not first
let mut digest = Digest::default();
digest.push(DigestItem::Consensus(
GRANDPA_ENGINE_ID,
ConsensusLog::<u64>::OnDisabled(0).encode(),
));
digest.push(DigestItem::Consensus(
GRANDPA_ENGINE_ID,
ConsensusLog::ScheduledChange(scheduled_change.clone()).encode(),
));
assert_eq!(
GrandpaConsensusLogReader::find_scheduled_change(&digest),
Some(scheduled_change.clone())
);
}
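// For comparison (a sketch, not an added test): a forced change uses the same
// consensus engine ID but carries the activation delay, so `find_forced_change`
// would match a digest item built as:
//
//     DigestItem::Consensus(
//         GRANDPA_ENGINE_ID,
//         ConsensusLog::<u64>::ForcedChange(5, scheduled_change.clone()).encode(),
//     )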
}
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Benchmarking setup for pallet-session.
use sp_std::{prelude::*, vec};
use frame_benchmarking::{benchmarks, whitelisted_caller};
use frame_system::RawOrigin;
use pallet_session::*;
use parity_scale_codec::Decode;
pub struct Pallet<T: Config>(pallet_session::Pallet<T>);
pub trait Config: pallet_session::Config {}
benchmarks! {
set_keys {
let caller: T::AccountId = whitelisted_caller();
frame_system::Pallet::<T>::inc_providers(&caller);
let keys = T::Keys::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()).unwrap();
let proof: Vec<u8> = vec![0,1,2,3];
}: _(RawOrigin::Signed(caller), keys, proof)
purge_keys {
let caller: T::AccountId = whitelisted_caller();
frame_system::Pallet::<T>::inc_providers(&caller);
let keys = T::Keys::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()).unwrap();
let proof: Vec<u8> = vec![0,1,2,3];
let _t = pallet_session::Pallet::<T>::set_keys(RawOrigin::Signed(caller.clone()).into(), keys, proof);
}: _(RawOrigin::Signed(caller))
}
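A hedged sketch of how a runtime typically wires these benchmarks in, assuming the usual `frame_benchmarking` conventions; the `Runtime` name and the `SessionBench` alias are illustrative:
// in the runtime crate, behind the `runtime-benchmarks` feature
use pallet_session_benchmarking::Pallet as SessionBench;
impl pallet_session_benchmarking::Config for Runtime {}
frame_benchmarking::define_benchmarks!(
[pallet_session_benchmarking, SessionBench::<Runtime>]
);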
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
......@@ -13,31 +15,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//! Benchmarking setup for pallet-session
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg(feature = "runtime-benchmarks")]
use sp_std::{prelude::*, vec};
//! Benchmarks for the Session Pallet.
// This is separated into its own crate due to cyclic dependency issues.
use frame_benchmarking::{benchmarks, whitelisted_caller};
use frame_system::RawOrigin;
use pallet_session::*;
use parity_scale_codec::Decode;
pub struct Pallet<T: Config>(pallet_session::Pallet<T>);
pub trait Config: pallet_session::Config {}
#![cfg_attr(not(feature = "std"), no_std)]
benchmarks! {
set_keys {
let caller: T::AccountId = whitelisted_caller();
frame_system::Pallet::<T>::inc_providers(&caller);
let keys = T::Keys::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()).unwrap();
let proof: Vec<u8> = vec![0,1,2,3];
}: _(RawOrigin::Signed(caller), keys, proof)
#[cfg(feature = "runtime-benchmarks")]
pub mod inner;
purge_keys {
let caller: T::AccountId = whitelisted_caller();
frame_system::Pallet::<T>::inc_providers(&caller);
let keys = T::Keys::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()).unwrap();
let proof: Vec<u8> = vec![0,1,2,3];
let _t = pallet_session::Pallet::<T>::set_keys(RawOrigin::Signed(caller.clone()).into(), keys, proof);
}: _(RawOrigin::Signed(caller))
}
#[cfg(feature = "runtime-benchmarks")]
pub use inner::*;
......@@ -4,7 +4,11 @@
"chainType": "Live",
"bootNodes": [
"/dns/rococo-asset-hub-bootnode-0.polkadot.io/tcp/30333/p2p/12D3KooWRrZMndHAopzao34uGsN7srjS3gh9nAjTGKLSyJeU31Lg",
"/dns/rococo-asset-hub-bootnode-1.polkadot.io/tcp/30333/p2p/12D3KooWAewimoNJqMaiiV5pYiowA5hLuh5JS5QiRJCCyWVrrSTS"
"/dns/rococo-asset-hub-bootnode-1.polkadot.io/tcp/30333/p2p/12D3KooWAewimoNJqMaiiV5pYiowA5hLuh5JS5QiRJCCyWVrrSTS",
"/dns/rococo-asset-hub-bootnode-0.polkadot.io/tcp/30335/ws/p2p/12D3KooWRrZMndHAopzao34uGsN7srjS3gh9nAjTGKLSyJeU31Lg",
"/dns/rococo-asset-hub-bootnode-1.polkadot.io/tcp/30335/ws/p2p/12D3KooWAewimoNJqMaiiV5pYiowA5hLuh5JS5QiRJCCyWVrrSTS",
"/dns/rococo-asset-hub-bootnode-0.polkadot.io/tcp/443/wss/p2p/12D3KooWRrZMndHAopzao34uGsN7srjS3gh9nAjTGKLSyJeU31Lg",
"/dns/rococo-asset-hub-bootnode-1.polkadot.io/tcp/443/wss/p2p/12D3KooWAewimoNJqMaiiV5pYiowA5hLuh5JS5QiRJCCyWVrrSTS"
],
"telemetryEndpoints": null,
"protocolId": null,
......@@ -5,6 +5,10 @@
"bootNodes": [
"/dns/westend-asset-hub-bootnode-0.polkadot.io/tcp/30333/p2p/12D3KooWJaAfPyiye7ZQBuHengTJJoMrcaz7Jj1UzHiKdNxA1Nkd",
"/dns/westend-asset-hub-bootnode-1.polkadot.io/tcp/30333/p2p/12D3KooWGL3hpWycWyeqyL9gHNnmmsL474WkPZdqraBHu4L6fQrW",
"/dns/westend-asset-hub-bootnode-0.polkadot.io/tcp/30335/ws/p2p/12D3KooWJaAfPyiye7ZQBuHengTJJoMrcaz7Jj1UzHiKdNxA1Nkd",
"/dns/westend-asset-hub-bootnode-1.polkadot.io/tcp/30335/ws/p2p/12D3KooWGL3hpWycWyeqyL9gHNnmmsL474WkPZdqraBHu4L6fQrW",
"/dns/westend-asset-hub-connect-0.polkadot.io/tcp/443/wss/p2p/12D3KooWJaAfPyiye7ZQBuHengTJJoMrcaz7Jj1UzHiKdNxA1Nkd",
"/dns/westend-asset-hub-connect-1.polkadot.io/tcp/443/wss/p2p/12D3KooWGL3hpWycWyeqyL9gHNnmmsL474WkPZdqraBHu4L6fQrW",
"/dns/boot.stake.plus/tcp/33333/p2p/12D3KooWNiB27rpXX7EYongoWWUeRKzLQxWGms6MQU2B9LX7Ztzo",
"/dns/boot.stake.plus/tcp/33334/wss/p2p/12D3KooWNiB27rpXX7EYongoWWUeRKzLQxWGms6MQU2B9LX7Ztzo",
"/dns/boot.metaspan.io/tcp/36052/p2p/12D3KooWBCqfNb6Y39DXTr4UBWXyjuS3hcZM1qTbHhDXxF6HkAJJ",
......
......@@ -4,7 +4,11 @@
"chainType": "Live",
"bootNodes": [
"/dns/rococo-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWJCFBJmFF65xz5xHeZQRSCf35BxfSEB3RHQFoLza28LWU",
"/dns/rococo-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWJzLd8skcAgA24EcJey7aJAhYctfUxWGjSP5Usk9wbpPZ"
"/dns/rococo-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWJzLd8skcAgA24EcJey7aJAhYctfUxWGjSP5Usk9wbpPZ",
"/dns/rococo-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWJCFBJmFF65xz5xHeZQRSCf35BxfSEB3RHQFoLza28LWU",
"/dns/rococo-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWJzLd8skcAgA24EcJey7aJAhYctfUxWGjSP5Usk9wbpPZ",
"/dns/rococo-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWJCFBJmFF65xz5xHeZQRSCf35BxfSEB3RHQFoLza28LWU",
"/dns/rococo-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWJzLd8skcAgA24EcJey7aJAhYctfUxWGjSP5Usk9wbpPZ"
],
"telemetryEndpoints": null,
"protocolId": null,
......@@ -5,6 +5,10 @@
"bootNodes": [
"/dns/westend-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWKyEuqkkWvFSrwZWKWBAsHgLV3HGfHj7yH3LNJLAVhmxY",
"/dns/westend-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWBpvudthz61XC4oP2YYFFJdhWohBeQ1ffn1BMSGWhapjd",
"/dns/westend-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWKyEuqkkWvFSrwZWKWBAsHgLV3HGfHj7yH3LNJLAVhmxY",
"/dns/westend-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWBpvudthz61XC4oP2YYFFJdhWohBeQ1ffn1BMSGWhapjd",
"/dns/westend-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWKyEuqkkWvFSrwZWKWBAsHgLV3HGfHj7yH3LNJLAVhmxY",
"/dns/westend-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWBpvudthz61XC4oP2YYFFJdhWohBeQ1ffn1BMSGWhapjd",
"/dns/westend-bridge-hub-boot-ng.dwellir.com/tcp/30338/p2p/12D3KooWJWWRYTAwBLqYkh7iMBGDr5ouJ3MHj7M3fZ7zWS4zEk6F",
"/dns/westend-bridge-hub-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWJWWRYTAwBLqYkh7iMBGDr5ouJ3MHj7M3fZ7zWS4zEk6F",
"/dns/boot-cr.gatotech.network/tcp/33330/p2p/12D3KooWJHG6qznPzTSEbuujHNcvyzBZcR9zNRPFcXWUaoVWZBEw",
......@@ -5,6 +5,8 @@
"bootNodes": [
"/dns/westend-collectives-collator-node-0.parity-testnet.parity.io/tcp/30334/p2p/12D3KooWBMAuyzQu3yAf8YXyoyxsSzSsgoaqAepgnNyQcPaPjPXe",
"/dns/westend-collectives-collator-node-1.parity-testnet.parity.io/tcp/30334/p2p/12D3KooWAujYtHbCs4MiDD57JNTntTJnYnikfnaPa7JdnMyAUrHB",
"/dns/westend-collectives-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWBMAuyzQu3yAf8YXyoyxsSzSsgoaqAepgnNyQcPaPjPXe",
"/dns/westend-collectives-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWAujYtHbCs4MiDD57JNTntTJnYnikfnaPa7JdnMyAUrHB",
"/dns/westend-collectives-collator-0.polkadot.io/tcp/443/wss/p2p/12D3KooWBMAuyzQu3yAf8YXyoyxsSzSsgoaqAepgnNyQcPaPjPXe",
"/dns/westend-collectives-collator-1.polkadot.io/tcp/443/wss/p2p/12D3KooWAujYtHbCs4MiDD57JNTntTJnYnikfnaPa7JdnMyAUrHB",
"/dns/boot.stake.plus/tcp/38333/p2p/12D3KooWQoVsFCfgu21iu6kdtQsU9T6dPn1wsyLn1U34yPerR6zQ",
......@@ -5,6 +5,8 @@
"bootNodes": [
"/dns/rococo-contracts-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWKg3Rpxcr9oJ8n6khoxpGKWztCZydtUZk2cojHqnfLrpj",
"/dns/rococo-contracts-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWPEXYrz8tHU3nDtPoPw4V7ou5dzMEWSTuUj7vaWiYVAVh",
"/dns/rococo-contracts-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWKg3Rpxcr9oJ8n6khoxpGKWztCZydtUZk2cojHqnfLrpj",
"/dns/rococo-contracts-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWPEXYrz8tHU3nDtPoPw4V7ou5dzMEWSTuUj7vaWiYVAVh",
"/dns/rococo-contracts-collator-node-0.polkadot.io/tcp/443/wss/p2p/12D3KooWKg3Rpxcr9oJ8n6khoxpGKWztCZydtUZk2cojHqnfLrpj",
"/dns/rococo-contracts-collator-node-1.polkadot.io/tcp/443/wss/p2p/12D3KooWPEXYrz8tHU3nDtPoPw4V7ou5dzMEWSTuUj7vaWiYVAVh"
],
......@@ -4,7 +4,11 @@
"chainType": "Live",
"bootNodes": [
"/dns/rococo-coretime-collator-node-0.polkadot.io/tcp/30333/p2p/12D3KooWHBUH9wGBx1Yq1ZePov9VL3AzxRPv5DTR4KadiCU6VKxy",
"/dns/rococo-coretime-collator-node-1.polkadot.io/tcp/30333/p2p/12D3KooWB3SKxdj6kpwTkdMnHJi6YmadojCzmEqFkeFJjxN812XX"
"/dns/rococo-coretime-collator-node-1.polkadot.io/tcp/30333/p2p/12D3KooWB3SKxdj6kpwTkdMnHJi6YmadojCzmEqFkeFJjxN812XX",
"/dns/rococo-coretime-collator-node-0.polkadot.io/tcp/30335/ws/p2p/12D3KooWHBUH9wGBx1Yq1ZePov9VL3AzxRPv5DTR4KadiCU6VKxy",
"/dns/rococo-coretime-collator-node-1.polkadot.io/tcp/30335/ws/p2p/12D3KooWB3SKxdj6kpwTkdMnHJi6YmadojCzmEqFkeFJjxN812XX",
"/dns/rococo-coretime-collator-node-0.polkadot.io/tcp/443/wss/p2p/12D3KooWHBUH9wGBx1Yq1ZePov9VL3AzxRPv5DTR4KadiCU6VKxy",
"/dns/rococo-coretime-collator-node-1.polkadot.io/tcp/443/wss/p2p/12D3KooWB3SKxdj6kpwTkdMnHJi6YmadojCzmEqFkeFJjxN812XX"
],
"telemetryEndpoints": null,
"protocolId": null,
......@@ -67,4 +71,4 @@
"childrenDefault": {}
}
}
}
\ No newline at end of file
}
......@@ -5,6 +5,10 @@
"bootNodes": [
"/dns/westend-coretime-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT",
"/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH",
"/dns/westend-coretime-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT",
"/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH",
"/dns/westend-coretime-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT",
"/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH",
"/dns/boot.metaspan.io/tcp/33019/p2p/12D3KooWCa1uNnEZqiqJY9jkKNQxwSLGPeZ5MjWHhjQMGwga9JMM",
"/dns/boot-node.helikon.io/tcp/9420/p2p/12D3KooWFBPartM873MNm1AmVK3etUz34cAE9A9rwPztPno2epQ3",
"/dns/boot-node.helikon.io/tcp/9422/wss/p2p/12D3KooWFBPartM873MNm1AmVK3etUz34cAE9A9rwPztPno2epQ3",
......@@ -4,13 +4,11 @@
"chainType": "Live",
"bootNodes": [
"/dns/rococo-people-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWDZg5jMYhKXTu6RU491V5sxsFnP4oaEmZJEUfcRkYzps5",
"/dns/rococo-people-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWDZg5jMYhKXTu6RU491V5sxsFnP4oaEmZJEUfcRkYzps5",
"/dns/rococo-people-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWGGR5i6qQqfo7iDNp7vjDRKPWuDk53idGV6nFLwS12X5H",
"/dns/rococo-people-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWGGR5i6qQqfo7iDNp7vjDRKPWuDk53idGV6nFLwS12X5H",
"/dns/rococo-people-collator-node-2.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWBvA9BmBfrsVMcAcqVXGYFCpMTvkSk2igNXpmoareYbeT",
"/dns/rococo-people-collator-node-2.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWBvA9BmBfrsVMcAcqVXGYFCpMTvkSk2igNXpmoareYbeT",
"/dns/rococo-people-collator-node-3.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWQ7Q9jLcJTPXy7KEp5hSZ8YMY9pHx9CnQVz3T8TKQ81UG",
"/dns/rococo-people-collator-node-3.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWQ7Q9jLcJTPXy7KEp5hSZ8YMY9pHx9CnQVz3T8TKQ81UG"
"/dns/rococo-people-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWDZg5jMYhKXTu6RU491V5sxsFnP4oaEmZJEUfcRkYzps5",
"/dns/rococo-people-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWGGR5i6qQqfo7iDNp7vjDRKPWuDk53idGV6nFLwS12X5H",
"/dns/rococo-people-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWDZg5jMYhKXTu6RU491V5sxsFnP4oaEmZJEUfcRkYzps5",
"/dns/rococo-people-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWGGR5i6qQqfo7iDNp7vjDRKPWuDk53idGV6nFLwS12X5H"
],
"telemetryEndpoints": null,
"protocolId": null,
......@@ -79,4 +77,4 @@
"childrenDefault": {}
}
}
}
\ No newline at end of file
}