@@ -15,7 +15,7 @@ jobs:
         os: ["ubuntu-latest"]
     runs-on: ${{ matrix.os }}
     container:
-      image: docker.io/paritytech/ci-unified:bullseye-1.75.0-2024-01-22-v20240109
+      image: docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408
     steps:
       - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
...
@@ -42,7 +42,6 @@ jobs:
          URL=https://github.com/chevdor/tera-cli/releases/download/v0.2.4/tera-cli_linux_amd64.deb
          wget $URL -O tera.deb
          sudo dpkg -i tera.deb
-         tera --version
      - name: Download artifacts
        uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
@@ -70,7 +69,7 @@ jobs:
          export REF1=$(get_latest_release_tag)
          if [[ -z "${{ inputs.version }}" ]]; then
-           export REF2="${{ github.ref }}"
+           export REF2="${{ github.ref_name }}"
          else
            export REF2="${{ inputs.version }}"
          fi
@@ -79,10 +78,6 @@ jobs:
          ./scripts/release/build-changelogs.sh
-         echo "Checking the folder state"
-         pwd
-         ls -la scripts/release
      - name: Archive artifact context.json
        uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
        with:
@@ -151,5 +146,5 @@ jobs:
          access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }}
          server: m.parity.io
          message: |
-           **New version of polkadot tagged**: ${{ github.ref }}<br/>
+           **New version of polkadot tagged**: ${{ github.ref_name }}<br/>
            Draft release created: ${{ needs.publish-release-draft.outputs.release_url }}
@@ -21,25 +21,30 @@ jobs:
      - name: Skip merge queue
        if: ${{ contains(github.ref, 'gh-readonly-queue') }}
        run: exit 0
-      - name: Get comments
+      - name: Get PR data
        id: comments
-        run: echo "bodies=$(gh pr view ${{ github.event.number }} --repo ${{ github.repository }} --json comments --jq '[.comments[].body]')" >> "$GITHUB_OUTPUT"
+        run: |
+          echo "bodies=$(gh pr view ${{ github.event.pull_request.number }} --repo ${{ github.repository }} --json comments --jq '[.comments[].body]')" >> "$GITHUB_OUTPUT"
+          echo "reviews=$(gh api repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/reviews --jq '[.[].state]')" >> "$GITHUB_OUTPUT"
        env:
          GH_TOKEN: ${{ github.token }}
      - name: Fail when author pushes new code
        # Require new reviews when the author is pushing and they are not a member
        if: |
+          contains(fromJson(steps.comments.outputs.reviews), 'APPROVED') &&
          github.event_name == 'pull_request_target' &&
          github.event.action == 'synchronize' &&
          github.event.sender.login == github.event.pull_request.user.login &&
+          github.event.pull_request.author_association != 'CONTRIBUTOR' &&
          github.event.pull_request.author_association != 'MEMBER'
        run: |
+          echo "User's association is ${{ github.event.pull_request.author_association }}"
          # We get the list of reviewers who approved the PR
-          REVIEWERS=$(gh api repos/${{ github.repository }}/pulls/${{ github.event.number }}/reviews \
+          REVIEWERS=$(gh api repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/reviews \
            --jq '{reviewers: [.[] | select(.state == "APPROVED") | .user.login]}')
          # We request them to review again
-          echo $REVIEWERS | gh api --method POST repos/${{ github.repository }}/pulls/${{ github.event.number }}/requested_reviewers --input -
+          echo $REVIEWERS | gh api --method POST repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/requested_reviewers --input -
          echo "::error::Project needs to be reviewed again"
          exit 1
@@ -49,7 +54,7 @@ jobs:
        # If the previous step failed and github-actions hasn't commented yet we comment instructions
        if: failure() && !contains(fromJson(steps.comments.outputs.bodies), 'Review required! Latest push from author must always be reviewed')
        run: |
-          gh pr comment ${{ github.event.number }} --repo ${{ github.repository }} --body "Review required! Latest push from author must always be reviewed"
+          gh pr comment ${{ github.event.pull_request.number }} --repo ${{ github.repository }} --body "Review required! Latest push from author must always be reviewed"
        env:
          GH_TOKEN: ${{ github.token }}
          COMMENTS: ${{ steps.comments.outputs.users }}
...
name: test-github-actions

on:
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]
  merge_group:

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

env:
  CARGO_NET_GIT_FETCH_WITH_CLI: true

jobs:
  test-linux-stable-int:
    runs-on: arc-runners-polkadot-sdk
    timeout-minutes: 30
    container:
      image: "docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408"
    env:
      RUSTFLAGS: "-C debug-assertions -D warnings"
      RUST_BACKTRACE: 1
      WASM_BUILD_NO_COLOR: 1
      WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings"
      # Ensure we run the UI tests.
      RUN_UI_TESTS: 1
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: script
        run: WASM_BUILD_NO_COLOR=1 time cargo test -p staging-node-cli --release --locked -- --ignored
  quick-benchmarks:
    runs-on: arc-runners-polkadot-sdk
    timeout-minutes: 30
    container:
      image: "docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408"
    env:
      RUSTFLAGS: "-C debug-assertions -D warnings"
      RUST_BACKTRACE: "full"
      WASM_BUILD_NO_COLOR: 1
      WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings"
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: script
        run: time cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks --quiet -- benchmark pallet --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 --quiet
@@ -820,17 +820,22 @@ dependencies = [
  "assert_matches",
  "asset-hub-rococo-runtime",
  "asset-test-utils",
+ "cumulus-pallet-parachain-system",
  "emulated-integration-tests-common",
  "frame-support",
  "pallet-asset-conversion",
  "pallet-assets",
  "pallet-balances",
  "pallet-message-queue",
+ "pallet-treasury",
+ "pallet-utility",
  "pallet-xcm",
  "parachains-common",
  "parity-scale-codec",
  "penpal-runtime",
+ "polkadot-runtime-common",
  "rococo-runtime",
+ "rococo-runtime-constants",
  "rococo-system-emulated-network",
  "sp-runtime",
  "staging-xcm",
@@ -866,6 +871,7 @@ dependencies = [
  "hex-literal",
  "log",
  "pallet-asset-conversion",
+ "pallet-asset-conversion-ops",
  "pallet-asset-conversion-tx-payment",
  "pallet-assets",
  "pallet-aura",
@@ -990,6 +996,7 @@ dependencies = [
  "hex-literal",
  "log",
  "pallet-asset-conversion",
+ "pallet-asset-conversion-ops",
  "pallet-asset-conversion-tx-payment",
  "pallet-assets",
  "pallet-aura",
@@ -2830,6 +2837,36 @@ dependencies = [
  "testnet-parachains-constants",
 ]

+[[package]]
+name = "collectives-westend-integration-tests"
+version = "1.0.0"
+dependencies = [
+ "assert_matches",
+ "asset-hub-westend-runtime",
+ "collectives-westend-runtime",
+ "cumulus-pallet-parachain-system",
+ "cumulus-pallet-xcmp-queue",
+ "emulated-integration-tests-common",
+ "frame-support",
+ "pallet-asset-rate",
+ "pallet-assets",
+ "pallet-balances",
+ "pallet-message-queue",
+ "pallet-treasury",
+ "pallet-utility",
+ "pallet-xcm",
+ "parachains-common",
+ "parity-scale-codec",
+ "polkadot-runtime-common",
+ "sp-runtime",
+ "staging-xcm",
+ "staging-xcm-executor",
+ "testnet-parachains-constants",
+ "westend-runtime",
+ "westend-runtime-constants",
+ "westend-system-emulated-network",
+]
+
 [[package]]
 name = "collectives-westend-runtime"
 version = "3.0.0"
@@ -7348,6 +7385,7 @@ dependencies = [
  "node-primitives",
  "pallet-alliance",
  "pallet-asset-conversion",
+ "pallet-asset-conversion-ops",
  "pallet-asset-conversion-tx-payment",
  "pallet-asset-rate",
  "pallet-asset-tx-payment",
@@ -7537,9 +7575,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67"
 [[package]]
 name = "libc"
-version = "0.2.152"
+version = "0.2.153"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7"
+checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"

 [[package]]
 name = "libflate"
@@ -9499,6 +9537,7 @@ dependencies = [
  "frame-benchmarking",
  "frame-support",
  "frame-system",
+ "log",
  "pallet-assets",
  "pallet-balances",
  "parity-scale-codec",
@@ -9512,6 +9551,27 @@ dependencies = [
  "sp-std 14.0.0",
 ]

+[[package]]
+name = "pallet-asset-conversion-ops"
+version = "0.1.0"
+dependencies = [
+ "frame-benchmarking",
+ "frame-support",
+ "frame-system",
+ "log",
+ "pallet-asset-conversion",
+ "pallet-assets",
+ "pallet-balances",
+ "parity-scale-codec",
+ "primitive-types",
+ "scale-info",
+ "sp-arithmetic",
+ "sp-core",
+ "sp-io",
+ "sp-runtime",
+ "sp-std 14.0.0",
+]
+
 [[package]]
 name = "pallet-asset-conversion-tx-payment"
 version = "10.0.0"
@@ -9936,6 +9996,7 @@ dependencies = [
  "frame-benchmarking",
  "frame-support",
  "frame-system",
+ "log",
  "parity-scale-codec",
  "pretty_assertions",
  "scale-info",
@@ -13242,7 +13303,6 @@ dependencies = [
  "slotmap",
  "sp-core",
  "sp-maybe-compressed-blob",
- "sp-wasm-interface 20.0.0",
  "tempfile",
  "test-parachain-adder",
  "test-parachain-halt",
@@ -13279,7 +13339,6 @@ name = "polkadot-node-core-pvf-common"
 version = "7.0.0"
 dependencies = [
  "assert_matches",
- "cfg-if",
  "cpu-time",
  "futures",
  "landlock",
@@ -17404,6 +17463,7 @@ dependencies = [
  "sc-transaction-pool",
  "sc-transaction-pool-api",
  "sc-utils",
+ "schnellru",
  "serde",
  "serde_json",
  "sp-api",
...
@@ -103,6 +103,7 @@ members = [
 	"cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend",
 	"cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo",
 	"cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend",
+	"cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend",
 	"cumulus/parachains/integration-tests/emulated/tests/people/people-rococo",
 	"cumulus/parachains/integration-tests/emulated/tests/people/people-westend",
 	"cumulus/parachains/pallets/collective-content",
@@ -300,6 +301,7 @@ members = [
 	"substrate/frame",
 	"substrate/frame/alliance",
 	"substrate/frame/asset-conversion",
+	"substrate/frame/asset-conversion/ops",
 	"substrate/frame/asset-rate",
 	"substrate/frame/assets",
 	"substrate/frame/atomic-swap",
...
@@ -328,40 +328,58 @@ impl<T: Config<I>, I: 'static> SendXcm for Pallet<T, I> {
 		xcm: &mut Option<Xcm<()>>,
 	) -> SendResult<Self::Ticket> {
 		log::trace!(target: LOG_TARGET, "validate - msg: {xcm:?}, destination: {dest:?}");

-		// `dest` and `xcm` are required here
-		let dest_ref = dest.as_ref().ok_or(SendError::MissingArgument)?;
-		let xcm_ref = xcm.as_ref().ok_or(SendError::MissingArgument)?;
-
-		// we won't have an access to `dest` and `xcm` in the `deliver` method, so precompute
-		// everything required here
-		let message_size = xcm_ref.encoded_size() as _;
-
-		// bridge doesn't support oversized/overweight messages now. So it is better to drop such
-		// messages here than at the bridge hub. Let's check the message size.
-		if message_size > HARD_MESSAGE_SIZE_LIMIT {
-			return Err(SendError::ExceedsMaxMessageSize)
-		}
-
-		// We need to ensure that the known `dest`'s XCM version can comprehend the current `xcm`
-		// program. This may seem like an additional, unnecessary check, but it is not. A similar
-		// check is probably performed by the `ViaBridgeHubExporter`, which attempts to send a
-		// versioned message to the sibling bridge hub. However, the local bridge hub may have a
-		// higher XCM version than the remote `dest`. Once again, it is better to discard such
-		// messages here than at the bridge hub (e.g., to avoid losing funds).
-		let destination_version = T::DestinationVersion::get_version_for(dest_ref)
-			.ok_or(SendError::DestinationUnsupported)?;
-		let _ = VersionedXcm::from(xcm_ref.clone())
-			.into_version(destination_version)
-			.map_err(|()| SendError::DestinationUnsupported)?;
-
-		// just use exporter to validate destination and insert instructions to pay message fee
-		// at the sibling/child bridge hub
-		//
-		// the cost will include both cost of: (1) to-sibling bridge hub delivery (returned by
-		// the `Config::ToBridgeHubSender`) and (2) to-bridged bridge hub delivery (returned by
-		// `Self::exporter_for`)
-		ViaBridgeHubExporter::<T, I>::validate(dest, xcm)
-			.map(|(ticket, cost)| ((message_size, ticket), cost))
+		// In case of success, the `ViaBridgeHubExporter` can modify XCM instructions and consume
+		// `dest` / `xcm`, so we retain the clone of original message and the destination for later
+		// `DestinationVersion` validation.
+		let xcm_to_dest_clone = xcm.clone();
+		let dest_clone = dest.clone();
+
+		// First, use the inner exporter to validate the destination to determine if it is even
+		// routable. If it is not, return an error. If it is, then the XCM is extended with
+		// instructions to pay the message fee at the sibling/child bridge hub. The cost will
+		// include both the cost of (1) delivery to the sibling bridge hub (returned by
+		// `Config::ToBridgeHubSender`) and (2) delivery to the bridged bridge hub (returned by
+		// `Self::exporter_for`).
+		match ViaBridgeHubExporter::<T, I>::validate(dest, xcm) {
+			Ok((ticket, cost)) => {
+				// If the ticket is ok, it means we are routing with this router, so we need to
+				// apply more validations to the cloned `dest` and `xcm`, which are required here.
+				let xcm_to_dest_clone = xcm_to_dest_clone.ok_or(SendError::MissingArgument)?;
+				let dest_clone = dest_clone.ok_or(SendError::MissingArgument)?;
+
+				// We won't have access to `dest` and `xcm` in the `deliver` method, so we need to
+				// precompute everything required here. However, `dest` and `xcm` were consumed by
+				// `ViaBridgeHubExporter`, so we need to use their clones.
+				let message_size = xcm_to_dest_clone.encoded_size() as _;
+
+				// The bridge doesn't support oversized or overweight messages. Therefore, it's
+				// better to drop such messages here rather than at the bridge hub. Let's check the
+				// message size.
+				if message_size > HARD_MESSAGE_SIZE_LIMIT {
+					return Err(SendError::ExceedsMaxMessageSize)
+				}
+
+				// We need to ensure that the known `dest`'s XCM version can comprehend the current
+				// `xcm` program. This may seem like an additional, unnecessary check, but it is
+				// not. A similar check is probably performed by the `ViaBridgeHubExporter`, which
+				// attempts to send a versioned message to the sibling bridge hub. However, the
+				// local bridge hub may have a higher XCM version than the remote `dest`. Once
+				// again, it is better to discard such messages here than at the bridge hub (e.g.,
+				// to avoid losing funds).
+				let destination_version = T::DestinationVersion::get_version_for(&dest_clone)
+					.ok_or(SendError::DestinationUnsupported)?;
+				let _ = VersionedXcm::from(xcm_to_dest_clone)
+					.into_version(destination_version)
+					.map_err(|()| SendError::DestinationUnsupported)?;
+
+				Ok(((message_size, ticket), cost))
+			},
+			Err(e) => {
+				log::trace!(target: LOG_TARGET, "validate - ViaBridgeHubExporter - error: {e:?}");
+				Err(e)
+			},
+		}
 	}

 	fn deliver(ticket: Self::Ticket) -> Result<XcmHash, SendError> {
@@ -452,24 +470,51 @@ mod tests {
 	#[test]
 	fn not_applicable_if_destination_is_within_other_network() {
 		run_test(|| {
+			// unroutable dest
+			let dest = Location::new(2, [GlobalConsensus(ByGenesis([0; 32])), Parachain(1000)]);
+			let xcm: Xcm<()> = vec![ClearOrigin].into();
+
+			// check that router does not consume when `NotApplicable`
+			let mut xcm_wrapper = Some(xcm.clone());
 			assert_eq!(
-				send_xcm::<XcmBridgeHubRouter>(
-					Location::new(2, [GlobalConsensus(Rococo), Parachain(1000)]),
-					vec![].into(),
-				),
+				XcmBridgeHubRouter::validate(&mut Some(dest.clone()), &mut xcm_wrapper),
 				Err(SendError::NotApplicable),
 			);
+			// XCM is NOT consumed and untouched
+			assert_eq!(Some(xcm.clone()), xcm_wrapper);
+
+			// check the full `send_xcm`
+			assert_eq!(send_xcm::<XcmBridgeHubRouter>(dest, xcm,), Err(SendError::NotApplicable),);
 		});
 	}
 	#[test]
 	fn exceeds_max_message_size_if_size_is_above_hard_limit() {
 		run_test(|| {
+			// routable dest with XCM version
+			let dest =
+				Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)]);
+			// oversized XCM
+			let xcm: Xcm<()> = vec![ClearOrigin; HARD_MESSAGE_SIZE_LIMIT as usize].into();
+
+			// dest is routable with the inner router
+			assert_ok!(ViaBridgeHubExporter::<TestRuntime, ()>::validate(
+				&mut Some(dest.clone()),
+				&mut Some(xcm.clone())
+			));
+
+			// check for oversized message
+			let mut xcm_wrapper = Some(xcm.clone());
 			assert_eq!(
-				send_xcm::<XcmBridgeHubRouter>(
-					Location::new(2, [GlobalConsensus(Rococo), Parachain(1000)]),
-					vec![ClearOrigin; HARD_MESSAGE_SIZE_LIMIT as usize].into(),
-				),
+				XcmBridgeHubRouter::validate(&mut Some(dest.clone()), &mut xcm_wrapper),
+				Err(SendError::ExceedsMaxMessageSize),
+			);
+			// XCM is consumed by the inner router
+			assert!(xcm_wrapper.is_none());
+
+			// check the full `send_xcm`
+			assert_eq!(
+				send_xcm::<XcmBridgeHubRouter>(dest, xcm,),
 				Err(SendError::ExceedsMaxMessageSize),
 			);
 		});
@@ -478,11 +523,28 @@ mod tests {
 	#[test]
 	fn destination_unsupported_if_wrap_version_fails() {
 		run_test(|| {
+			// routable dest but we don't know XCM version
+			let dest = UnknownXcmVersionForRoutableLocation::get();
+			let xcm: Xcm<()> = vec![ClearOrigin].into();
+
+			// dest is routable with the inner router
+			assert_ok!(ViaBridgeHubExporter::<TestRuntime, ()>::validate(
+				&mut Some(dest.clone()),
+				&mut Some(xcm.clone())
+			));
+
+			// check that it does not pass XCM version check
+			let mut xcm_wrapper = Some(xcm.clone());
 			assert_eq!(
-				send_xcm::<XcmBridgeHubRouter>(
-					UnknownXcmVersionLocation::get(),
-					vec![ClearOrigin].into(),
-				),
+				XcmBridgeHubRouter::validate(&mut Some(dest.clone()), &mut xcm_wrapper),
+				Err(SendError::DestinationUnsupported),
+			);
+			// XCM is consumed by the inner router
+			assert!(xcm_wrapper.is_none());
+
+			// check the full `send_xcm`
+			assert_eq!(
+				send_xcm::<XcmBridgeHubRouter>(dest, xcm,),
 				Err(SendError::DestinationUnsupported),
 			);
 		});
...
@@ -61,7 +61,7 @@ parameter_types! {
 			Some((BridgeFeeAsset::get(), BASE_FEE).into())
 		)
 	];
-	pub UnknownXcmVersionLocation: Location = Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(9999)]);
+	pub UnknownXcmVersionForRoutableLocation: Location = Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(9999)]);
 }

 #[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
@@ -76,7 +76,7 @@ impl pallet_xcm_bridge_hub_router::Config<()> for TestRuntime {
 	type BridgedNetworkId = BridgedNetworkId;
 	type Bridges = NetworkExportTable<BridgeTable>;
 	type DestinationVersion =
-		LatestOrNoneForLocationVersionChecker<Equals<UnknownXcmVersionLocation>>;
+		LatestOrNoneForLocationVersionChecker<Equals<UnknownXcmVersionForRoutableLocation>>;
 	type BridgeHubOrigin = EnsureRoot<AccountId>;
 	type ToBridgeHubSender = TestToBridgeHubSender;
...
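A note on the two-phase `SendXcm` flow exercised by the router change and tests above: `send_xcm` is simply `validate` followed by `deliver`, with the `Option`-wrapped arguments allowing the router to consume them into the ticket. A minimal sketch under that contract (the generic `Router` and the free function name are illustrative, not part of the diff):

	fn send_by_hand<Router: SendXcm>(dest: Location, xcm: Xcm<()>) -> Result<XcmHash, SendError> {
		// `validate` may consume the wrapped values on success; anything `deliver`
		// still needs must be precomputed into the returned ticket.
		let mut dest = Some(dest);
		let mut xcm = Some(xcm);
		// Phase 1: routing checks and fee computation, yielding (ticket, cost).
		let (ticket, _cost) = Router::validate(&mut dest, &mut xcm)?;
		// Phase 2: actual dispatch, driven only by the precomputed ticket.
		Router::deliver(ticket)
	}

This is why the fix above clones `dest` and `xcm` before handing them to `ViaBridgeHubExporter`: after a successful inner `validate`, the originals may already be gone.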
@@ -32,7 +32,9 @@ use core::{clone::Clone, cmp::Eq, default::Default, fmt::Debug};
 use frame_support::PalletError;
 use scale_info::TypeInfo;
 use serde::{Deserialize, Serialize};
-use sp_consensus_grandpa::{AuthorityList, ConsensusLog, SetId, GRANDPA_ENGINE_ID};
+use sp_consensus_grandpa::{
+	AuthorityList, ConsensusLog, ScheduledChange, SetId, GRANDPA_ENGINE_ID,
+};
 use sp_runtime::{traits::Header as HeaderT, Digest, RuntimeDebug};
 use sp_std::{boxed::Box, vec::Vec};
@@ -147,24 +149,23 @@ pub struct GrandpaConsensusLogReader<Number>(sp_std::marker::PhantomData<Number>
 impl<Number: Codec> GrandpaConsensusLogReader<Number> {
 	/// Find and return scheduled (regular) change digest item.
-	pub fn find_scheduled_change(
-		digest: &Digest,
-	) -> Option<sp_consensus_grandpa::ScheduledChange<Number>> {
-		// find the first consensus digest with the right ID which converts to
-		// the right kind of consensus log.
-		digest
-			.convert_first(|log| log.consensus_try_to(&GRANDPA_ENGINE_ID))
-			.and_then(|log| match log {
-				ConsensusLog::ScheduledChange(change) => Some(change),
-				_ => None,
-			})
+	pub fn find_scheduled_change(digest: &Digest) -> Option<ScheduledChange<Number>> {
+		use sp_runtime::generic::OpaqueDigestItemId;
+		let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID);
+
+		let filter_log = |log: ConsensusLog<Number>| match log {
+			ConsensusLog::ScheduledChange(change) => Some(change),
+			_ => None,
+		};
+
+		// find the first consensus digest with the right ID which converts to
+		// the right kind of consensus log.
+		digest.convert_first(|l| l.try_to(id).and_then(filter_log))
 	}

 	/// Find and return forced change digest item. Or light client can't do anything
 	/// with forced changes, so we can't accept header with the forced change digest.
-	pub fn find_forced_change(
-		digest: &Digest,
-	) -> Option<(Number, sp_consensus_grandpa::ScheduledChange<Number>)> {
+	pub fn find_forced_change(digest: &Digest) -> Option<(Number, ScheduledChange<Number>)> {
 		// find the first consensus digest with the right ID which converts to
 		// the right kind of consensus log.
 		digest
@@ -346,7 +347,7 @@ mod tests {
 	use super::*;
 	use bp_runtime::ChainId;
 	use frame_support::weights::Weight;
-	use sp_runtime::{testing::H256, traits::BlakeTwo256, MultiSignature};
+	use sp_runtime::{testing::H256, traits::BlakeTwo256, DigestItem, MultiSignature};

 	struct TestChain;
@@ -385,4 +386,35 @@
 			max_expected_submit_finality_proof_arguments_size::<TestChain>(false, 100),
 		);
 	}
+
+	#[test]
+	fn find_scheduled_change_works() {
+		let scheduled_change = ScheduledChange { next_authorities: vec![], delay: 0 };
+
+		// first
+		let mut digest = Digest::default();
+		digest.push(DigestItem::Consensus(
+			GRANDPA_ENGINE_ID,
+			ConsensusLog::ScheduledChange(scheduled_change.clone()).encode(),
+		));
+		assert_eq!(
+			GrandpaConsensusLogReader::find_scheduled_change(&digest),
+			Some(scheduled_change.clone())
+		);
+
+		// not first
+		let mut digest = Digest::default();
+		digest.push(DigestItem::Consensus(
+			GRANDPA_ENGINE_ID,
+			ConsensusLog::<u64>::OnDisabled(0).encode(),
+		));
+		digest.push(DigestItem::Consensus(
+			GRANDPA_ENGINE_ID,
+			ConsensusLog::ScheduledChange(scheduled_change.clone()).encode(),
+		));
+		assert_eq!(
+			GrandpaConsensusLogReader::find_scheduled_change(&digest),
+			Some(scheduled_change.clone())
+		);
+	}
 }
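The new test only covers scheduled changes; a companion sketch for `find_forced_change` under the same pattern (not part of the diff; `ConsensusLog::ForcedChange` pairs the change with a block number, and the test name and `42u64` value are illustrative):

	#[test]
	fn find_forced_change_works() {
		let scheduled_change = ScheduledChange { next_authorities: vec![], delay: 0 };
		let mut digest = Digest::default();
		digest.push(DigestItem::Consensus(
			GRANDPA_ENGINE_ID,
			// A forced change wraps the underlying change together with a delay number.
			ConsensusLog::ForcedChange(42u64, scheduled_change.clone()).encode(),
		));
		assert_eq!(
			GrandpaConsensusLogReader::find_forced_change(&digest),
			Some((42u64, scheduled_change))
		);
	}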
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// 	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Benchmarking setup for pallet-session.

use sp_std::{prelude::*, vec};

use frame_benchmarking::{benchmarks, whitelisted_caller};
use frame_system::RawOrigin;
use pallet_session::*;
use parity_scale_codec::Decode;

pub struct Pallet<T: Config>(pallet_session::Pallet<T>);
pub trait Config: pallet_session::Config {}

benchmarks! {
	set_keys {
		let caller: T::AccountId = whitelisted_caller();
		frame_system::Pallet::<T>::inc_providers(&caller);
		let keys = T::Keys::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()).unwrap();
		let proof: Vec<u8> = vec![0,1,2,3];
	}: _(RawOrigin::Signed(caller), keys, proof)

	purge_keys {
		let caller: T::AccountId = whitelisted_caller();
		frame_system::Pallet::<T>::inc_providers(&caller);
		let keys = T::Keys::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()).unwrap();
		let proof: Vec<u8> = vec![0,1,2,3];
		let _t = pallet_session::Pallet::<T>::set_keys(RawOrigin::Signed(caller.clone()).into(), keys, proof);
	}: _(RawOrigin::Signed(caller))
}
+// This file is part of Substrate.
 // Copyright (C) Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
@@ -13,31 +15,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-//! Benchmarking setup for pallet-session
+//! Benchmarks for the Session Pallet.
+// This is separated into its own crate due to cyclic dependency issues.

 #![cfg_attr(not(feature = "std"), no_std)]

-#![cfg(feature = "runtime-benchmarks")]
-
-use sp_std::{prelude::*, vec};
-
-use frame_benchmarking::{benchmarks, whitelisted_caller};
-use frame_system::RawOrigin;
-use pallet_session::*;
-use parity_scale_codec::Decode;
-
-pub struct Pallet<T: Config>(pallet_session::Pallet<T>);
-pub trait Config: pallet_session::Config {}
-
-benchmarks! {
-	set_keys {
-		let caller: T::AccountId = whitelisted_caller();
-		frame_system::Pallet::<T>::inc_providers(&caller);
-		let keys = T::Keys::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()).unwrap();
-		let proof: Vec<u8> = vec![0,1,2,3];
-	}: _(RawOrigin::Signed(caller), keys, proof)
-
-	purge_keys {
-		let caller: T::AccountId = whitelisted_caller();
-		frame_system::Pallet::<T>::inc_providers(&caller);
-		let keys = T::Keys::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()).unwrap();
-		let proof: Vec<u8> = vec![0,1,2,3];
-		let _t = pallet_session::Pallet::<T>::set_keys(RawOrigin::Signed(caller.clone()).into(), keys, proof);
-	}: _(RawOrigin::Signed(caller))
-}
+#[cfg(feature = "runtime-benchmarks")]
+pub mod inner;
+
+#[cfg(feature = "runtime-benchmarks")]
+pub use inner::*;
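With the benchmark code split out behind its own crate, a consuming runtime opts in by implementing the crate's empty `Config` trait and aliasing the shim pallet. A sketch, assuming a `Runtime` type and the usual `runtime-benchmarks` feature gate in the runtime crate (module and alias names are illustrative):

	#[cfg(feature = "runtime-benchmarks")]
	mod benches {
		use super::Runtime;

		// The shim pallet exported by the split-out crate.
		pub use pallet_session_benchmarking::Pallet as SessionBench;

		// Satisfies `pub trait Config: pallet_session::Config {}` from the crate above.
		impl pallet_session_benchmarking::Config for Runtime {}
	}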
@@ -4,7 +4,11 @@
   "chainType": "Live",
   "bootNodes": [
     "/dns/rococo-asset-hub-bootnode-0.polkadot.io/tcp/30333/p2p/12D3KooWRrZMndHAopzao34uGsN7srjS3gh9nAjTGKLSyJeU31Lg",
-    "/dns/rococo-asset-hub-bootnode-1.polkadot.io/tcp/30333/p2p/12D3KooWAewimoNJqMaiiV5pYiowA5hLuh5JS5QiRJCCyWVrrSTS"
+    "/dns/rococo-asset-hub-bootnode-1.polkadot.io/tcp/30333/p2p/12D3KooWAewimoNJqMaiiV5pYiowA5hLuh5JS5QiRJCCyWVrrSTS",
+    "/dns/rococo-asset-hub-bootnode-0.polkadot.io/tcp/30335/ws/p2p/12D3KooWRrZMndHAopzao34uGsN7srjS3gh9nAjTGKLSyJeU31Lg",
+    "/dns/rococo-asset-hub-bootnode-1.polkadot.io/tcp/30335/ws/p2p/12D3KooWAewimoNJqMaiiV5pYiowA5hLuh5JS5QiRJCCyWVrrSTS",
+    "/dns/rococo-asset-hub-bootnode-0.polkadot.io/tcp/443/wss/p2p/12D3KooWRrZMndHAopzao34uGsN7srjS3gh9nAjTGKLSyJeU31Lg",
+    "/dns/rococo-asset-hub-bootnode-1.polkadot.io/tcp/443/wss/p2p/12D3KooWAewimoNJqMaiiV5pYiowA5hLuh5JS5QiRJCCyWVrrSTS"
   ],
   "telemetryEndpoints": null,
   "protocolId": null,
@@ -5,6 +5,10 @@
   "bootNodes": [
     "/dns/westend-asset-hub-bootnode-0.polkadot.io/tcp/30333/p2p/12D3KooWJaAfPyiye7ZQBuHengTJJoMrcaz7Jj1UzHiKdNxA1Nkd",
     "/dns/westend-asset-hub-bootnode-1.polkadot.io/tcp/30333/p2p/12D3KooWGL3hpWycWyeqyL9gHNnmmsL474WkPZdqraBHu4L6fQrW",
+    "/dns/westend-asset-hub-bootnode-0.polkadot.io/tcp/30335/ws/p2p/12D3KooWJaAfPyiye7ZQBuHengTJJoMrcaz7Jj1UzHiKdNxA1Nkd",
+    "/dns/westend-asset-hub-bootnode-1.polkadot.io/tcp/30335/ws/p2p/12D3KooWGL3hpWycWyeqyL9gHNnmmsL474WkPZdqraBHu4L6fQrW",
+    "/dns/westend-asset-hub-connect-0.polkadot.io/tcp/443/wss/p2p/12D3KooWJaAfPyiye7ZQBuHengTJJoMrcaz7Jj1UzHiKdNxA1Nkd",
+    "/dns/westend-asset-hub-connect-1.polkadot.io/tcp/443/wss/p2p/12D3KooWGL3hpWycWyeqyL9gHNnmmsL474WkPZdqraBHu4L6fQrW",
     "/dns/boot.stake.plus/tcp/33333/p2p/12D3KooWNiB27rpXX7EYongoWWUeRKzLQxWGms6MQU2B9LX7Ztzo",
     "/dns/boot.stake.plus/tcp/33334/wss/p2p/12D3KooWNiB27rpXX7EYongoWWUeRKzLQxWGms6MQU2B9LX7Ztzo",
     "/dns/boot.metaspan.io/tcp/36052/p2p/12D3KooWBCqfNb6Y39DXTr4UBWXyjuS3hcZM1qTbHhDXxF6HkAJJ",
...
@@ -4,7 +4,11 @@
   "chainType": "Live",
   "bootNodes": [
     "/dns/rococo-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWJCFBJmFF65xz5xHeZQRSCf35BxfSEB3RHQFoLza28LWU",
-    "/dns/rococo-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWJzLd8skcAgA24EcJey7aJAhYctfUxWGjSP5Usk9wbpPZ"
+    "/dns/rococo-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWJzLd8skcAgA24EcJey7aJAhYctfUxWGjSP5Usk9wbpPZ",
+    "/dns/rococo-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWJCFBJmFF65xz5xHeZQRSCf35BxfSEB3RHQFoLza28LWU",
+    "/dns/rococo-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWJzLd8skcAgA24EcJey7aJAhYctfUxWGjSP5Usk9wbpPZ",
+    "/dns/rococo-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWJCFBJmFF65xz5xHeZQRSCf35BxfSEB3RHQFoLza28LWU",
+    "/dns/rococo-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWJzLd8skcAgA24EcJey7aJAhYctfUxWGjSP5Usk9wbpPZ"
   ],
   "telemetryEndpoints": null,
   "protocolId": null,
@@ -5,6 +5,10 @@
   "bootNodes": [
     "/dns/westend-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWKyEuqkkWvFSrwZWKWBAsHgLV3HGfHj7yH3LNJLAVhmxY",
     "/dns/westend-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWBpvudthz61XC4oP2YYFFJdhWohBeQ1ffn1BMSGWhapjd",
+    "/dns/westend-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWKyEuqkkWvFSrwZWKWBAsHgLV3HGfHj7yH3LNJLAVhmxY",
+    "/dns/westend-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWBpvudthz61XC4oP2YYFFJdhWohBeQ1ffn1BMSGWhapjd",
+    "/dns/westend-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWKyEuqkkWvFSrwZWKWBAsHgLV3HGfHj7yH3LNJLAVhmxY",
+    "/dns/westend-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWBpvudthz61XC4oP2YYFFJdhWohBeQ1ffn1BMSGWhapjd",
     "/dns/westend-bridge-hub-boot-ng.dwellir.com/tcp/30338/p2p/12D3KooWJWWRYTAwBLqYkh7iMBGDr5ouJ3MHj7M3fZ7zWS4zEk6F",
     "/dns/westend-bridge-hub-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWJWWRYTAwBLqYkh7iMBGDr5ouJ3MHj7M3fZ7zWS4zEk6F",
     "/dns/boot-cr.gatotech.network/tcp/33330/p2p/12D3KooWJHG6qznPzTSEbuujHNcvyzBZcR9zNRPFcXWUaoVWZBEw",
@@ -5,6 +5,8 @@
   "bootNodes": [
     "/dns/westend-collectives-collator-node-0.parity-testnet.parity.io/tcp/30334/p2p/12D3KooWBMAuyzQu3yAf8YXyoyxsSzSsgoaqAepgnNyQcPaPjPXe",
     "/dns/westend-collectives-collator-node-1.parity-testnet.parity.io/tcp/30334/p2p/12D3KooWAujYtHbCs4MiDD57JNTntTJnYnikfnaPa7JdnMyAUrHB",
+    "/dns/westend-collectives-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWBMAuyzQu3yAf8YXyoyxsSzSsgoaqAepgnNyQcPaPjPXe",
+    "/dns/westend-collectives-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWAujYtHbCs4MiDD57JNTntTJnYnikfnaPa7JdnMyAUrHB",
     "/dns/westend-collectives-collator-0.polkadot.io/tcp/443/wss/p2p/12D3KooWBMAuyzQu3yAf8YXyoyxsSzSsgoaqAepgnNyQcPaPjPXe",
     "/dns/westend-collectives-collator-1.polkadot.io/tcp/443/wss/p2p/12D3KooWAujYtHbCs4MiDD57JNTntTJnYnikfnaPa7JdnMyAUrHB",
     "/dns/boot.stake.plus/tcp/38333/p2p/12D3KooWQoVsFCfgu21iu6kdtQsU9T6dPn1wsyLn1U34yPerR6zQ",
@@ -5,6 +5,8 @@
   "bootNodes": [
     "/dns/rococo-contracts-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWKg3Rpxcr9oJ8n6khoxpGKWztCZydtUZk2cojHqnfLrpj",
     "/dns/rococo-contracts-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWPEXYrz8tHU3nDtPoPw4V7ou5dzMEWSTuUj7vaWiYVAVh",
+    "/dns/rococo-contracts-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWKg3Rpxcr9oJ8n6khoxpGKWztCZydtUZk2cojHqnfLrpj",
+    "/dns/rococo-contracts-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWPEXYrz8tHU3nDtPoPw4V7ou5dzMEWSTuUj7vaWiYVAVh",
     "/dns/rococo-contracts-collator-node-0.polkadot.io/tcp/443/wss/p2p/12D3KooWKg3Rpxcr9oJ8n6khoxpGKWztCZydtUZk2cojHqnfLrpj",
     "/dns/rococo-contracts-collator-node-1.polkadot.io/tcp/443/wss/p2p/12D3KooWPEXYrz8tHU3nDtPoPw4V7ou5dzMEWSTuUj7vaWiYVAVh"
   ],
@@ -4,7 +4,11 @@
   "chainType": "Live",
   "bootNodes": [
     "/dns/rococo-coretime-collator-node-0.polkadot.io/tcp/30333/p2p/12D3KooWHBUH9wGBx1Yq1ZePov9VL3AzxRPv5DTR4KadiCU6VKxy",
-    "/dns/rococo-coretime-collator-node-1.polkadot.io/tcp/30333/p2p/12D3KooWB3SKxdj6kpwTkdMnHJi6YmadojCzmEqFkeFJjxN812XX"
+    "/dns/rococo-coretime-collator-node-1.polkadot.io/tcp/30333/p2p/12D3KooWB3SKxdj6kpwTkdMnHJi6YmadojCzmEqFkeFJjxN812XX",
+    "/dns/rococo-coretime-collator-node-0.polkadot.io/tcp/30335/ws/p2p/12D3KooWHBUH9wGBx1Yq1ZePov9VL3AzxRPv5DTR4KadiCU6VKxy",
+    "/dns/rococo-coretime-collator-node-1.polkadot.io/tcp/30335/ws/p2p/12D3KooWB3SKxdj6kpwTkdMnHJi6YmadojCzmEqFkeFJjxN812XX",
+    "/dns/rococo-coretime-collator-node-0.polkadot.io/tcp/443/wss/p2p/12D3KooWHBUH9wGBx1Yq1ZePov9VL3AzxRPv5DTR4KadiCU6VKxy",
+    "/dns/rococo-coretime-collator-node-1.polkadot.io/tcp/443/wss/p2p/12D3KooWB3SKxdj6kpwTkdMnHJi6YmadojCzmEqFkeFJjxN812XX"
   ],
   "telemetryEndpoints": null,
   "protocolId": null,
@@ -5,6 +5,10 @@
   "bootNodes": [
     "/dns/westend-coretime-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT",
     "/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH",
+    "/dns/westend-coretime-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT",
+    "/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH",
+    "/dns/westend-coretime-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT",
+    "/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH",
     "/dns/boot.metaspan.io/tcp/33019/p2p/12D3KooWCa1uNnEZqiqJY9jkKNQxwSLGPeZ5MjWHhjQMGwga9JMM",
     "/dns/boot-node.helikon.io/tcp/9420/p2p/12D3KooWFBPartM873MNm1AmVK3etUz34cAE9A9rwPztPno2epQ3",
     "/dns/boot-node.helikon.io/tcp/9422/wss/p2p/12D3KooWFBPartM873MNm1AmVK3etUz34cAE9A9rwPztPno2epQ3",
@@ -4,13 +4,11 @@
   "chainType": "Live",
   "bootNodes": [
     "/dns/rococo-people-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWDZg5jMYhKXTu6RU491V5sxsFnP4oaEmZJEUfcRkYzps5",
-    "/dns/rococo-people-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWDZg5jMYhKXTu6RU491V5sxsFnP4oaEmZJEUfcRkYzps5",
     "/dns/rococo-people-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWGGR5i6qQqfo7iDNp7vjDRKPWuDk53idGV6nFLwS12X5H",
-    "/dns/rococo-people-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWGGR5i6qQqfo7iDNp7vjDRKPWuDk53idGV6nFLwS12X5H",
-    "/dns/rococo-people-collator-node-2.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWBvA9BmBfrsVMcAcqVXGYFCpMTvkSk2igNXpmoareYbeT",
-    "/dns/rococo-people-collator-node-2.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWBvA9BmBfrsVMcAcqVXGYFCpMTvkSk2igNXpmoareYbeT",
-    "/dns/rococo-people-collator-node-3.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWQ7Q9jLcJTPXy7KEp5hSZ8YMY9pHx9CnQVz3T8TKQ81UG",
-    "/dns/rococo-people-collator-node-3.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWQ7Q9jLcJTPXy7KEp5hSZ8YMY9pHx9CnQVz3T8TKQ81UG"
+    "/dns/rococo-people-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWDZg5jMYhKXTu6RU491V5sxsFnP4oaEmZJEUfcRkYzps5",
+    "/dns/rococo-people-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWGGR5i6qQqfo7iDNp7vjDRKPWuDk53idGV6nFLwS12X5H",
+    "/dns/rococo-people-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWDZg5jMYhKXTu6RU491V5sxsFnP4oaEmZJEUfcRkYzps5",
+    "/dns/rococo-people-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWGGR5i6qQqfo7iDNp7vjDRKPWuDk53idGV6nFLwS12X5H"
   ],
   "telemetryEndpoints": null,
   "protocolId": null,