Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • parity/mirrors/polkadot-sdk
1 result
Show changes
Commits on Source (20)
Showing changes with 128 additions and 134 deletions
......@@ -15,7 +15,7 @@ exclude = [
[formatting]
reorder_arrays = true
inline_table_expand = false
array_auto_expand = false
array_auto_expand = true
array_auto_collapse = false
indent_string = " " # tab
......
......@@ -102,7 +102,6 @@ jobs:
--exclude
"substrate/frame/contracts/fixtures/build"
"substrate/frame/contracts/fixtures/contracts/common"
"substrate/frame/revive/fixtures/contracts/common"
- name: deny git deps
run: python3 .github/scripts/deny-git-deps.py .
check-markdown:
......
......@@ -281,7 +281,7 @@ jobs:
uses: docker/setup-buildx-action@f7ce87c1d6bead3e36075b2ce75da1f6cc28aaca # v3.9.0
- name: Cache Docker layers
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
......
......@@ -220,7 +220,7 @@ jobs:
wait_build_images:
needs: [ci-env]
runs-on: ubuntu-latest
timeout-minutes: 60
timeout-minutes: 90
outputs:
BUILD_RUN_ID: ${{ steps.wait_build.outputs.BUILD_RUN_ID }}
steps:
......
......@@ -10873,11 +10873,10 @@ dependencies = [
 
[[package]]
name = "num-integer"
version = "0.1.45"
version = "0.1.46"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9"
checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f"
dependencies = [
"autocfg",
"num-traits",
]
 
......@@ -12848,6 +12847,9 @@ dependencies = [
"hex-literal",
"impl-trait-for-tuples",
"log",
"num-bigint",
"num-integer",
"num-traits",
"pallet-balances",
"pallet-proxy",
"pallet-revive-fixtures",
......@@ -12863,6 +12865,7 @@ dependencies = [
"pretty_assertions",
"rand 0.8.5",
"rand_pcg",
"ripemd",
"rlp 0.6.1",
"scale-info",
"secp256k1 0.28.2",
......@@ -12880,6 +12883,7 @@ dependencies = [
"sp-tracing 16.0.0",
"staging-xcm",
"staging-xcm-builder",
"substrate-bn",
"subxt-signer 0.38.0",
]
 
......@@ -12924,6 +12928,8 @@ name = "pallet-revive-fixtures"
version = "0.1.0"
dependencies = [
"anyhow",
"cargo_metadata",
"pallet-revive-uapi",
"polkavm-linker 0.21.0",
"sp-core 28.0.0",
"sp-io 30.0.0",
......@@ -24532,6 +24538,19 @@ dependencies = [
"zeroize",
]
 
[[package]]
name = "substrate-bn"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b5bbfa79abbae15dd642ea8176a21a635ff3c00059961d1ea27ad04e5b441c"
dependencies = [
"byteorder",
"crunchy",
"lazy_static",
"rand 0.8.5",
"rustc-hex",
]
[[package]]
name = "substrate-build-script-utils"
version = "11.0.0"
......
......@@ -645,6 +645,7 @@ bitvec = { version = "1.0.1", default-features = false }
blake2 = { version = "0.10.4", default-features = false }
blake2b_simd = { version = "1.0.2", default-features = false }
blake3 = { version = "1.5" }
bn = { package = "substrate-bn", version = "0.6", default-features = false }
bounded-collections = { version = "0.2.3", default-features = false }
bounded-vec = { version = "0.7" }
bp-asset-hub-rococo = { path = "bridges/chains/chain-asset-hub-rococo", default-features = false }
......@@ -888,8 +889,9 @@ node-rpc = { path = "substrate/bin/node/rpc" }
node-testing = { path = "substrate/bin/node/testing" }
nohash-hasher = { version = "0.2.0" }
novelpoly = { version = "2.0.0", package = "reed-solomon-novelpoly" }
num-bigint = { version = "0.4.3" }
num-bigint = { version = "0.4.3", default-features = false }
num-format = { version = "0.4.3" }
num-integer = { version = "0.1.46", default-features = false }
num-rational = { version = "0.4.1" }
num-traits = { version = "0.2.17", default-features = false }
num_cpus = { version = "1.13.1" }
......@@ -1137,6 +1139,7 @@ relay-substrate-client = { path = "bridges/relays/client-substrate" }
relay-utils = { path = "bridges/relays/utils" }
remote-externalities = { path = "substrate/utils/frame/remote-externalities", default-features = false, package = "frame-remote-externalities" }
reqwest = { version = "0.12.9", default-features = false }
ripemd = { version = "0.1.3", default-features = false }
rlp = { version = "0.6.1", default-features = false }
rococo-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/relays/rococo" }
rococo-parachain-runtime = { path = "cumulus/parachains/runtimes/testing/rococo-parachain" }
......
......@@ -283,7 +283,6 @@ pub mod pallet {
/// The `RequestCount` is decreased by one at the beginning of every block. This is to ensure
/// that the pallet can always make progress.
#[pallet::storage]
#[pallet::getter(fn request_count)]
pub type RequestCount<T: Config<I>, I: 'static = ()> = StorageValue<_, u32, ValueQuery>;
/// High level info about the imported commitments.
......@@ -392,7 +391,7 @@ pub mod pallet {
init_data: InitializationDataOf<T, I>,
) -> Result<(), Error<T, I>> {
if init_data.authority_set.len == 0 {
return Err(Error::<T, I>::InvalidInitialAuthoritySet)
return Err(Error::<T, I>::InvalidInitialAuthoritySet);
}
CurrentAuthoritySetInfo::<T, I>::put(init_data.authority_set);
......@@ -404,6 +403,13 @@ pub mod pallet {
Ok(())
}
impl<T: Config<I>, I: 'static> Pallet<T, I> {
/// The current number of requests which have written to storage.
pub fn request_count() -> u32 {
RequestCount::<T, I>::get()
}
}
}
#[cfg(test)]
......
......@@ -418,7 +418,6 @@ pub mod pallet {
/// Hash of the best finalized header.
#[pallet::storage]
#[pallet::getter(fn best_finalized)]
pub type BestFinalized<T: Config<I>, I: 'static = ()> =
StorageValue<_, BridgedBlockId<T, I>, OptionQuery>;
......@@ -821,6 +820,13 @@ pub fn initialize_for_benchmarks<T: Config<I>, I: 'static>(header: BridgedHeader
.expect("only used from benchmarks; benchmarks are correct; qed");
}
impl<T: Config<I>, I: 'static> Pallet<T, I> {
/// Returns the hash of the best finalized header.
pub fn best_finalized() -> Option<BridgedBlockId<T, I>> {
BestFinalized::<T, I>::get()
}
}
#[cfg(test)]
mod tests {
use super::*;
......
......@@ -506,14 +506,12 @@ pub mod pallet {
/// runtime methods may still be used to do that (i.e. democracy::referendum to update halt
/// flag directly or call the `set_operating_mode`).
#[pallet::storage]
#[pallet::getter(fn module_owner)]
pub type PalletOwner<T: Config<I>, I: 'static = ()> = StorageValue<_, T::AccountId>;
/// The current operating mode of the pallet.
///
/// Depending on the mode either all, some, or no transactions will be allowed.
#[pallet::storage]
#[pallet::getter(fn operating_mode)]
pub type PalletOperatingMode<T: Config<I>, I: 'static = ()> =
StorageValue<_, MessagesOperatingMode, ValueQuery>;
......@@ -733,7 +731,7 @@ fn ensure_normal_operating_mode<T: Config<I>, I: 'static>() -> Result<(), Error<
if PalletOperatingMode::<T, I>::get() ==
MessagesOperatingMode::Basic(BasicOperatingMode::Normal)
{
return Ok(())
return Ok(());
}
Err(Error::<T, I>::NotOperatingNormally)
......
......@@ -235,6 +235,30 @@ pub mod pallet {
}
impl<T: Config<I>, I: 'static> Pallet<T, I> {
/// Relayers that have reserved some of their balance to get free priority boost
/// for their message delivery transactions.
pub fn registered_relayer(
relayer: &T::AccountId,
) -> Option<Registration<BlockNumberFor<T>, T::Balance>> {
RegisteredRelayers::<T, I>::get(relayer)
}
/// Map of the relayer => accumulated reward.
pub fn relayer_reward<EncodeLikeAccountId, EncodeLikeReward>(
key1: EncodeLikeAccountId,
key2: EncodeLikeReward,
) -> Option<<RelayerRewardsKeyProviderOf<T, I> as StorageDoubleMapKeyProvider>::Value>
where
EncodeLikeAccountId: codec::EncodeLike<
<RelayerRewardsKeyProviderOf<T, I> as StorageDoubleMapKeyProvider>::Key1,
>,
EncodeLikeReward: codec::EncodeLike<
<RelayerRewardsKeyProviderOf<T, I> as StorageDoubleMapKeyProvider>::Key2,
>,
{
RelayerRewards::<T, I>::get(key1, key2)
}
fn do_claim_rewards(
relayer: T::AccountId,
reward_kind: T::Reward,
......@@ -289,7 +313,7 @@ pub mod pallet {
// registration is inactive if relayer stake is less than required
if registration.stake < Self::required_stake() {
return false
return false;
}
// registration is inactive if it ends soon
......@@ -297,7 +321,7 @@ pub mod pallet {
.valid_till
.saturating_sub(frame_system::Pallet::<T>::block_number());
if remaining_lease <= Self::required_registration_lease() {
return false
return false;
}
true
......@@ -319,7 +343,7 @@ pub mod pallet {
relayer,
);
return
return;
},
};
let slash_destination = slash_destination.into_account();
......@@ -380,7 +404,7 @@ pub mod pallet {
reward_balance: T::RewardBalance,
) {
if reward_balance.is_zero() {
return
return;
}
RelayerRewards::<T, I>::mutate(
......@@ -512,7 +536,6 @@ pub mod pallet {
/// Map of the relayer => accumulated reward.
#[pallet::storage]
#[pallet::getter(fn relayer_reward)]
pub type RelayerRewards<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
_,
<RelayerRewardsKeyProviderOf<T, I> as StorageDoubleMapKeyProvider>::Hasher1,
......@@ -530,7 +553,6 @@ pub mod pallet {
/// priority and will be rejected (without significant tip) in case if registered
/// relayer is present.
#[pallet::storage]
#[pallet::getter(fn registered_relayer)]
pub type RegisteredRelayers<T: Config<I>, I: 'static = ()> = StorageMap<
_,
Blake2_128Concat,
......@@ -606,7 +628,8 @@ mod tests {
150,
));
// check if registered
let registration = Pallet::<TestRuntime>::registered_relayer(REGISTER_RELAYER).unwrap();
let registration =
Pallet::<TestRuntime>::registered_relayer(&REGISTER_RELAYER).unwrap();
assert_eq!(registration, Registration { valid_till: 150, stake: Stake::get() });
// slash and deregister
......@@ -787,7 +810,7 @@ mod tests {
));
assert_eq!(Balances::reserved_balance(REGISTER_RELAYER), Stake::get());
assert_eq!(
Pallet::<TestRuntime>::registered_relayer(REGISTER_RELAYER),
Pallet::<TestRuntime>::registered_relayer(&REGISTER_RELAYER),
Some(Registration { valid_till: 150, stake: Stake::get() }),
);
......@@ -855,7 +878,7 @@ mod tests {
assert_eq!(Balances::reserved_balance(REGISTER_RELAYER), Stake::get());
assert_eq!(Balances::free_balance(REGISTER_RELAYER), free_balance + 1);
assert_eq!(
Pallet::<TestRuntime>::registered_relayer(REGISTER_RELAYER),
Pallet::<TestRuntime>::registered_relayer(&REGISTER_RELAYER),
Some(Registration { valid_till: 150, stake: Stake::get() }),
);
......@@ -919,7 +942,7 @@ mod tests {
assert_eq!(Balances::reserved_balance(REGISTER_RELAYER), Stake::get());
assert_eq!(Balances::free_balance(REGISTER_RELAYER), free_balance - 1);
assert_eq!(
Pallet::<TestRuntime>::registered_relayer(REGISTER_RELAYER),
Pallet::<TestRuntime>::registered_relayer(&REGISTER_RELAYER),
Some(Registration { valid_till: 150, stake: Stake::get() }),
);
......
......@@ -120,18 +120,18 @@ pub mod pallet {
fn on_initialize(_n: BlockNumberFor<T>) -> Weight {
// if XCM channel is still congested, we don't change anything
if T::LocalXcmChannelManager::is_congested(&T::SiblingBridgeHubLocation::get()) {
return T::WeightInfo::on_initialize_when_congested()
return T::WeightInfo::on_initialize_when_congested();
}
// if bridge has reported congestion, we don't change anything
let mut bridge = Self::bridge();
if bridge.is_congested {
return T::WeightInfo::on_initialize_when_congested()
return T::WeightInfo::on_initialize_when_congested();
}
// if we can't decrease the delivery fee factor anymore, we don't change anything
if bridge.delivery_fee_factor == MINIMAL_DELIVERY_FEE_FACTOR {
return T::WeightInfo::on_initialize_when_congested()
return T::WeightInfo::on_initialize_when_congested();
}
let previous_factor = bridge.delivery_fee_factor;
......@@ -190,10 +190,14 @@ pub mod pallet {
/// primitives (lane-id aka bridge-id, derived from XCM locations) to support multiple bridges
/// by the same pallet instance.
#[pallet::storage]
#[pallet::getter(fn bridge)]
pub type Bridge<T: Config<I>, I: 'static = ()> = StorageValue<_, BridgeState, ValueQuery>;
impl<T: Config<I>, I: 'static> Pallet<T, I> {
/// Bridge that we are using.
pub fn bridge() -> BridgeState {
Bridge::<T, I>::get()
}
/// Called when new message is sent (queued to local outbound XCM queue) over the bridge.
pub(crate) fn on_message_sent_to_bridge(message_size: u32) {
log::trace!(
......@@ -208,7 +212,7 @@ pub mod pallet {
// if outbound queue is not congested AND bridge has not reported congestion, do
// nothing
if !is_channel_with_bridge_hub_congested && !is_bridge_congested {
return Err(())
return Err(());
}
// ok - we need to increase the fee factor, let's do that
......@@ -276,7 +280,7 @@ impl<T: Config<I>, I: 'static> ExporterFor for Pallet<T, I> {
target: LOG_TARGET,
"Router with bridged_network_id {bridged_network:?} does not support bridging to network {network:?}!",
);
return None
return None;
}
}
......@@ -298,7 +302,7 @@ impl<T: Config<I>, I: 'static> ExporterFor for Pallet<T, I> {
network,
remote_location,
);
return None
return None;
},
};
......@@ -318,7 +322,7 @@ impl<T: Config<I>, I: 'static> ExporterFor for Pallet<T, I> {
network,
remote_location,
);
return None
return None;
},
},
None => 0,
......@@ -388,7 +392,7 @@ impl<T: Config<I>, I: 'static> SendXcm for Pallet<T, I> {
// better to drop such messages here rather than at the bridge hub. Let's check the
// message size."
if message_size > HARD_MESSAGE_SIZE_LIMIT {
return Err(SendError::ExceedsMaxMessageSize)
return Err(SendError::ExceedsMaxMessageSize);
}
// We need to ensure that the known `dest`'s XCM version can comprehend the current
......
......@@ -425,6 +425,20 @@ pub trait OwnedBridgeModule<T: frame_system::Config> {
log::info!(target: Self::LOG_TARGET, "Setting operating mode to {:?}.", operating_mode);
Ok(())
}
/// Pallet owner has a right to halt all module operations and then resume it. If it is `None`,
/// then there are no direct ways to halt/resume module operations, but other runtime methods
/// may still be used to do that (i.e. democracy::referendum to update halt flag directly
/// or call the `set_operating_mode`).
fn module_owner() -> Option<T::AccountId> {
Self::OwnerStorage::get()
}
/// The current operating mode of the module.
/// Depending on the mode either all, some, or no transactions will be allowed.
fn operating_mode() -> Self::OperatingMode {
Self::OperatingModeStorage::get()
}
}
/// All extra operations with weights that we need in bridges.
......
......@@ -20,7 +20,7 @@ use clap::Parser;
use codec::{Decode, Encode};
use polkadot_node_primitives::{BlockData, PoV, POV_BOMB_LIMIT};
use polkadot_parachain_primitives::primitives::ValidationParams;
use polkadot_primitives::{BlockNumber as RBlockNumber, Hash as RHash, HeadData};
use polkadot_primitives::PersistedValidationData;
use sc_executor::WasmExecutor;
use sp_core::traits::{CallContext, CodeExecutor, RuntimeCode, WrappedRuntimeCode};
use std::{fs, path::PathBuf, time::Instant};
......@@ -104,17 +104,10 @@ fn main() -> anyhow::Result<()> {
tracing::error!(%error, "Failed to decode `PoV`");
anyhow::anyhow!("Failed to decode `PoV`")
})?;
let head_data = HeadData::decode(pov_file_ptr).map_err(|error| {
tracing::error!(%error, "Failed to `HeadData`");
anyhow::anyhow!("Failed to decode `HeadData`")
})?;
let relay_parent_storage_root = RHash::decode(pov_file_ptr).map_err(|error| {
tracing::error!(%error, "Failed to relay storage root");
anyhow::anyhow!("Failed to decode relay storage root")
})?;
let relay_parent_number = RBlockNumber::decode(pov_file_ptr).map_err(|error| {
tracing::error!(%error, "Failed to relay block number");
anyhow::anyhow!("Failed to decode relay block number")
let pvd = PersistedValidationData::decode(pov_file_ptr).map_err(|error| {
tracing::error!(%error, "Failed to `PersistedValidationData`");
anyhow::anyhow!("Failed to decode `PersistedValidationData`")
})?;
let pov = sp_maybe_compressed_blob::decompress(&pov.block_data.0, POV_BOMB_LIMIT).map_err(
......@@ -125,9 +118,9 @@ fn main() -> anyhow::Result<()> {
)?;
let validation_params = ValidationParams {
relay_parent_number,
relay_parent_storage_root,
parent_head: head_data,
relay_parent_number: pvd.relay_parent_number,
relay_parent_storage_root: pvd.relay_parent_storage_root,
parent_head: pvd.parent_head,
block_data: BlockData(pov.into()),
};
......
......@@ -398,7 +398,7 @@ where
aura_internal::seal::<_, P>(&pre_hash, &author_pub, keystore).map_err(Box::new)?;
let mut block_import_params = BlockImportParams::new(BlockOrigin::Own, pre_header);
block_import_params.post_digests.push(seal_digest);
block_import_params.body = Some(body.clone());
block_import_params.body = Some(body);
block_import_params.state_action =
StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes));
block_import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain);
......
......@@ -409,6 +409,7 @@ where
parent_header.clone(),
*relay_parent_header.state_root(),
*relay_parent_header.number(),
validation_data.max_pov_size,
);
}
......
......@@ -375,6 +375,7 @@ where
parachain_candidate: candidate,
validation_code_hash,
core_index: *core_index,
max_pov_size: validation_data.max_pov_size,
}) {
tracing::error!(target: crate::LOG_TARGET, ?err, "Unable to send block to collation task.");
return
......
......@@ -126,6 +126,7 @@ async fn handle_collation_message<Block: BlockT, RClient: RelayChainInterface +
validation_code_hash,
relay_parent,
core_index,
max_pov_size,
} = message;
let hash = parachain_candidate.block.header().hash();
......@@ -160,6 +161,7 @@ async fn handle_collation_message<Block: BlockT, RClient: RelayChainInterface +
parent_header.clone(),
relay_parent_header.state_root,
relay_parent_header.number,
max_pov_size,
);
} else {
tracing::error!(target: LOG_TARGET, "Failed to get relay parent header from hash: {relay_parent:?}");
......
......@@ -255,6 +255,8 @@ struct CollatorMessage<Block: BlockT> {
pub validation_code_hash: ValidationCodeHash,
/// Core index that this block should be submitted on
pub core_index: CoreIndex,
/// Maximum pov size. Currently needed only for exporting PoV.
pub max_pov_size: u32,
}
/// Fetch the `CoreSelector` and `ClaimQueueOffset` for `parent_hash`.
......
......@@ -273,6 +273,7 @@ pub(crate) fn export_pov_to_path<Block: BlockT>(
parent_header: Block::Header,
relay_parent_storage_root: RHash,
relay_parent_number: RBlockNumber,
max_pov_size: u32,
) {
if let Err(error) = fs::create_dir_all(&path) {
tracing::error!(target: LOG_TARGET, %error, path = %path.display(), "Failed to create PoV export directory");
......@@ -288,7 +289,11 @@ pub(crate) fn export_pov_to_path<Block: BlockT>(
};
pov.encode_to(&mut file);
HeadData(parent_header.encode()).encode_to(&mut file);
relay_parent_storage_root.encode_to(&mut file);
relay_parent_number.encode_to(&mut file);
PersistedValidationData {
parent_head: HeadData(parent_header.encode()),
relay_parent_number,
relay_parent_storage_root,
max_pov_size,
}
.encode_to(&mut file);
}
......@@ -17,9 +17,7 @@
use crate::imports::*;
use assets_common::runtime_api::runtime_decl_for_fungibles_api::FungiblesApiV2;
use emulated_integration_tests_common::test_chain_can_claim_assets;
use frame_support::traits::fungible::Mutate;
use xcm_executor::traits::DropAssets;
#[test]
......@@ -35,83 +33,3 @@ fn assets_can_be_claimed() {
amount
);
}
#[test]
/// Verifies that a sibling parachain (Penpal) can, via a single XCM message sent
/// from its root origin, claim assets that its users previously had trapped on
/// AssetHubWestend and deposit them back to each user's location.
fn chain_can_claim_assets_for_its_users() {
	// Many Penpal users have assets trapped in AssetHubWestend.
	// Each entry pairs a user location (as seen from AssetHub) with the trapped assets.
	let beneficiaries: Vec<(Location, Assets)> = vec![
		// Some WND.
		(
			Location::new(1, [Parachain(2000), AccountId32 { id: [0u8; 32], network: None }]),
			(Parent, 10_000_000_000_000u128).into(),
		),
		// Some USDT.
		(
			Location::new(1, [Parachain(2000), AccountId32 { id: [1u8; 32], network: None }]),
			([PalletInstance(ASSETS_PALLET_ID), GeneralIndex(USDT_ID.into())], 100_000_000u128)
				.into(),
		),
	];

	// Start with those assets trapped.
	// `drop_assets` records them in AssetHub's asset-trap storage as if a prior
	// execution had left them behind.
	AssetHubWestend::execute_with(|| {
		for (location, assets) in &beneficiaries {
			<AssetHubWestend as AssetHubWestendPallet>::PolkadotXcm::drop_assets(
				location,
				assets.clone().into(),
				&XcmContext { origin: None, message_id: [0u8; 32], topic: None },
			);
		}
	});

	let penpal_to_asset_hub = PenpalA::sibling_location_of(AssetHubWestend::para_id());
	// Base message: withdraw DOT-relative funds and pay execution fees from them.
	let mut builder = Xcm::<()>::builder()
		.withdraw_asset((Parent, 1_000_000_000_000u128))
		.pay_fees((Parent, 100_000_000_000u128));

	// Loop through all beneficiaries.
	// For each one, claim its trapped assets while impersonating that user's
	// `AccountId32` junction, then deposit them back to the user.
	for (location, assets) in &beneficiaries {
		builder = builder.execute_with_origin(
			// We take only the last part, the `AccountId32` junction.
			Some((*location.interior().last().unwrap()).into()),
			Xcm::<()>::builder_unsafe()
				.claim_asset(assets.clone(), Location::new(0, [GeneralIndex(5)])) // Means lost assets were version 5.
				.deposit_asset(assets.clone(), location.clone())
				.build(),
		)
	}

	// Finish assembling the message.
	let message = builder.build();

	// Fund PenpalA's sovereign account on AssetHubWestend so it can pay for fees.
	AssetHubWestend::execute_with(|| {
		let penpal_as_seen_by_asset_hub = AssetHubWestend::sibling_location_of(PenpalA::para_id());
		let penpal_sov_account_on_asset_hub =
			AssetHubWestend::sovereign_account_id_of(penpal_as_seen_by_asset_hub);

		type Balances = <AssetHubWestend as AssetHubWestendPallet>::Balances;
		assert_ok!(<Balances as Mutate<_>>::mint_into(
			&penpal_sov_account_on_asset_hub,
			2_000_000_000_000u128,
		));
	});

	// We can send a message from Penpal root that claims all those assets for each beneficiary.
	PenpalA::execute_with(|| {
		assert_ok!(<PenpalA as PenpalAPallet>::PolkadotXcm::send(
			<PenpalA as Chain>::RuntimeOrigin::root(),
			bx!(penpal_to_asset_hub.into()),
			bx!(VersionedXcm::from(message)),
		));
	});

	// We assert beneficiaries have received their funds.
	// NOTE(review): balances are checked via the sovereign account derived from each
	// beneficiary location — presumably equivalent to the user's account on AssetHub;
	// confirm against `sovereign_account_id_of` semantics.
	AssetHubWestend::execute_with(|| {
		for (location, expected_assets) in &beneficiaries {
			let sov_account = AssetHubWestend::sovereign_account_id_of(location.clone());
			let actual_assets =
				<AssetHubWestend as Chain>::Runtime::query_account_balances(sov_account).unwrap();
			assert_eq!(VersionedAssets::from(expected_assets.clone()), actual_assets);
		}
	});
}