where
- Suffix: SignedExtensionSchema,
+ Suffix: TransactionExtensionSchema,
{
fn from_params(
spec_version: u32,
@@ -321,9 +326,9 @@ where
genesis_hash: Hash,
nonce: Nonce,
tip: Balance,
- extra: (Suffix::Payload, Suffix::AdditionalSigned),
+ extra: (Suffix::Payload, Suffix::Implicit),
) -> Self {
- GenericSignedExtension::new(
+ GenericTransactionExtension::new(
(
(
(), // non-zero sender
@@ -365,7 +370,7 @@ where
}
/// Signed extension that is used by most chains.
-pub type CommonSignedExtension = SuffixedCommonSignedExtension<()>;
+pub type CommonTransactionExtension = SuffixedCommonTransactionExtension<()>;
#[cfg(test)]
mod tests {
diff --git a/bridges/primitives/runtime/Cargo.toml b/bridges/primitives/runtime/Cargo.toml
index 6786bf8f21ced12e2424ecc17ff0c4ce96fd96d7..22206fb2c376ce53fee9dc8ff806baaef3ce7c28 100644
--- a/bridges/primitives/runtime/Cargo.toml
+++ b/bridges/primitives/runtime/Cargo.toml
@@ -13,10 +13,10 @@ workspace = true
codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false }
hash-db = { version = "0.16.0", default-features = false }
impl-trait-for-tuples = "0.2.2"
-log = { version = "0.4.19", default-features = false }
+log = { workspace = true }
num-traits = { version = "0.2", default-features = false }
scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0", default-features = false, features = ["alloc", "derive"] }
+serde = { features = ["alloc", "derive"], workspace = true }
# Substrate Dependencies
diff --git a/bridges/primitives/runtime/src/extensions.rs b/bridges/primitives/runtime/src/extensions.rs
index d896bc92efffc4e8fcb427ffa7057dece6f17241..a31e7b5bb47a64ec2333bbaba3e9c520aa53ef5a 100644
--- a/bridges/primitives/runtime/src/extensions.rs
+++ b/bridges/primitives/runtime/src/extensions.rs
@@ -20,135 +20,138 @@ use codec::{Compact, Decode, Encode};
use impl_trait_for_tuples::impl_for_tuples;
use scale_info::{StaticTypeInfo, TypeInfo};
use sp_runtime::{
- traits::{DispatchInfoOf, SignedExtension},
+ impl_tx_ext_default,
+ traits::{Dispatchable, TransactionExtension, TransactionExtensionBase},
transaction_validity::TransactionValidityError,
};
use sp_std::{fmt::Debug, marker::PhantomData};
-/// Trait that describes some properties of a `SignedExtension` that are needed in order to send a
-/// transaction to the chain.
-pub trait SignedExtensionSchema: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo {
+/// Trait that describes some properties of a `TransactionExtension` that are needed in order to
+/// send a transaction to the chain.
+pub trait TransactionExtensionSchema:
+ Encode + Decode + Debug + Eq + Clone + StaticTypeInfo
+{
/// A type of the data encoded as part of the transaction.
type Payload: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo;
/// Parameters which are part of the payload used to produce transaction signature,
/// but don't end up in the transaction itself (i.e. inherent part of the runtime).
- type AdditionalSigned: Encode + Debug + Eq + Clone + StaticTypeInfo;
+ type Implicit: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo;
}
-impl SignedExtensionSchema for () {
+impl TransactionExtensionSchema for () {
type Payload = ();
- type AdditionalSigned = ();
+ type Implicit = ();
}
-/// An implementation of `SignedExtensionSchema` using generic params.
+/// An implementation of `TransactionExtensionSchema` using generic params.
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq, TypeInfo)]
-pub struct GenericSignedExtensionSchema<P, S>(PhantomData<(P, S)>);
+pub struct GenericTransactionExtensionSchema<P, S>(PhantomData<(P, S)>);
-impl<P, S> SignedExtensionSchema for GenericSignedExtensionSchema<P, S>
+impl<P, S> TransactionExtensionSchema for GenericTransactionExtensionSchema<P, S>
where
P: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo,
- S: Encode + Debug + Eq + Clone + StaticTypeInfo,
+ S: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo,
{
type Payload = P;
- type AdditionalSigned = S;
+ type Implicit = S;
}
-/// The `SignedExtensionSchema` for `frame_system::CheckNonZeroSender`.
-pub type CheckNonZeroSender = GenericSignedExtensionSchema<(), ()>;
+/// The `TransactionExtensionSchema` for `frame_system::CheckNonZeroSender`.
+pub type CheckNonZeroSender = GenericTransactionExtensionSchema<(), ()>;
-/// The `SignedExtensionSchema` for `frame_system::CheckSpecVersion`.
-pub type CheckSpecVersion = GenericSignedExtensionSchema<(), u32>;
+/// The `TransactionExtensionSchema` for `frame_system::CheckSpecVersion`.
+pub type CheckSpecVersion = GenericTransactionExtensionSchema<(), u32>;
-/// The `SignedExtensionSchema` for `frame_system::CheckTxVersion`.
-pub type CheckTxVersion = GenericSignedExtensionSchema<(), u32>;
+/// The `TransactionExtensionSchema` for `frame_system::CheckTxVersion`.
+pub type CheckTxVersion = GenericTransactionExtensionSchema<(), u32>;
-/// The `SignedExtensionSchema` for `frame_system::CheckGenesis`.
-pub type CheckGenesis<Hash> = GenericSignedExtensionSchema<(), Hash>;
+/// The `TransactionExtensionSchema` for `frame_system::CheckGenesis`.
+pub type CheckGenesis<Hash> = GenericTransactionExtensionSchema<(), Hash>;
-/// The `SignedExtensionSchema` for `frame_system::CheckEra`.
-pub type CheckEra<Hash> = GenericSignedExtensionSchema<sp_runtime::generic::Era, Hash>;
+/// The `TransactionExtensionSchema` for `frame_system::CheckEra`.
+pub type CheckEra<Hash> = GenericTransactionExtensionSchema<sp_runtime::generic::Era, Hash>;
-/// The `SignedExtensionSchema` for `frame_system::CheckNonce`.
-pub type CheckNonce<TxNonce> = GenericSignedExtensionSchema<Compact<TxNonce>, ()>;
+/// The `TransactionExtensionSchema` for `frame_system::CheckNonce`.
+pub type CheckNonce<TxNonce> = GenericTransactionExtensionSchema<Compact<TxNonce>, ()>;
-/// The `SignedExtensionSchema` for `frame_system::CheckWeight`.
-pub type CheckWeight = GenericSignedExtensionSchema<(), ()>;
+/// The `TransactionExtensionSchema` for `frame_system::CheckWeight`.
+pub type CheckWeight = GenericTransactionExtensionSchema<(), ()>;
-/// The `SignedExtensionSchema` for `pallet_transaction_payment::ChargeTransactionPayment`.
-pub type ChargeTransactionPayment<Balance> = GenericSignedExtensionSchema<Compact<Balance>, ()>;
+/// The `TransactionExtensionSchema` for `pallet_transaction_payment::ChargeTransactionPayment`.
+pub type ChargeTransactionPayment<Balance> =
+ GenericTransactionExtensionSchema<Compact<Balance>, ()>;
-/// The `SignedExtensionSchema` for `polkadot-runtime-common::PrevalidateAttests`.
-pub type PrevalidateAttests = GenericSignedExtensionSchema<(), ()>;
+/// The `TransactionExtensionSchema` for `polkadot-runtime-common::PrevalidateAttests`.
+pub type PrevalidateAttests = GenericTransactionExtensionSchema<(), ()>;
-/// The `SignedExtensionSchema` for `BridgeRejectObsoleteHeadersAndMessages`.
-pub type BridgeRejectObsoleteHeadersAndMessages = GenericSignedExtensionSchema<(), ()>;
+/// The `TransactionExtensionSchema` for `BridgeRejectObsoleteHeadersAndMessages`.
+pub type BridgeRejectObsoleteHeadersAndMessages = GenericTransactionExtensionSchema<(), ()>;
-/// The `SignedExtensionSchema` for `RefundBridgedParachainMessages`.
+/// The `TransactionExtensionSchema` for `RefundBridgedParachainMessages`.
/// This schema is dedicated to the `RefundBridgedParachainMessages` signed extension, used as a
/// wildcard/placeholder. It relies on the fact that the SCALE encoding of `()`, `((), ())` and
/// `((), (), ())` is the same. So the runtime can contain any kind of tuple:
/// `(BridgeRefundBridgeHubRococoMessages)`
/// `(BridgeRefundBridgeHubRococoMessages, BridgeRefundBridgeHubWestendMessages)`
/// `(BridgeRefundParachainMessages1, ..., BridgeRefundParachainMessagesN)`
-pub type RefundBridgedParachainMessagesSchema = GenericSignedExtensionSchema<(), ()>;
+pub type RefundBridgedParachainMessagesSchema = GenericTransactionExtensionSchema<(), ()>;
#[impl_for_tuples(1, 12)]
-impl SignedExtensionSchema for Tuple {
+impl TransactionExtensionSchema for Tuple {
for_tuples!( type Payload = ( #( Tuple::Payload ),* ); );
- for_tuples!( type AdditionalSigned = ( #( Tuple::AdditionalSigned ),* ); );
+ for_tuples!( type Implicit = ( #( Tuple::Implicit ),* ); );
}
/// A simplified version of signed extensions meant for producing signed transactions
/// and signed payloads in the client code.
#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
-pub struct GenericSignedExtension<S: SignedExtensionSchema> {
+pub struct GenericTransactionExtension<S: TransactionExtensionSchema> {
/// A payload that is included in the transaction.
pub payload: S::Payload,
#[codec(skip)]
// It may be set to `None` if extensions are decoded. We are never reconstructing transactions
- // (and it makes no sense to do that) => decoded version of `SignedExtensions` is only used to
- // read fields of the `payload`. And when resigning transaction, we're reconstructing
- // `SignedExtensions` from scratch.
- additional_signed: Option<S::AdditionalSigned>,
+ // (and it makes no sense to do that) => decoded version of `TransactionExtensions` is only
+ // used to read fields of the `payload`. And when resigning transaction, we're reconstructing
+ // `TransactionExtensions` from scratch.
+ implicit: Option<S::Implicit>,
}
-impl<S: SignedExtensionSchema> GenericSignedExtension<S> {
- /// Create new `GenericSignedExtension` object.
- pub fn new(payload: S::Payload, additional_signed: Option<S::AdditionalSigned>) -> Self {
- Self { payload, additional_signed }
+impl<S: TransactionExtensionSchema> GenericTransactionExtension<S> {
+ /// Create new `GenericTransactionExtension` object.
+ pub fn new(payload: S::Payload, implicit: Option<S::Implicit>) -> Self {
+ Self { payload, implicit }
}
}
-impl<S> SignedExtension for GenericSignedExtension<S>
+impl<S> TransactionExtensionBase for GenericTransactionExtension<S>
where
- S: SignedExtensionSchema,
+ S: TransactionExtensionSchema,
S::Payload: Send + Sync,
- S::AdditionalSigned: Send + Sync,
+ S::Implicit: Send + Sync,
{
const IDENTIFIER: &'static str = "Not needed.";
- type AccountId = ();
- type Call = ();
- type AdditionalSigned = S::AdditionalSigned;
- type Pre = ();
+ type Implicit = S::Implicit;
- fn additional_signed(&self) -> Result<Self::AdditionalSigned, TransactionValidityError> {
+ fn implicit(&self) -> Result<Self::Implicit, TransactionValidityError> {
// we shall not ever see this error in relay, because we are never signing decoded
// transactions. Instead we're constructing and signing new transactions. So the error code
// is kinda random here
- self.additional_signed.clone().ok_or(
- frame_support::unsigned::TransactionValidityError::Unknown(
+ self.implicit
+ .clone()
+ .ok_or(frame_support::unsigned::TransactionValidityError::Unknown(
frame_support::unsigned::UnknownTransaction::Custom(0xFF),
- ),
- )
+ ))
}
+}
+impl<S, C, Context> TransactionExtension<C, Context> for GenericTransactionExtension<S>
+where
+ C: Dispatchable,
+ S: TransactionExtensionSchema,
+ S::Payload: Send + Sync,
+ S::Implicit: Send + Sync,
+{
+ type Pre = ();
+ type Val = ();
- fn pre_dispatch(
- self,
- _who: &Self::AccountId,
- _call: &Self::Call,
- _info: &DispatchInfoOf<Self::Call>,
- _len: usize,
- ) -> Result<Self::Pre, TransactionValidityError> {
- Ok(())
- }
+ impl_tx_ext_default!(C; Context; validate prepare);
}
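For orientation, the split matters because the `Payload` half of each schema is SCALE-encoded into the transaction itself, while the `Implicit` half is only mixed into the payload that gets signed. A relayer-side sketch of composing the schemas above (hypothetical chain parameters; the hash, nonce and balance types are stand-ins):

```rust
use codec::Compact;
use sp_core::H256;
use sp_runtime::generic::Era;

// A schema tuple mirroring a typical chain's transaction extension set.
type CommonSchema = (
    CheckNonZeroSender,
    CheckSpecVersion,
    CheckTxVersion,
    CheckGenesis<H256>,
    CheckEra<H256>,
    CheckNonce<u32>,
    CheckWeight,
    ChargeTransactionPayment<u128>,
);

fn build_extension(
    spec_version: u32,
    transaction_version: u32,
    genesis_hash: H256,
    era: Era,
    era_hash: H256,
    nonce: u32,
    tip: u128,
) -> GenericTransactionExtension<CommonSchema> {
    GenericTransactionExtension::new(
        // `Payload`: encoded into the transaction itself.
        ((), (), (), (), era, Compact(nonce), (), Compact(tip)),
        // `Implicit`: folded into the signed payload, never transmitted.
        Some(((), spec_version, transaction_version, genesis_hash, era_hash, (), (), ())),
    )
}
```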
diff --git a/bridges/primitives/test-utils/src/lib.rs b/bridges/primitives/test-utils/src/lib.rs
index f23ddd1a10d3681900b024999aef279ea6fcb91d..1d80890779bf8310b393d585749e96f9577196a1 100644
--- a/bridges/primitives/test-utils/src/lib.rs
+++ b/bridges/primitives/test-utils/src/lib.rs
@@ -129,7 +129,7 @@ pub fn make_justification_for_header(
votes_ancestries.push(child.clone());
}
- // The header we need to use when pre-commiting is the one at the highest height
+ // The header we need to use when pre-committing is the one at the highest height
// on our chain.
let precommit_candidate = chain.last().map(|h| (h.hash(), *h.number())).unwrap();
unsigned_precommits.push(precommit_candidate);
diff --git a/bridges/snowbridge/README.md b/bridges/snowbridge/README.md
index 49b9c2eaf553780176897a770bad9579d53bfaa9..6561df401120e9c5c5d6ee2762eb1423b5d6daaf 100644
--- a/bridges/snowbridge/README.md
+++ b/bridges/snowbridge/README.md
@@ -1,32 +1,40 @@
-# Snowbridge
-[![codecov](https://codecov.io/gh/Snowfork/snowbridge/branch/main/graph/badge.svg?token=9hvgSws4rN)](https://codecov.io/gh/Snowfork/snowbridge)
+# Snowbridge ·
+[![codecov](https://codecov.io/gh/Snowfork/polkadot-sdk/branch/snowbridge/graph/badge.svg?token=9hvgSws4rN)](https://codecov.io/gh/Snowfork/polkadot-sdk)
![GitHub](https://img.shields.io/github/license/Snowfork/snowbridge)
Snowbridge is a trustless bridge between Polkadot and Ethereum. For documentation, visit https://docs.snowbridge.network.
## Components
+The Snowbridge project lives in two repositories:
+
+- [Snowfork/polkadot-sdk](https://github.com/Snowfork/polkadot-sdk): The Snowbridge parachain and pallets live in
+a fork of the Polkadot SDK. Changes are eventually contributed back to
+[paritytech/polkadot-sdk](https://github.com/paritytech/polkadot-sdk).
+- [Snowfork/snowbridge](https://github.com/Snowfork/snowbridge): The rest of the Snowbridge components, like contracts,
+off-chain relayer, end-to-end tests and test-net setup code.
+
### Parachain
-Polkadot parachain and our pallets. See [parachain/README.md](https://github.com/Snowfork/snowbridge/blob/main/parachain/README.md).
+Polkadot parachain and our pallets. See [README.md](https://github.com/Snowfork/polkadot-sdk/blob/snowbridge/bridges/snowbridge/README.md).
### Contracts
-Ethereum contracts and unit tests. See [contracts/README.md](https://github.com/Snowfork/snowbridge/blob/main/contracts/README.md)
+Ethereum contracts and unit tests. See [Snowfork/snowbridge/contracts/README.md](https://github.com/Snowfork/snowbridge/blob/main/contracts/README.md)
### Relayer
Off-chain relayer services for relaying messages between Polkadot and Ethereum. See
-[relayer/README.md](https://github.com/Snowfork/snowbridge/blob/main/relayer/README.md)
+[Snowfork/snowbridge/relayer/README.md](https://github.com/Snowfork/snowbridge/blob/main/relayer/README.md)
### Local Testnet
Scripts to provision a local testnet, running the above services to bridge between local deployments of Polkadot and
-Ethereum. See [web/packages/test/README.md](https://github.com/Snowfork/snowbridge/blob/main/web/packages/test/README.md).
+Ethereum. See [Snowfork/snowbridge/web/packages/test/README.md](https://github.com/Snowfork/snowbridge/blob/main/web/packages/test/README.md).
### Smoke Tests
-Integration tests for our local testnet. See [smoketest/README.md](https://github.com/Snowfork/snowbridge/blob/main/smoketest/README.md).
+Integration tests for our local testnet. See [Snowfork/snowbridge/smoketest/README.md](https://github.com/Snowfork/snowbridge/blob/main/smoketest/README.md).
## Development
@@ -83,7 +91,7 @@ direnv allow
### Upgrading the Rust toolchain
-Sometimes we would like to upgrade rust toolchain. First update `parachain/rust-toolchain.toml` as required and then
+Sometimes we would like to upgrade the Rust toolchain. First update `rust-toolchain.toml` as required and then
update `flake.lock` by running
```sh
nix flake lock --update-input rust-overlay
diff --git a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml
index 2f76d5b8357985c65cdc09c86257bc6ddd766250..c8999633c97abb00174e38e16ed5618e7baf0b59 100644
--- a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml
+++ b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "snowbridge-pallet-ethereum-client"
description = "Snowbridge Ethereum Client Pallet"
-version = "0.0.0"
+version = "0.2.0"
authors = ["Snowfork "]
edition.workspace = true
repository.workspace = true
@@ -15,8 +15,8 @@ workspace = true
targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
-serde = { version = "1.0.195", optional = true }
-serde_json = { version = "1.0.111", optional = true }
+serde = { optional = true, workspace = true, default-features = true }
+serde_json = { optional = true, workspace = true, default-features = true }
codec = { version = "3.6.1", package = "parity-scale-codec", default-features = false, features = ["derive"] }
scale-info = { version = "2.9.0", default-features = false, features = ["derive"] }
ssz_rs = { version = "0.9.0", default-features = false }
@@ -24,7 +24,7 @@ ssz_rs_derive = { version = "0.9.0", default-features = false }
byte-slice-cast = { version = "1.2.1", default-features = false }
rlp = { version = "0.5.2", default-features = false }
hex-literal = { version = "0.4.1", optional = true }
-log = { version = "0.4.20", default-features = false }
+log = { workspace = true }
frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true }
frame-support = { path = "../../../../substrate/frame/support", default-features = false }
@@ -45,12 +45,12 @@ pallet-timestamp = { path = "../../../../substrate/frame/timestamp", default-fea
[dev-dependencies]
rand = "0.8.5"
sp-keyring = { path = "../../../../substrate/primitives/keyring" }
-serde_json = "1.0.111"
+serde_json = { workspace = true, default-features = true }
hex-literal = "0.4.1"
pallet-timestamp = { path = "../../../../substrate/frame/timestamp" }
snowbridge-pallet-ethereum-client-fixtures = { path = "./fixtures" }
sp-io = { path = "../../../../substrate/primitives/io" }
-serde = "1.0.195"
+serde = { workspace = true, default-features = true }
[features]
default = ["std"]
diff --git a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml
index 1081b162ddaec58037863a65028db4dffd03b438..b850496cd4e14cd906565d488450b339a29f463f 100644
--- a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml
+++ b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "snowbridge-pallet-inbound-queue"
description = "Snowbridge Inbound Queue Pallet"
-version = "0.0.0"
+version = "0.2.0"
authors = ["Snowfork "]
edition.workspace = true
repository.workspace = true
@@ -15,11 +15,11 @@ workspace = true
targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
-serde = { version = "1.0.195", optional = true }
+serde = { optional = true, workspace = true, default-features = true }
codec = { version = "3.6.1", package = "parity-scale-codec", default-features = false, features = ["derive"] }
scale-info = { version = "2.9.0", default-features = false, features = ["derive"] }
hex-literal = { version = "0.4.1", optional = true }
-log = { version = "0.4.20", default-features = false }
+log = { workspace = true }
alloy-primitives = { version = "0.4.2", default-features = false, features = ["rlp"] }
alloy-sol-types = { version = "0.4.2", default-features = false }
alloy-rlp = { version = "0.3.3", default-features = false, features = ["derive"] }
diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml
index 61f1421e056773c4f078390f9c48f7b8fa0420d3..64605a42f0d383d838429eb9b82b5f6cf238ab09 100644
--- a/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml
+++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "snowbridge-pallet-inbound-queue-fixtures"
description = "Snowbridge Inbound Queue Test Fixtures"
-version = "0.9.0"
+version = "0.10.0"
authors = ["Snowfork "]
edition.workspace = true
repository.workspace = true
diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs
index 110f611c6766020039bd1f73def900914da8cae2..749fb0367f332d743b01ad9d56238106ced36e72 100644
--- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs
+++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs
@@ -3,7 +3,7 @@
use super::*;
use frame_support::{
- parameter_types,
+ derive_impl, parameter_types,
traits::{ConstU128, ConstU32, Everything},
weights::IdentityFee,
};
@@ -47,10 +47,9 @@ parameter_types! {
type Balance = u128;
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
impl frame_system::Config for Test {
type BaseCallFilter = Everything;
- type BlockWeights = ();
- type BlockLength = ();
type RuntimeOrigin = RuntimeOrigin;
type RuntimeCall = RuntimeCall;
type RuntimeTask = RuntimeTask;
@@ -60,16 +59,8 @@ impl frame_system::Config for Test {
type Lookup = IdentityLookup<AccountId>;
type RuntimeEvent = RuntimeEvent;
type BlockHashCount = BlockHashCount;
- type DbWeight = ();
- type Version = ();
type PalletInfo = PalletInfo;
type AccountData = pallet_balances::AccountData<Balance>;
- type OnNewAccount = ();
- type OnKilledAccount = ();
- type SystemWeightInfo = ();
- type SS58Prefix = ();
- type OnSetCode = ();
- type MaxConsumers = frame_support::traits::ConstU32<16>;
type Nonce = u64;
type Block = Block;
}
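The `derive_impl` change is what allows deleting all of those `type Foo = ();` lines: every associated type missing from the impl body is filled in from the `TestDefaultConfig` prelude. Conceptually (a sketch, reusing the mock's `Test`, `Block` and `Balance` types):

```rust
use frame_support::derive_impl;

// Associated types not written out here (DbWeight, Version, SS58Prefix,
// MaxConsumers, ...) are injected from the TestDefaultConfig prelude.
#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
impl frame_system::Config for Test {
    // Only the items that differ from the defaults are spelled out.
    type Block = Block;
    type AccountData = pallet_balances::AccountData<Balance>;
}
```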
diff --git a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml
index ae52fb3e5c49d5d35558d78a38b7f00b0d01ac8f..f16a28cb1e457d9ebfb7804fa013e5b57858f79e 100644
--- a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml
+++ b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "snowbridge-pallet-outbound-queue"
description = "Snowbridge Outbound Queue Pallet"
-version = "0.0.0"
+version = "0.2.0"
authors = ["Snowfork "]
edition.workspace = true
repository.workspace = true
@@ -15,7 +15,7 @@ workspace = true
targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
-serde = { version = "1.0.195", features = ["alloc", "derive"], default-features = false }
+serde = { features = ["alloc", "derive"], workspace = true }
codec = { version = "3.6.1", package = "parity-scale-codec", default-features = false, features = ["derive"] }
scale-info = { version = "2.9.0", default-features = false, features = ["derive"] }
hex-literal = { version = "0.4.1", optional = true }
diff --git a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml
index c185d5af7062045f40946fcbd3c45cb62b932216..0606e9de33056c9dffae50befcc1da5e865dca44 100644
--- a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml
+++ b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "snowbridge-outbound-queue-merkle-tree"
description = "Snowbridge Outbound Queue Merkle Tree"
-version = "0.1.1"
+version = "0.3.0"
authors = ["Snowfork "]
edition.workspace = true
repository.workspace = true
diff --git a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml
index 347b3bae493b7491790854be7a28f82386d2ee4b..cb68fd0a250a92e7f6a6693f3aebf1c8553308aa 100644
--- a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml
+++ b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "snowbridge-outbound-queue-runtime-api"
description = "Snowbridge Outbound Queue Runtime API"
-version = "0.0.0"
+version = "0.2.0"
authors = ["Snowfork "]
edition.workspace = true
repository.workspace = true
diff --git a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs
index dd8fee4e2ed08ec0f3090b765fa882b063a98300..6e78fb4467210e3cb5e1eb581b377cbbfeac74ad 100644
--- a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs
+++ b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs
@@ -3,7 +3,7 @@
use super::*;
use frame_support::{
- parameter_types,
+ derive_impl, parameter_types,
traits::{Everything, Hooks},
weights::IdentityFee,
};
@@ -37,10 +37,9 @@ parameter_types! {
pub const BlockHashCount: u64 = 250;
}
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
impl frame_system::Config for Test {
type BaseCallFilter = Everything;
- type BlockWeights = ();
- type BlockLength = ();
type RuntimeOrigin = RuntimeOrigin;
type RuntimeCall = RuntimeCall;
type RuntimeTask = RuntimeTask;
@@ -50,16 +49,7 @@ impl frame_system::Config for Test {
type Lookup = IdentityLookup<AccountId>;
type RuntimeEvent = RuntimeEvent;
type BlockHashCount = BlockHashCount;
- type DbWeight = ();
- type Version = ();
type PalletInfo = PalletInfo;
- type AccountData = ();
- type OnNewAccount = ();
- type OnKilledAccount = ();
- type SystemWeightInfo = ();
- type SS58Prefix = ();
- type OnSetCode = ();
- type MaxConsumers = frame_support::traits::ConstU32<16>;
type Nonce = u64;
type Block = Block;
}
diff --git a/bridges/snowbridge/pallets/system/Cargo.toml b/bridges/snowbridge/pallets/system/Cargo.toml
index aa600511633bb7d5fef36f8d4ae6cec847c4e6c4..5ad04290de044a2c8ed13aa092f5ea033aaafb97 100644
--- a/bridges/snowbridge/pallets/system/Cargo.toml
+++ b/bridges/snowbridge/pallets/system/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "snowbridge-pallet-system"
description = "Snowbridge System Pallet"
-version = "0.0.0"
+version = "0.2.0"
authors = ["Snowfork "]
edition.workspace = true
repository.workspace = true
@@ -22,7 +22,7 @@ scale-info = { version = "2.9.0", default-features = false, features = ["derive"
frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true }
frame-support = { path = "../../../../substrate/frame/support", default-features = false }
frame-system = { path = "../../../../substrate/frame/system", default-features = false }
-log = { version = "0.4.20", default-features = false }
+log = { workspace = true }
sp-core = { path = "../../../../substrate/primitives/core", default-features = false }
sp-std = { path = "../../../../substrate/primitives/std", default-features = false }
diff --git a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml
index 355d2d29147f3cd84ae013363db874c9b9739b8e..eb02ae1db529730f51743e79a322e54db44fee51 100644
--- a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml
+++ b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "snowbridge-system-runtime-api"
description = "Snowbridge System Runtime API"
-version = "0.0.0"
+version = "0.2.0"
authors = ["Snowfork "]
edition.workspace = true
repository.workspace = true
diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs
index b7f38fb753d31bd67acb78174e175f90fc711175..6e5ceb5e9b1d42796567c3da5e549b2af3cfd4de 100644
--- a/bridges/snowbridge/pallets/system/src/lib.rs
+++ b/bridges/snowbridge/pallets/system/src/lib.rs
@@ -37,8 +37,6 @@
//! `force_update_channel` and extrinsics to manage agents and channels for system parachains.
#![cfg_attr(not(feature = "std"), no_std)]
-pub use pallet::*;
-
#[cfg(test)]
mod mock;
@@ -79,6 +77,8 @@ use xcm_executor::traits::ConvertLocation;
#[cfg(feature = "runtime-benchmarks")]
use frame_support::traits::OriginTrait;
+pub use pallet::*;
+
pub type BalanceOf<T> =
<<T as pallet::Config>::Token as Inspect<<T as frame_system::Config>::AccountId>>::Balance;
pub type AccountIdOf<T> = <T as frame_system::Config>::AccountId;
diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs
index edc3f141b0735d7439b120c51da836fb8a77bd04..de2970dd550ba75fe42de08dc4d297cd5cccdf1f 100644
--- a/bridges/snowbridge/pallets/system/src/mock.rs
+++ b/bridges/snowbridge/pallets/system/src/mock.rs
@@ -2,8 +2,8 @@
// SPDX-FileCopyrightText: 2023 Snowfork
use crate as snowbridge_system;
use frame_support::{
- parameter_types,
- traits::{tokens::fungible::Mutate, ConstU128, ConstU16, ConstU64, ConstU8},
+ derive_impl, parameter_types,
+ traits::{tokens::fungible::Mutate, ConstU128, ConstU64, ConstU8},
weights::IdentityFee,
PalletId,
};
@@ -95,11 +95,9 @@ frame_support::construct_runtime!(
}
);
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
impl frame_system::Config for Test {
type BaseCallFilter = frame_support::traits::Everything;
- type BlockWeights = ();
- type BlockLength = ();
- type DbWeight = ();
type RuntimeOrigin = RuntimeOrigin;
type RuntimeCall = RuntimeCall;
type RuntimeTask = RuntimeTask;
@@ -109,15 +107,8 @@ impl frame_system::Config for Test {
type Lookup = IdentityLookup<AccountId>;
type RuntimeEvent = RuntimeEvent;
type BlockHashCount = ConstU64<250>;
- type Version = ();
type PalletInfo = PalletInfo;
type AccountData = pallet_balances::AccountData<u128>;
- type OnNewAccount = ();
- type OnKilledAccount = ();
- type SystemWeightInfo = ();
- type SS58Prefix = ConstU16<42>;
- type OnSetCode = ();
- type MaxConsumers = frame_support::traits::ConstU32<16>;
type Nonce = u64;
type Block = Block;
}
diff --git a/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs
new file mode 100644
index 0000000000000000000000000000000000000000..77b5c1aa631db89a986837f258ee7dea45a580d0
--- /dev/null
+++ b/bridges/snowbridge/parachain/pallets/ethereum-beacon-client/src/mock.rs
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2023 Snowfork
+use crate as ethereum_beacon_client;
+use frame_support::parameter_types;
+use pallet_timestamp;
+use primitives::{Fork, ForkVersions};
+use sp_core::H256;
+use sp_runtime::traits::{BlakeTwo256, IdentityLookup};
+
+#[cfg(not(feature = "beacon-spec-mainnet"))]
+pub mod minimal {
+ use super::*;
+
+ use crate::config;
+ use frame_support::derive_impl;
+ use hex_literal::hex;
+ use primitives::CompactExecutionHeader;
+ use snowbridge_core::inbound::{Log, Proof};
+ use sp_runtime::BuildStorage;
+ use std::{fs::File, path::PathBuf};
+
+ type Block = frame_system::mocking::MockBlock<Test>;
+
+ frame_support::construct_runtime!(
+ pub enum Test {
+ System: frame_system::{Pallet, Call, Storage, Event<T>},
+ Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent},
+ EthereumBeaconClient: ethereum_beacon_client::{Pallet, Call, Storage, Event<T>},
+ }
+ );
+
+ parameter_types! {
+ pub const BlockHashCount: u64 = 250;
+ pub const SS58Prefix: u8 = 42;
+ }
+
+ #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
+ impl frame_system::Config for Test {
+ type BaseCallFilter = frame_support::traits::Everything;
+ type RuntimeOrigin = RuntimeOrigin;
+ type RuntimeCall = RuntimeCall;
+ type RuntimeTask = RuntimeTask;
+ type Hash = H256;
+ type Hashing = BlakeTwo256;
+ type AccountId = u64;
+ type Lookup = IdentityLookup<u64>;
+ type RuntimeEvent = RuntimeEvent;
+ type BlockHashCount = BlockHashCount;
+ type PalletInfo = PalletInfo;
+ type SS58Prefix = SS58Prefix;
+ type Nonce = u64;
+ type Block = Block;
+ }
+
+ impl pallet_timestamp::Config for Test {
+ type Moment = u64;
+ type OnTimestampSet = ();
+ type MinimumPeriod = ();
+ type WeightInfo = ();
+ }
+
+ parameter_types! {
+ pub const ExecutionHeadersPruneThreshold: u32 = 10;
+ pub const ChainForkVersions: ForkVersions = ForkVersions{
+ genesis: Fork {
+ version: [0, 0, 0, 1], // 0x00000001
+ epoch: 0,
+ },
+ altair: Fork {
+ version: [1, 0, 0, 1], // 0x01000001
+ epoch: 0,
+ },
+ bellatrix: Fork {
+ version: [2, 0, 0, 1], // 0x02000001
+ epoch: 0,
+ },
+ capella: Fork {
+ version: [3, 0, 0, 1], // 0x03000001
+ epoch: 0,
+ },
+ };
+ }
+
+ impl ethereum_beacon_client::Config for Test {
+ type RuntimeEvent = RuntimeEvent;
+ type ForkVersions = ChainForkVersions;
+ type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold;
+ type WeightInfo = ();
+ }
+
+ // Build genesis storage according to the mock runtime.
+ pub fn new_tester() -> sp_io::TestExternalities {
+ let t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
+ let mut ext = sp_io::TestExternalities::new(t);
+ let _ = ext.execute_with(|| Timestamp::set(RuntimeOrigin::signed(1), 30_000));
+ ext
+ }
+
+ fn load_fixture<T>(basename: &str) -> Result<T, serde_json::Error>
+ where
+ T: for<'de> serde::Deserialize<'de>,
+ {
+ let filepath: PathBuf =
+ [env!("CARGO_MANIFEST_DIR"), "tests", "fixtures", basename].iter().collect();
+ serde_json::from_reader(File::open(filepath).unwrap())
+ }
+
+ pub fn load_execution_header_update_fixture() -> primitives::ExecutionHeaderUpdate {
+ load_fixture("execution-header-update.minimal.json").unwrap()
+ }
+
+ pub fn load_checkpoint_update_fixture(
+ ) -> primitives::CheckpointUpdate<{ config::SYNC_COMMITTEE_SIZE }> {
+ load_fixture("initial-checkpoint.minimal.json").unwrap()
+ }
+
+ pub fn load_sync_committee_update_fixture(
+ ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> {
+ load_fixture("sync-committee-update.minimal.json").unwrap()
+ }
+
+ pub fn load_finalized_header_update_fixture(
+ ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> {
+ load_fixture("finalized-header-update.minimal.json").unwrap()
+ }
+
+ pub fn load_next_sync_committee_update_fixture(
+ ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> {
+ load_fixture("next-sync-committee-update.minimal.json").unwrap()
+ }
+
+ pub fn load_next_finalized_header_update_fixture(
+ ) -> primitives::Update<{ config::SYNC_COMMITTEE_SIZE }, { config::SYNC_COMMITTEE_BITS_SIZE }> {
+ load_fixture("next-finalized-header-update.minimal.json").unwrap()
+ }
+
+ pub fn get_message_verification_payload() -> (Log, Proof) {
+ (
+ Log {
+ address: hex!("ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0").into(),
+ topics: vec![
+ hex!("1b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ad").into(),
+ hex!("00000000000000000000000000000000000000000000000000000000000003e8").into(),
+ hex!("0000000000000000000000000000000000000000000000000000000000000001").into(),
+ ],
+ data: hex!("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000").into(),
+ },
+ Proof {
+ block_hash: hex!("05aaa60b0f27cce9e71909508527264b77ee14da7b5bf915fcc4e32715333213").into(),
+ tx_index: 0,
+ data: (vec![
+ hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb").to_vec(),
+ hex!("d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c185510").to_vec(),
+ hex!("b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646").to_vec(),
+ ], vec![
+ hex!("f90131a0b601337b3aa10a671caa724eba641e759399979856141d3aea6b6b4ac59b889ba00c7d5dd48be9060221a02fb8fa213860b4c50d47046c8fa65ffaba5737d569e0a094601b62a1086cd9c9cb71a7ebff9e718f3217fd6e837efe4246733c0a196f63a06a4b0dd0aefc37b3c77828c8f07d1b7a2455ceb5dbfd3c77d7d6aeeddc2f7e8ca0d6e8e23142cdd8ec219e1f5d8b56aa18e456702b195deeaa210327284d42ade4a08a313d4c87023005d1ab631bbfe3f5de1e405d0e66d0bef3e033f1e5711b5521a0bf09a5d9a48b10ade82b8d6a5362a15921c8b5228a3487479b467db97411d82fa0f95cccae2a7c572ef3c566503e30bac2b2feb2d2f26eebf6d870dcf7f8cf59cea0d21fc4f68ab05bc4dcb23c67008e92c4d466437cdd6ed7aad0c008944c1855108080808080808080").to_vec(),
+ hex!("f851a0b9890f91ca0d77aa2a4adfaf9b9e40c94cac9e638b6d9797923865872944b646a060a634b9280e3a23fb63375e7bbdd9ab07fd379ab6a67e2312bbc112195fa358808080808080808080808080808080").to_vec(),
+ hex!("f9030820b9030402f90300018301d6e2b9010000000000000800000000000020040008000000000000000000000000400000008000000000000000000000000000000000000000000000000000000000042010000000001000000000000000000000000000000000040000000000000000000000000000000000000000000000008000000000000000002000000000000000000000000200000000000000200000000000100000000040000001000200008000000000000200000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000f901f5f87a942ffa5ecdbe006d30397c7636d3e015eee251369ff842a0c965575a00553e094ca7c5d14f02e107c258dda06867cbf9e0e69f80e71bbcc1a000000000000000000000000000000000000000000000000000000000000003e8a000000000000000000000000000000000000000000000000000000000000003e8f9011c94ee9170abfbf9421ad6dd07f6bdec9d89f2b581e0f863a01b11dcf133cc240f682dab2d3a8e4cd35c5da8c9cf99adac4336f8512584c5ada000000000000000000000000000000000000000000000000000000000000003e8a00000000000000000000000000000000000000000000000000000000000000001b8a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004b000f000000000000000100d184c103f7acc340847eee82a0b909e3358bc28d440edffa1352b13227e8ee646f3ea37456dec701345772617070656420457468657210574554481235003511000000000000000000000000000000000000000000f858948cf6147918a5cbb672703f879f385036f8793a24e1a01449abf21e49fd025f33495e77f7b1461caefdd3d4bb646424a3f445c4576a5ba0000000000000000000000000440edffa1352b13227e8ee646f3ea37456dec701").to_vec(),
+ ]),
+ }
+ )
+ }
+
+ pub fn get_message_verification_header() -> CompactExecutionHeader {
+ CompactExecutionHeader {
+ parent_hash: hex!("04a7f6ab8282203562c62f38b0ab41d32aaebe2c7ea687702b463148a6429e04")
+ .into(),
+ block_number: 55,
+ state_root: hex!("894d968712976d613519f973a317cb0781c7b039c89f27ea2b7ca193f7befdb3")
+ .into(),
+ receipts_root: hex!("cf0d1c1ba57d1e0edfb59786c7e30c2b7e12bd54612b00cd21c4eaeecedf44fb")
+ .into(),
+ }
+ }
+}
+
+#[cfg(feature = "beacon-spec-mainnet")]
+pub mod mainnet {
+ use super::*;
+ use frame_support::derive_impl;
+
+ type Block = frame_system::mocking::MockBlock<Test>;
+ use sp_runtime::BuildStorage;
+
+ frame_support::construct_runtime!(
+ pub enum Test {
+ System: frame_system::{Pallet, Call, Storage, Event<T>},
+ Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent},
+ EthereumBeaconClient: ethereum_beacon_client::{Pallet, Call, Storage, Event<T>},
+ }
+ );
+
+ parameter_types! {
+ pub const BlockHashCount: u64 = 250;
+ pub const SS58Prefix: u8 = 42;
+ }
+
+ #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
+ impl frame_system::Config for Test {
+ type BaseCallFilter = frame_support::traits::Everything;
+ type RuntimeOrigin = RuntimeOrigin;
+ type RuntimeCall = RuntimeCall;
+ type RuntimeTask = RuntimeTask;
+ type Hash = H256;
+ type Hashing = BlakeTwo256;
+ type AccountId = u64;
+ type Lookup = IdentityLookup<u64>;
+ type RuntimeEvent = RuntimeEvent;
+ type BlockHashCount = BlockHashCount;
+ type PalletInfo = PalletInfo;
+ type SS58Prefix = SS58Prefix;
+ type Nonce = u64;
+ type Block = Block;
+ }
+
+ impl pallet_timestamp::Config for Test {
+ type Moment = u64;
+ type OnTimestampSet = ();
+ type MinimumPeriod = ();
+ type WeightInfo = ();
+ }
+
+ parameter_types! {
+ pub const ChainForkVersions: ForkVersions = ForkVersions{
+ genesis: Fork {
+ version: [0, 0, 16, 32], // 0x00001020
+ epoch: 0,
+ },
+ altair: Fork {
+ version: [1, 0, 16, 32], // 0x01001020
+ epoch: 36660,
+ },
+ bellatrix: Fork {
+ version: [2, 0, 16, 32], // 0x02001020
+ epoch: 112260,
+ },
+ capella: Fork {
+ version: [3, 0, 16, 32], // 0x03001020
+ epoch: 162304,
+ },
+ };
+ pub const ExecutionHeadersPruneThreshold: u32 = 10;
+ }
+
+ impl ethereum_beacon_client::Config for Test {
+ type RuntimeEvent = RuntimeEvent;
+ type ForkVersions = ChainForkVersions;
+ type MaxExecutionHeadersToKeep = ExecutionHeadersPruneThreshold;
+ type WeightInfo = ();
+ }
+
+ // Build genesis storage according to the mock runtime.
+ pub fn new_tester() -> sp_io::TestExternalities {
+ let t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
+ let mut ext = sp_io::TestExternalities::new(t);
+ let _ = ext.execute_with(|| Timestamp::set(RuntimeOrigin::signed(1), 30_000));
+ ext
+ }
+}
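A sketch of how a test can drive this mock; `force_checkpoint` is the pallet's dispatchable for seeding the initial checkpoint, while the test name and structure here are illustrative:

```rust
#[cfg(not(feature = "beacon-spec-mainnet"))]
#[test]
fn applies_checkpoint_fixture() {
    use crate::mock::minimal::*;
    use frame_support::assert_ok;

    new_tester().execute_with(|| {
        // Fixtures are deserialized from tests/fixtures/*.minimal.json.
        let checkpoint = Box::new(load_checkpoint_update_fixture());
        assert_ok!(EthereumBeaconClient::force_checkpoint(RuntimeOrigin::root(), checkpoint));
    });
}
```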
diff --git a/bridges/snowbridge/primitives/beacon/Cargo.toml b/bridges/snowbridge/primitives/beacon/Cargo.toml
index b59e5191b418753fc3eb1a2342194af4efc311c3..d181fa1d3945a704a3d1e1e28fea67b7dea0ee15 100644
--- a/bridges/snowbridge/primitives/beacon/Cargo.toml
+++ b/bridges/snowbridge/primitives/beacon/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "snowbridge-beacon-primitives"
description = "Snowbridge Beacon Primitives"
-version = "0.0.0"
+version = "0.2.0"
authors = ["Snowfork "]
edition.workspace = true
repository.workspace = true
@@ -12,7 +12,7 @@ categories = ["cryptography::cryptocurrencies"]
workspace = true
[dependencies]
-serde = { version = "1.0.195", optional = true, features = ["derive"] }
+serde = { optional = true, features = ["derive"], workspace = true, default-features = true }
hex = { version = "0.4", default-features = false }
codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
scale-info = { version = "2.9.0", default-features = false, features = ["derive"] }
diff --git a/bridges/snowbridge/primitives/core/Cargo.toml b/bridges/snowbridge/primitives/core/Cargo.toml
index f735d01a848fb15ced0f086b8b7117e6e771c688..9a299ad0ae92326a6d0bb0391baf81e6e5bad663 100644
--- a/bridges/snowbridge/primitives/core/Cargo.toml
+++ b/bridges/snowbridge/primitives/core/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "snowbridge-core"
description = "Snowbridge Core"
-version = "0.0.0"
+version = "0.2.0"
authors = ["Snowfork "]
edition.workspace = true
repository.workspace = true
@@ -12,7 +12,7 @@ categories = ["cryptography::cryptocurrencies"]
workspace = true
[dependencies]
-serde = { version = "1.0.195", optional = true, features = ["alloc", "derive"], default-features = false }
+serde = { optional = true, features = ["alloc", "derive"], workspace = true }
codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
scale-info = { version = "2.9.0", default-features = false, features = ["derive"] }
hex-literal = { version = "0.4.1" }
diff --git a/bridges/snowbridge/primitives/ethereum/Cargo.toml b/bridges/snowbridge/primitives/ethereum/Cargo.toml
index 047ebdd56695b1b42768da7108c75ce4c15b4fb4..9fa725a6c0565a5f42847d89149878f8997d07a0 100644
--- a/bridges/snowbridge/primitives/ethereum/Cargo.toml
+++ b/bridges/snowbridge/primitives/ethereum/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "snowbridge-ethereum"
description = "Snowbridge Ethereum"
-version = "0.1.0"
+version = "0.3.0"
authors = ["Snowfork "]
edition.workspace = true
repository.workspace = true
@@ -12,8 +12,8 @@ categories = ["cryptography::cryptocurrencies"]
workspace = true
[dependencies]
-serde = { version = "1.0.195", optional = true, features = ["derive"] }
-serde-big-array = { version = "0.3.2", optional = true, features = ["const-generics"] }
+serde = { optional = true, features = ["derive"], workspace = true, default-features = true }
+serde-big-array = { optional = true, features = ["const-generics"], workspace = true }
codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
scale-info = { version = "2.9.0", default-features = false, features = ["derive"] }
ethbloom = { version = "0.13.0", default-features = false }
@@ -33,7 +33,7 @@ ethabi = { package = "ethabi-decode", version = "1.0.0", default-features = fals
[dev-dependencies]
wasm-bindgen-test = "0.3.19"
rand = "0.8.5"
-serde_json = "1.0.111"
+serde_json = { workspace = true, default-features = true }
[features]
default = ["std"]
diff --git a/bridges/snowbridge/primitives/router/Cargo.toml b/bridges/snowbridge/primitives/router/Cargo.toml
index 712c60c2148f60f12e2be246e32242b67188bdaf..ded773e0d38917b7834679b3e521dfbe9539e51b 100644
--- a/bridges/snowbridge/primitives/router/Cargo.toml
+++ b/bridges/snowbridge/primitives/router/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "snowbridge-router-primitives"
description = "Snowbridge Router Primitives"
-version = "0.0.0"
+version = "0.9.0"
authors = ["Snowfork "]
edition.workspace = true
repository.workspace = true
@@ -12,10 +12,10 @@ categories = ["cryptography::cryptocurrencies"]
workspace = true
[dependencies]
-serde = { version = "1.0.195", optional = true, features = ["derive"] }
+serde = { optional = true, features = ["derive"], workspace = true, default-features = true }
codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
scale-info = { version = "2.9.0", default-features = false, features = ["derive"] }
-log = { version = "0.4.20", default-features = false }
+log = { workspace = true }
frame-support = { path = "../../../../substrate/frame/support", default-features = false }
frame-system = { path = "../../../../substrate/frame/system", default-features = false }
diff --git a/bridges/snowbridge/runtime/runtime-common/Cargo.toml b/bridges/snowbridge/runtime/runtime-common/Cargo.toml
index f5b44b25585aaccdc434d93cb039e24b4712aebc..bf5e9a8832dcf48113d5f74a92a060687da2fe4e 100644
--- a/bridges/snowbridge/runtime/runtime-common/Cargo.toml
+++ b/bridges/snowbridge/runtime/runtime-common/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "snowbridge-runtime-common"
description = "Snowbridge Runtime Common"
-version = "0.0.0"
+version = "0.2.0"
authors = ["Snowfork "]
edition.workspace = true
repository.workspace = true
@@ -12,7 +12,7 @@ categories = ["cryptography::cryptocurrencies"]
workspace = true
[dependencies]
-log = { version = "0.4.20", default-features = false }
+log = { workspace = true }
codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
frame-support = { path = "../../../../substrate/frame/support", default-features = false }
frame-system = { path = "../../../../substrate/frame/system", default-features = false }
diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml
index a2994e618913416214cb7eb21d1a56c23c2d1094..5f169e82f49346742bd97028da583105bf02335d 100644
--- a/bridges/snowbridge/runtime/test-common/Cargo.toml
+++ b/bridges/snowbridge/runtime/test-common/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "snowbridge-runtime-test-common"
description = "Snowbridge Runtime Tests"
-version = "0.0.0"
+version = "0.2.0"
authors = ["Snowfork "]
edition = "2021"
license = "Apache-2.0"
@@ -13,9 +13,9 @@ workspace = true
[dependencies]
codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
hex-literal = { version = "0.4.1" }
-log = { version = "0.4.20", default-features = false }
+log = { workspace = true }
scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0.195", optional = true, features = ["derive"] }
+serde = { optional = true, features = ["derive"], workspace = true, default-features = true }
smallvec = "1.11.0"
# Substrate
@@ -181,6 +181,7 @@ runtime-benchmarks = [
"pallet-message-queue/runtime-benchmarks",
"pallet-multisig/runtime-benchmarks",
"pallet-timestamp/runtime-benchmarks",
+ "pallet-transaction-payment/runtime-benchmarks",
"pallet-utility/runtime-benchmarks",
"pallet-xcm-benchmarks/runtime-benchmarks",
"pallet-xcm/runtime-benchmarks",
diff --git a/bridges/snowbridge/runtime/test-common/src/lib.rs b/bridges/snowbridge/runtime/test-common/src/lib.rs
index c9bbce98e575d5e55015aa7814d8cd57a5c3a966..7455adf76170acefd50f06e8a40ef1c79028f49f 100644
--- a/bridges/snowbridge/runtime/test-common/src/lib.rs
+++ b/bridges/snowbridge/runtime/test-common/src/lib.rs
@@ -13,9 +13,9 @@ use parachains_runtimes_test_utils::{
};
use snowbridge_core::{ChannelId, ParaId};
use snowbridge_pallet_ethereum_client_fixtures::*;
-use sp_core::H160;
+use sp_core::{H160, U256};
use sp_keyring::AccountKeyring::*;
-use sp_runtime::{traits::Header, AccountId32, SaturatedConversion, Saturating};
+use sp_runtime::{traits::Header, AccountId32, DigestItem, SaturatedConversion, Saturating};
use xcm::{
latest::prelude::*,
v3::Error::{self, Barrier},
@@ -40,6 +40,7 @@ where
}
pub fn send_transfer_token_message<Runtime, XcmConfig>(
+ ethereum_chain_id: u64,
assethub_parachain_id: u32,
weth_contract_address: H160,
destination_address: H160,
@@ -53,7 +54,8 @@ where
+ parachain_info::Config
+ pallet_collator_selection::Config
+ cumulus_pallet_parachain_system::Config
- + snowbridge_pallet_outbound_queue::Config,
+ + snowbridge_pallet_outbound_queue::Config
+ + pallet_timestamp::Config,
XcmConfig: xcm_executor::Config,
{
let assethub_parachain_location = Location::new(1, Parachain(assethub_parachain_id));
@@ -88,7 +90,7 @@ where
WithdrawAsset(Assets::from(vec![fee.clone()])),
BuyExecution { fees: fee, weight_limit: Unlimited },
ExportMessage {
- network: Ethereum { chain_id: 11155111 },
+ network: Ethereum { chain_id: ethereum_chain_id },
destination: Here,
xcm: inner_xcm,
},
@@ -106,6 +108,7 @@ where
}
pub fn send_transfer_token_message_success<Runtime, XcmConfig>(
+ ethereum_chain_id: u64,
collator_session_key: CollatorSessionKeys<Runtime>,
runtime_para_id: u32,
assethub_parachain_id: u32,
@@ -125,7 +128,8 @@ pub fn send_transfer_token_message_success(
+ pallet_message_queue::Config
+ cumulus_pallet_parachain_system::Config
+ snowbridge_pallet_outbound_queue::Config
- + snowbridge_pallet_system::Config,
+ + snowbridge_pallet_system::Config
+ + pallet_timestamp::Config,
XcmConfig: xcm_executor::Config,
ValidatorIdOf<Runtime>: From<AccountIdOf<Runtime>>,
<Runtime as frame_system::Config>::AccountId: From<AccountId32> + AsRef<[u8]>,
@@ -147,6 +151,7 @@ pub fn send_transfer_token_message_success<Runtime, XcmConfig>(
initial_fund::<Runtime>(assethub_parachain_id, 5_000_000_000_000);
let outcome = send_transfer_token_message::<Runtime, XcmConfig>(
+ ethereum_chain_id,
assethub_parachain_id,
weth_contract_address,
destination_address,
@@ -193,13 +198,104 @@ pub fn send_transfer_token_message_success<Runtime, XcmConfig>(
let digest = included_head.digest();
- //let digest = frame_system::Pallet::<Runtime>::digest();
let digest_items = digest.logs();
assert!(digest_items.len() == 1 && digest_items[0].as_other().is_some());
});
}
+pub fn ethereum_outbound_queue_processes_messages_before_message_queue_works<
+ Runtime,
+ XcmConfig,
+ AllPalletsWithoutSystem,
+>(
+ ethereum_chain_id: u64,
+ collator_session_key: CollatorSessionKeys<Runtime>,
+ runtime_para_id: u32,
+ assethub_parachain_id: u32,
+ weth_contract_address: H160,
+ destination_address: H160,
+ fee_amount: u128,
+ snowbridge_pallet_outbound_queue: Box<
+ dyn Fn(Vec<u8>) -> Option<snowbridge_pallet_outbound_queue::Event<Runtime>>,
+ >,
+) where
+ Runtime: frame_system::Config
+ + pallet_balances::Config
+ + pallet_session::Config
+ + pallet_xcm::Config
+ + parachain_info::Config
+ + pallet_collator_selection::Config
+ + pallet_message_queue::Config
+ + cumulus_pallet_parachain_system::Config
+ + snowbridge_pallet_outbound_queue::Config
+ + snowbridge_pallet_system::Config
+ + pallet_timestamp::Config,
+ XcmConfig: xcm_executor::Config,
+ AllPalletsWithoutSystem:
+ OnInitialize<BlockNumberFor<Runtime>> + OnFinalize<BlockNumberFor<Runtime>>,
+ ValidatorIdOf<Runtime>: From<AccountIdOf<Runtime>>,
+ <Runtime as frame_system::Config>::AccountId: From<AccountId32> + AsRef<[u8]>,
+{
+ ExtBuilder::<Runtime>::default()
+ .with_collators(collator_session_key.collators())
+ .with_session_keys(collator_session_key.session_keys())
+ .with_para_id(runtime_para_id.into())
+ .with_tracing()
+ .build()
+ .execute_with(|| {
+ <snowbridge_pallet_system::Pallet<Runtime>>::initialize(
+ runtime_para_id.into(),
+ assethub_parachain_id.into(),
+ )
+ .unwrap();
+
+ // fund asset hub sovereign account enough so it can pay fees
+ initial_fund::<Runtime>(assethub_parachain_id, 5_000_000_000_000);
+
+ let outcome = send_transfer_token_message::<Runtime, XcmConfig>(
+ ethereum_chain_id,
+ assethub_parachain_id,
+ weth_contract_address,
+ destination_address,
+ fee_amount,
+ );
+
+ assert_ok!(outcome.ensure_complete());
+
+ // check events
+ let mut events = <frame_system::Pallet<Runtime>>::events()
+ .into_iter()
+ .filter_map(|e| snowbridge_pallet_outbound_queue(e.event.encode()));
+ assert!(events.any(|e| matches!(
+ e,
+ snowbridge_pallet_outbound_queue::Event::MessageQueued { .. }
+ )));
+
+ let next_block_number: U256 = <frame_system::Pallet<Runtime>>::block_number()
+ .saturating_add(BlockNumberFor::<Runtime>::from(1u32))
+ .into();
+
+ let included_head =
+ RuntimeHelper::<Runtime, AllPalletsWithoutSystem>::run_to_block_with_finalize(
+ next_block_number.as_u32(),
+ );
+ let digest = included_head.digest();
+ let digest_items = digest.logs();
+
+ let mut found_outbound_digest = false;
+ for digest_item in digest_items {
+ match digest_item {
+ DigestItem::Other(_) => found_outbound_digest = true,
+ _ => {},
+ }
+ }
+
+ assert_eq!(found_outbound_digest, true);
+ });
+}
+
pub fn send_unpaid_transfer_token_message<Runtime, XcmConfig>(
+ ethereum_chain_id: u64,
collator_session_key: CollatorSessionKeys<Runtime>,
runtime_para_id: u32,
assethub_parachain_id: u32,
@@ -213,7 +309,8 @@ pub fn send_unpaid_transfer_token_message<Runtime, XcmConfig>(
+ parachain_info::Config
+ pallet_collator_selection::Config
+ cumulus_pallet_parachain_system::Config
- + snowbridge_pallet_outbound_queue::Config,
+ + snowbridge_pallet_outbound_queue::Config
+ + pallet_timestamp::Config,
XcmConfig: xcm_executor::Config,
ValidatorIdOf<Runtime>: From<AccountIdOf<Runtime>>,
{
@@ -262,7 +359,7 @@ pub fn send_unpaid_transfer_token_message(
let xcm = Xcm(vec![
UnpaidExecution { weight_limit: Unlimited, check_origin: None },
ExportMessage {
- network: Ethereum { chain_id: 11155111 },
+ network: Ethereum { chain_id: ethereum_chain_id },
destination: Here,
xcm: inner_xcm,
},
@@ -284,6 +381,7 @@ pub fn send_unpaid_transfer_token_message(
#[allow(clippy::too_many_arguments)]
pub fn send_transfer_token_message_failure<Runtime, XcmConfig>(
+ ethereum_chain_id: u64,
collator_session_key: CollatorSessionKeys<Runtime>,
runtime_para_id: u32,
assethub_parachain_id: u32,
@@ -301,7 +399,8 @@ pub fn send_transfer_token_message_failure<Runtime, XcmConfig>(
+ pallet_collator_selection::Config
+ cumulus_pallet_parachain_system::Config
+ snowbridge_pallet_outbound_queue::Config
- + snowbridge_pallet_system::Config,
+ + snowbridge_pallet_system::Config
+ + pallet_timestamp::Config,
XcmConfig: xcm_executor::Config,
ValidatorIdOf<Runtime>: From<AccountIdOf<Runtime>>,
{
@@ -322,6 +421,7 @@ pub fn send_transfer_token_message_failure(
initial_fund::<Runtime>(assethub_parachain_id, initial_amount);
let outcome = send_transfer_token_message::<Runtime, XcmConfig>(
+ ethereum_chain_id,
assethub_parachain_id,
weth_contract_address,
destination_address,
@@ -349,7 +449,8 @@ pub fn ethereum_extrinsic<Runtime>(
+ cumulus_pallet_parachain_system::Config
+ snowbridge_pallet_outbound_queue::Config
+ snowbridge_pallet_system::Config
- + snowbridge_pallet_ethereum_client::Config,
+ + snowbridge_pallet_ethereum_client::Config
+ + pallet_timestamp::Config,
ValidatorIdOf<Runtime>: From<AccountIdOf<Runtime>>,
<Runtime as frame_system::Config>::RuntimeCall:
From<snowbridge_pallet_ethereum_client::Call<Runtime>>,
@@ -430,7 +531,8 @@ pub fn ethereum_to_polkadot_message_extrinsics_work<Runtime>(
+ cumulus_pallet_parachain_system::Config
+ snowbridge_pallet_outbound_queue::Config
+ snowbridge_pallet_system::Config
- + snowbridge_pallet_ethereum_client::Config,
+ + snowbridge_pallet_ethereum_client::Config
+ + pallet_timestamp::Config,
ValidatorIdOf<Runtime>: From<AccountIdOf<Runtime>>,
<Runtime as frame_system::Config>::RuntimeCall:
From<snowbridge_pallet_ethereum_client::Call<Runtime>>,
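With these changes, downstream runtime test crates pass the Ethereum chain id explicitly instead of inheriting the previously hard-coded Sepolia id (11155111). A hypothetical call site; the para ids, fee, session-key helper and event-decoding closure are stand-ins for a concrete bridge-hub runtime's values:

```rust
use codec::Decode;
use sp_core::H160;

#[test]
fn transfer_token_to_ethereum_works() {
    snowbridge_runtime_test_common::send_transfer_token_message_success::<Runtime, XcmConfig>(
        11155111, // ethereum_chain_id is now an explicit argument
        collator_session_keys(),
        1013, // runtime_para_id
        1000, // assethub_parachain_id
        H160::random(),
        H160::random(),
        3_000_000_000,
        // Decode raw runtime events back into outbound-queue pallet events.
        Box::new(|encoded| match RuntimeEvent::decode(&mut &encoded[..]) {
            Ok(RuntimeEvent::EthereumOutboundQueue(event)) => Some(event),
            _ => None,
        }),
    );
}
```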
diff --git a/bridges/snowbridge/scripts/contribute-upstream.sh b/bridges/snowbridge/scripts/contribute-upstream.sh
new file mode 100755
index 0000000000000000000000000000000000000000..32005b770ecf44cb9af18c61f830243ed5287e68
--- /dev/null
+++ b/bridges/snowbridge/scripts/contribute-upstream.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+
+# A script to clean up the Snowfork fork of the polkadot-sdk to contribute it upstream back to parity/polkadot-sdk
+# ./bridges/snowbridge/scripts/contribute-upstream.sh <branch-name>
+
+# show CLI help
+function show_help() {
+ set +x
+ echo " "
+ echo Error: $1
+ echo "Usage:"
+ echo " ./bridges/snowbridge/scripts/contribute-upstream.sh Exit with code 0 if pallets code is well decoupled from the other code in the repo"
+ exit 1
+}
+
+if [[ -z "$1" ]]; then
+ echo "Please provide a branch name you would like your upstream branch to be named"
+ exit 1
+fi
+
+branch_name=$1
+
+set -eux
+
+# let's avoid any restrictions on where this script can be called from - the snowbridge repo may be
+# plugged into any other repo folder. So the script (and other stuff that needs to be removed)
+# may be located either in the call dir, or in one of its subdirs.
+SNOWBRIDGE_FOLDER="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )/../"
+
+# Get the current Git branch name
+current_branch=$(git rev-parse --abbrev-ref HEAD)
+
+if [ "$current_branch" = "$branch_name" ] || git branch | grep -q "$branch_name"; then
+ echo "Already on requested branch or branch exists, not creating."
+else
+ git branch "$branch_name"
+fi
+
+git checkout "$branch_name"
+
+# remove everything we think is not required for our needs
+rm -rf rust-toolchain.toml
+rm -rf codecov.yml
+rm -rf $SNOWBRIDGE_FOLDER/.cargo
+rm -rf $SNOWBRIDGE_FOLDER/.github
+rm -rf $SNOWBRIDGE_FOLDER/SECURITY.md
+rm -rf $SNOWBRIDGE_FOLDER/.gitignore
+rm -rf $SNOWBRIDGE_FOLDER/rustfmt.toml
+rm -rf $SNOWBRIDGE_FOLDER/templates
+rm -rf $SNOWBRIDGE_FOLDER/pallets/ethereum-client/fuzz
+
+pushd $SNOWBRIDGE_FOLDER
+
+# let's test if everything we need compiles
+cargo check -p snowbridge-pallet-ethereum-client
+cargo check -p snowbridge-pallet-ethereum-client --features runtime-benchmarks
+cargo check -p snowbridge-pallet-ethereum-client --features try-runtime
+cargo check -p snowbridge-pallet-inbound-queue
+cargo check -p snowbridge-pallet-inbound-queue --features runtime-benchmarks
+cargo check -p snowbridge-pallet-inbound-queue --features try-runtime
+cargo check -p snowbridge-pallet-outbound-queue
+cargo check -p snowbridge-pallet-outbound-queue --features runtime-benchmarks
+cargo check -p snowbridge-pallet-outbound-queue --features try-runtime
+cargo check -p snowbridge-pallet-system
+cargo check -p snowbridge-pallet-system --features runtime-benchmarks
+cargo check -p snowbridge-pallet-system --features try-runtime
+
+# we're removing the workspace Cargo.toml and lock file after all checks are done. Otherwise we may
+# use different Substrate/Polkadot/Cumulus commits and our checks will fail
+rm -f $SNOWBRIDGE_FOLDER/Cargo.toml
+rm -f $SNOWBRIDGE_FOLDER/Cargo.lock
+
+popd
+
+# Replace Parity's CI files, that we have overwritten in our fork, to run our own CI
+rm -rf .github
+git remote -v | grep -w parity || git remote add parity https://github.com/paritytech/polkadot-sdk
+git fetch parity master
+git checkout parity/master -- .github
+git add -- .github
+
+echo "OK"
diff --git a/bridges/snowbridge/scripts/verify-pallets-build.sh b/bridges/snowbridge/scripts/verify-pallets-build.sh
deleted file mode 100755
index a62f48c84d4fd34731c20365a20097e086aa2c99..0000000000000000000000000000000000000000
--- a/bridges/snowbridge/scripts/verify-pallets-build.sh
+++ /dev/null
@@ -1,116 +0,0 @@
-#!/bin/bash
-
-# A script to remove everything from snowbridge repository/subtree, except:
-#
-# - parachain
-# - readme
-# - license
-
-set -eu
-
-# show CLI help
-function show_help() {
- set +x
- echo " "
- echo Error: $1
- echo "Usage:"
- echo " ./scripts/verify-pallets-build.sh Exit with code 0 if pallets code is well decoupled from the other code in the repo"
- echo "Options:"
- echo " --no-revert Leaves only runtime code on exit"
- echo " --ignore-git-state Ignores git actual state"
- exit 1
-}
-
-# parse CLI args
-NO_REVERT=
-IGNORE_GIT_STATE=
-for i in "$@"
-do
- case $i in
- --no-revert)
- NO_REVERT=true
- shift
- ;;
- --ignore-git-state)
- IGNORE_GIT_STATE=true
- shift
- ;;
- *)
- show_help "Unknown option: $i"
- ;;
- esac
-done
-
-# the script is able to work only on clean git copy, unless we want to ignore this check
-[[ ! -z "${IGNORE_GIT_STATE}" ]] || [[ -z "$(git status --porcelain)" ]] || { echo >&2 "The git copy must be clean"; exit 1; }
-
-# let's avoid any restrictions on where this script can be called for - snowbridge repo may be
-# plugged into any other repo folder. So the script (and other stuff that needs to be removed)
-# may be located either in call dir, or one of it subdirs.
-SNOWBRIDGE_FOLDER="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )/../.."
-
-# remove everything we think is not required for our needs
-rm -rf $SNOWBRIDGE_FOLDER/.cargo
-rm -rf $SNOWBRIDGE_FOLDER/.github
-rm -rf $SNOWBRIDGE_FOLDER/contracts
-rm -rf $SNOWBRIDGE_FOLDER/codecov.yml
-rm -rf $SNOWBRIDGE_FOLDER/docs
-rm -rf $SNOWBRIDGE_FOLDER/hooks
-rm -rf $SNOWBRIDGE_FOLDER/relayer
-rm -rf $SNOWBRIDGE_FOLDER/scripts
-rm -rf $SNOWBRIDGE_FOLDER/SECURITY.md
-rm -rf $SNOWBRIDGE_FOLDER/smoketest
-rm -rf $SNOWBRIDGE_FOLDER/web
-rm -rf $SNOWBRIDGE_FOLDER/.envrc-example
-rm -rf $SNOWBRIDGE_FOLDER/.gitbook.yaml
-rm -rf $SNOWBRIDGE_FOLDER/.gitignore
-rm -rf $SNOWBRIDGE_FOLDER/.gitmodules
-rm -rf $SNOWBRIDGE_FOLDER/_typos.toml
-rm -rf $SNOWBRIDGE_FOLDER/_codecov.yml
-rm -rf $SNOWBRIDGE_FOLDER/flake.lock
-rm -rf $SNOWBRIDGE_FOLDER/flake.nix
-rm -rf $SNOWBRIDGE_FOLDER/go.work
-rm -rf $SNOWBRIDGE_FOLDER/go.work.sum
-rm -rf $SNOWBRIDGE_FOLDER/polkadot-sdk
-rm -rf $SNOWBRIDGE_FOLDER/rust-toolchain.toml
-rm -rf $SNOWBRIDGE_FOLDER/parachain/rustfmt.toml
-rm -rf $SNOWBRIDGE_FOLDER/parachain/.gitignore
-rm -rf $SNOWBRIDGE_FOLDER/parachain/templates
-rm -rf $SNOWBRIDGE_FOLDER/parachain/.cargo
-rm -rf $SNOWBRIDGE_FOLDER/parachain/.config
-rm -rf $SNOWBRIDGE_FOLDER/parachain/pallets/ethereum-client/fuzz
-
-cd bridges/snowbridge/parachain
-
-# fix polkadot-sdk paths in Cargo.toml files
-find "." -name 'Cargo.toml' | while read -r file; do
- replace=$(printf '../../' )
- if [[ "$(uname)" = "Darwin" ]] || [[ "$(uname)" = *BSD ]]; then
- sed -i '' "s|polkadot-sdk/|$replace|g" "$file"
- else
- sed -i "s|polkadot-sdk/|$replace|g" "$file"
- fi
-done
-
-# let's test if everything we need compiles
-cargo check -p snowbridge-pallet-ethereum-client
-cargo check -p snowbridge-pallet-ethereum-client --features runtime-benchmarks
-cargo check -p snowbridge-pallet-ethereum-client --features try-runtime
-cargo check -p snowbridge-pallet-inbound-queue
-cargo check -p snowbridge-pallet-inbound-queue --features runtime-benchmarks
-cargo check -p snowbridge-pallet-inbound-queue --features try-runtime
-cargo check -p snowbridge-pallet-outbound-queue
-cargo check -p snowbridge-pallet-outbound-queue --features runtime-benchmarks
-cargo check -p snowbridge-pallet-outbound-queue --features try-runtime
-cargo check -p snowbridge-pallet-system
-cargo check -p snowbridge-pallet-system --features runtime-benchmarks
-cargo check -p snowbridge-pallet-system --features try-runtime
-
-cd -
-
-# we're removing lock file after all checks are done. Otherwise we may use different
-# Substrate/Polkadot/Cumulus commits and our checks will fail
-rm -f $SNOWBRIDGE_FOLDER/parachain/Cargo.toml
-rm -f $SNOWBRIDGE_FOLDER/parachain/Cargo.lock
-
-echo "OK"
diff --git a/bridges/zombienet/README.md b/bridges/testing/README.md
similarity index 94%
rename from bridges/zombienet/README.md
rename to bridges/testing/README.md
index b601154b624ce69ed921ea6c2453d17c4d37b6c8..bd467a410d013c363913a8e4b2be8ca7b184e2dc 100644
--- a/bridges/zombienet/README.md
+++ b/bridges/testing/README.md
@@ -23,7 +23,7 @@ To start those tests, you need to:
- copy fresh `substrate-relay` binary, built in previous point, to the `~/local_bridge_testing/bin/substrate-relay`;
-- change the `POLKADOT_SDK_FOLDER` and `ZOMBIENET_BINARY_PATH` (and ensure that the nearby variables
+- change the `POLKADOT_SDK_PATH` and `ZOMBIENET_BINARY_PATH` (and ensure that the nearby variables
have correct values) in the `./run-tests.sh`.
After that, you could run tests with the `./run-tests.sh` command. Hopefully, it'll show the
diff --git a/cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml b/bridges/testing/environments/rococo-westend/bridge_hub_rococo_local_network.toml
similarity index 82%
rename from cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml
rename to bridges/testing/environments/rococo-westend/bridge_hub_rococo_local_network.toml
index 99a7d0035b511c57ccf5c10fa94165933c495ba9..52271f9442131923f8a758b16df7610e73813d15 100644
--- a/cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml
+++ b/bridges/testing/environments/rococo-westend/bridge_hub_rococo_local_network.toml
@@ -2,7 +2,7 @@
node_spawn_timeout = 240
[relaychain]
-default_command = "{{POLKADOT_BINARY_PATH}}"
+default_command = "{{POLKADOT_BINARY}}"
default_args = [ "-lparachain=debug,xcm=trace" ]
chain = "rococo-local"
@@ -36,24 +36,22 @@ cumulus_based = true
[[parachains.collators]]
name = "bridge-hub-rococo-collator1"
validator = true
- command = "{{POLKADOT_PARACHAIN_BINARY_PATH}}"
+ command = "{{POLKADOT_PARACHAIN_BINARY}}"
rpc_port = 8933
ws_port = 8943
args = [
- "-lparachain=debug,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace",
- "--force-authoring"
+ "-lparachain=debug,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace"
]
# run bob as parachain collator
[[parachains.collators]]
name = "bridge-hub-rococo-collator2"
validator = true
- command = "{{POLKADOT_PARACHAIN_BINARY_PATH}}"
+ command = "{{POLKADOT_PARACHAIN_BINARY}}"
rpc_port = 8934
ws_port = 8944
args = [
- "-lparachain=trace,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace",
- "--force-authoring"
+ "-lparachain=trace,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace"
]
[[parachains]]
@@ -65,14 +63,14 @@ cumulus_based = true
name = "asset-hub-rococo-collator1"
rpc_port = 9911
ws_port = 9910
- command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO}}"
+ command = "{{POLKADOT_PARACHAIN_BINARY}}"
args = [
"-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace"
]
[[parachains.collators]]
name = "asset-hub-rococo-collator2"
- command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO}}"
+ command = "{{POLKADOT_PARACHAIN_BINARY}}"
args = [
"-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace"
]
diff --git a/cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml b/bridges/testing/environments/rococo-westend/bridge_hub_westend_local_network.toml
similarity index 84%
rename from cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml
rename to bridges/testing/environments/rococo-westend/bridge_hub_westend_local_network.toml
index 1919d1c63f25f154e4676599afb8a2969598c10b..f2550bcc9959638b21ea78043cca3bc12d3d23ea 100644
--- a/cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml
+++ b/bridges/testing/environments/rococo-westend/bridge_hub_westend_local_network.toml
@@ -2,7 +2,7 @@
node_spawn_timeout = 240
[relaychain]
-default_command = "{{POLKADOT_BINARY_PATH}}"
+default_command = "{{POLKADOT_BINARY}}"
default_args = [ "-lparachain=debug,xcm=trace" ]
chain = "westend-local"
@@ -36,24 +36,22 @@ cumulus_based = true
[[parachains.collators]]
name = "bridge-hub-westend-collator1"
validator = true
- command = "{{POLKADOT_PARACHAIN_BINARY_PATH}}"
+ command = "{{POLKADOT_PARACHAIN_BINARY}}"
rpc_port = 8935
ws_port = 8945
args = [
- "-lparachain=debug,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace",
- "--force-authoring"
+ "-lparachain=debug,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace"
]
# run bob as parachain collator
[[parachains.collators]]
name = "bridge-hub-westend-collator2"
validator = true
- command = "{{POLKADOT_PARACHAIN_BINARY_PATH}}"
+ command = "{{POLKADOT_PARACHAIN_BINARY}}"
rpc_port = 8936
ws_port = 8946
args = [
- "-lparachain=trace,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace",
- "--force-authoring"
+ "-lparachain=trace,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace"
]
[[parachains]]
@@ -65,14 +63,14 @@ cumulus_based = true
name = "asset-hub-westend-collator1"
rpc_port = 9011
ws_port = 9010
- command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WESTEND}}"
+ command = "{{POLKADOT_PARACHAIN_BINARY}}"
args = [
"-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace"
]
[[parachains.collators]]
name = "asset-hub-westend-collator2"
- command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WESTEND}}"
+ command = "{{POLKADOT_PARACHAIN_BINARY}}"
args = [
"-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace"
]
diff --git a/cumulus/scripts/bridges_rococo_westend.sh b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh
similarity index 97%
rename from cumulus/scripts/bridges_rococo_westend.sh
rename to bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh
index 3b6f8e892858ad034a9db23a717b4290f9024bde..479ab833abfc38dd978c7b7d3abdd4c1fe37ad64 100755
--- a/cumulus/scripts/bridges_rococo_westend.sh
+++ b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh
@@ -1,7 +1,7 @@
#!/bin/bash
# import common functions
-source "$(dirname "$0")"/bridges_common.sh
+source "$FRAMEWORK_PATH/utils/bridges.sh"
# Expected sovereign accounts.
#
@@ -185,8 +185,8 @@ function run_relay() {
case "$1" in
run-relay)
- init_ro_wnd
init_wnd_ro
+ init_ro_wnd
run_relay
;;
init-asset-hub-rococo-local)
@@ -319,6 +319,7 @@ case "$1" in
$XCM_VERSION
;;
reserve-transfer-assets-from-asset-hub-rococo-local)
+ amount=$2
ensure_polkadot_js_api
# send ROCs to Alice account on AHW
limited_reserve_transfer_assets \
@@ -326,11 +327,12 @@ case "$1" in
"//Alice" \
"$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } } }')" \
"$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \
- "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 5000000000000 } } ] }')" \
+ "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": '$amount' } } ] }')" \
0 \
"Unlimited"
;;
withdraw-reserve-assets-from-asset-hub-rococo-local)
+ amount=$2
ensure_polkadot_js_api
 # send back the given amount of wrappedWNDs to Alice account on AHW
limited_reserve_transfer_assets \
@@ -338,11 +340,12 @@ case "$1" in
"//Alice" \
"$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } } }')" \
"$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \
- "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } } }, "fun": { "Fungible": 3000000000000 } } ] }')" \
+ "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } } }, "fun": { "Fungible": '$amount' } } ] }')" \
0 \
"Unlimited"
;;
reserve-transfer-assets-from-asset-hub-westend-local)
+ amount=$2
ensure_polkadot_js_api
# send WNDs to Alice account on AHR
limited_reserve_transfer_assets \
@@ -350,11 +353,12 @@ case "$1" in
"//Alice" \
"$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \
"$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \
- "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 5000000000000 } } ] }')" \
+ "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": '$amount' } } ] }')" \
0 \
"Unlimited"
;;
withdraw-reserve-assets-from-asset-hub-westend-local)
+ amount=$2
ensure_polkadot_js_api
 # send back the given amount of wrappedROCs to Alice account on AHR
limited_reserve_transfer_assets \
@@ -362,7 +366,7 @@ case "$1" in
"//Alice" \
"$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \
"$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \
- "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Rococo" } } } }, "fun": { "Fungible": 3000000000000 } } ] }')" \
+ "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Rococo" } } } }, "fun": { "Fungible": '$amount' } } ] }')" \
0 \
"Unlimited"
;;
diff --git a/bridges/testing/environments/rococo-westend/helper.sh b/bridges/testing/environments/rococo-westend/helper.sh
new file mode 100755
index 0000000000000000000000000000000000000000..0a13ded213f5d3a0920cb466fc974c129e9ad79a
--- /dev/null
+++ b/bridges/testing/environments/rococo-westend/helper.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+$ENV_PATH/bridges_rococo_westend.sh "$@"
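
helper.sh is a thin forwarder that gives the zndsl tests and spawn.sh a stable entry point into the environment's bridges_rococo_westend.sh. A usage sketch, assuming ENV_PATH has been exported to point at this environment folder (as the test run.sh files below do):

    export ENV_PATH=bridges/testing/environments/rococo-westend
    # forwards to bridges_rococo_westend.sh with the same arguments
    $ENV_PATH/helper.sh reserve-transfer-assets-from-asset-hub-rococo-local 5000000000000
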
diff --git a/bridges/testing/environments/rococo-westend/rococo-init.zndsl b/bridges/testing/environments/rococo-westend/rococo-init.zndsl
new file mode 100644
index 0000000000000000000000000000000000000000..c913e4db31f49184eb8214fda4d525c3594b358b
--- /dev/null
+++ b/bridges/testing/environments/rococo-westend/rococo-init.zndsl
@@ -0,0 +1,8 @@
+Description: Check if the HRMP channel between Rococo BH and Rococo AH was opened successfully
+Network: ./bridge_hub_rococo_local_network.toml
+Creds: config
+
+# ensure that initialization has completed
+asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wait-hrmp-channel-opened.js with "1013" within 300 seconds
+
+
diff --git a/bridges/testing/environments/rococo-westend/rococo.zndsl b/bridges/testing/environments/rococo-westend/rococo.zndsl
new file mode 100644
index 0000000000000000000000000000000000000000..5b49c7c632fa4dd0ce77134858a2f697acbfff16
--- /dev/null
+++ b/bridges/testing/environments/rococo-westend/rococo.zndsl
@@ -0,0 +1,7 @@
+Description: Check if the with-Westend GRANDPA pallet was initialized at Rococo BH
+Network: ./bridge_hub_rococo_local_network.toml
+Creds: config
+
+# relay is already started - let's wait until the with-Westend GRANDPA pallet is initialized at Rococo
+bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/best-finalized-header-at-bridged-chain.js with "Westend,0" within 400 seconds
+
diff --git a/bridges/testing/environments/rococo-westend/spawn.sh b/bridges/testing/environments/rococo-westend/spawn.sh
new file mode 100755
index 0000000000000000000000000000000000000000..cbd0b1bc623ab77876ed5ce3beefd7ab72db2d37
--- /dev/null
+++ b/bridges/testing/environments/rococo-westend/spawn.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+set -e
+
+trap "trap - SIGTERM && kill -9 -$$" SIGINT SIGTERM EXIT
+
+source "$FRAMEWORK_PATH/utils/zombienet.sh"
+
+# whether to init the chains (open HRMP channels, set XCM version, create reserve assets, etc.)
+init=0
+start_relayer=0
+while [ $# -ne 0 ]
+do
+ arg="$1"
+ case "$arg" in
+ --init)
+ init=1
+ ;;
+ --start-relayer)
+ start_relayer=1
+ ;;
+ esac
+ shift
+done
+
+logs_dir=$TEST_DIR/logs
+helper_script="${BASH_SOURCE%/*}/helper.sh"
+
+rococo_def=${BASH_SOURCE%/*}/bridge_hub_rococo_local_network.toml
+start_zombienet $TEST_DIR $rococo_def rococo_dir rococo_pid
+echo
+
+westend_def=${BASH_SOURCE%/*}/bridge_hub_westend_local_network.toml
+start_zombienet $TEST_DIR $westend_def westend_dir westend_pid
+echo
+
+if [[ $init -eq 1 ]]; then
+ rococo_init_log=$logs_dir/rococo-init.log
+ echo -e "Setting up the rococo side of the bridge. Logs available at: $rococo_init_log\n"
+
+ westend_init_log=$logs_dir/westend-init.log
+ echo -e "Setting up the westend side of the bridge. Logs available at: $westend_init_log\n"
+
+ $helper_script init-asset-hub-rococo-local >> $rococo_init_log 2>&1 &
+ rococo_init_pid=$!
+ $helper_script init-asset-hub-westend-local >> $westend_init_log 2>&1 &
+ westend_init_pid=$!
+ wait -n $rococo_init_pid $westend_init_pid
+
+
+ $helper_script init-bridge-hub-rococo-local >> $rococo_init_log 2>&1 &
+ rococo_init_pid=$!
+ $helper_script init-bridge-hub-westend-local >> $westend_init_log 2>&1 &
+ westend_init_pid=$!
+ wait -n $rococo_init_pid $westend_init_pid
+
+ run_zndsl ${BASH_SOURCE%/*}/rococo-init.zndsl $rococo_dir
+ run_zndsl ${BASH_SOURCE%/*}/westend-init.zndsl $westend_dir
+fi
+
+if [[ $start_relayer -eq 1 ]]; then
+ ${BASH_SOURCE%/*}/start_relayer.sh $rococo_dir $westend_dir relayer_pid
+fi
+
+echo $rococo_dir > $TEST_DIR/rococo.env
+echo $westend_dir > $TEST_DIR/westend.env
+echo
+
+wait -n $rococo_pid $westend_pid $relayer_pid
+kill -9 -$$
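
spawn.sh and the framework scripts below return values from functions through variable names supplied by the caller and assigned with eval (see start_zombienet and start_background_process). A self-contained sketch of that bash pattern, with illustrative names only:

    #!/bin/bash
    # the caller passes the NAME of a variable; the function writes to it via eval
    function get_value() {
        local __out=$1                # name of the caller's variable
        local value="ready"
        eval $__out="'$value'"        # assign in the caller's scope
    }

    get_value my_var
    echo "$my_var"                    # prints: ready
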
diff --git a/bridges/testing/environments/rococo-westend/start_relayer.sh b/bridges/testing/environments/rococo-westend/start_relayer.sh
new file mode 100755
index 0000000000000000000000000000000000000000..7ddd312d395aa8733d2afea59277b48721c8a36b
--- /dev/null
+++ b/bridges/testing/environments/rococo-westend/start_relayer.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+set -e
+
+source "$FRAMEWORK_PATH/utils/common.sh"
+source "$FRAMEWORK_PATH/utils/zombienet.sh"
+
+rococo_dir=$1
+westend_dir=$2
+__relayer_pid=$3
+
+logs_dir=$TEST_DIR/logs
+helper_script="${BASH_SOURCE%/*}/helper.sh"
+
+relayer_log=$logs_dir/relayer.log
+echo -e "Starting rococo-westend relayer. Logs available at: $relayer_log\n"
+start_background_process "$helper_script run-relay" $relayer_log relayer_pid
+
+run_zndsl ${BASH_SOURCE%/*}/rococo.zndsl $rococo_dir
+run_zndsl ${BASH_SOURCE%/*}/westend.zndsl $westend_dir
+
+eval $__relayer_pid="'$relayer_pid'"
+
diff --git a/bridges/testing/environments/rococo-westend/westend-init.zndsl b/bridges/testing/environments/rococo-westend/westend-init.zndsl
new file mode 100644
index 0000000000000000000000000000000000000000..0f5428eed3b01c042f8aad3b3df51c3a800a9b72
--- /dev/null
+++ b/bridges/testing/environments/rococo-westend/westend-init.zndsl
@@ -0,0 +1,7 @@
+Description: Check if the HRMP channel between Westend BH and Westend AH was opened successfully
+Network: ./bridge_hub_westend_local_network.toml
+Creds: config
+
+# ensure that initialization has completed
+asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wait-hrmp-channel-opened.js with "1002" within 600 seconds
+
diff --git a/bridges/testing/environments/rococo-westend/westend.zndsl b/bridges/testing/environments/rococo-westend/westend.zndsl
new file mode 100644
index 0000000000000000000000000000000000000000..07968838852f7c0a00131db3080c460c07d08206
--- /dev/null
+++ b/bridges/testing/environments/rococo-westend/westend.zndsl
@@ -0,0 +1,6 @@
+Description: Check if the with-Rococo GRANDPA pallet was initialized at Westend BH
+Network: ./bridge_hub_westend_local_network.toml
+Creds: config
+
+# relay is already started - let's wait until the with-Rococo GRANDPA pallet is initialized at Westend
+bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/best-finalized-header-at-bridged-chain.js with "Rococo,0" within 400 seconds
diff --git a/bridges/zombienet/helpers/best-finalized-header-at-bridged-chain.js b/bridges/testing/framework/js-helpers/best-finalized-header-at-bridged-chain.js
similarity index 94%
rename from bridges/zombienet/helpers/best-finalized-header-at-bridged-chain.js
rename to bridges/testing/framework/js-helpers/best-finalized-header-at-bridged-chain.js
index f7e1eefc84b3fa3e799d7111608cfc39783f5e21..af4f18aee9b2710612ed142c50b28caf8313326d 100644
--- a/bridges/zombienet/helpers/best-finalized-header-at-bridged-chain.js
+++ b/bridges/testing/framework/js-helpers/best-finalized-header-at-bridged-chain.js
@@ -18,7 +18,7 @@ async function run(nodeName, networkInfo, args) {
}
// else sleep and retry
- await new Promise((resolve) => setTimeout(resolve, 12000));
+ await new Promise((resolve) => setTimeout(resolve, 6000));
}
}
diff --git a/bridges/zombienet/helpers/chains/rococo-at-westend.js b/bridges/testing/framework/js-helpers/chains/rococo-at-westend.js
similarity index 100%
rename from bridges/zombienet/helpers/chains/rococo-at-westend.js
rename to bridges/testing/framework/js-helpers/chains/rococo-at-westend.js
diff --git a/bridges/zombienet/helpers/chains/westend-at-rococo.js b/bridges/testing/framework/js-helpers/chains/westend-at-rococo.js
similarity index 100%
rename from bridges/zombienet/helpers/chains/westend-at-rococo.js
rename to bridges/testing/framework/js-helpers/chains/westend-at-rococo.js
diff --git a/bridges/zombienet/helpers/native-assets-balance-increased.js b/bridges/testing/framework/js-helpers/native-assets-balance-increased.js
similarity index 74%
rename from bridges/zombienet/helpers/native-assets-balance-increased.js
rename to bridges/testing/framework/js-helpers/native-assets-balance-increased.js
index 9ee1a769e9f2807ed7b73ca9c6aa4b89d5c135f9..749c3e2fec32ac0af4d244c53cb4ac1c6237817a 100644
--- a/bridges/zombienet/helpers/native-assets-balance-increased.js
+++ b/bridges/testing/framework/js-helpers/native-assets-balance-increased.js
@@ -3,18 +3,19 @@ async function run(nodeName, networkInfo, args) {
const api = await zombie.connect(wsUri, userDefinedTypes);
const accountAddress = args[0];
+ const expectedIncrease = BigInt(args[1]);
const initialAccountData = await api.query.system.account(accountAddress);
const initialAccountBalance = initialAccountData.data['free'];
while (true) {
const accountData = await api.query.system.account(accountAddress);
const accountBalance = accountData.data['free'];
- if (accountBalance > initialAccountBalance) {
+ if (accountBalance > initialAccountBalance + expectedIncrease) {
return accountBalance;
}
// else sleep and retry
- await new Promise((resolve) => setTimeout(resolve, 12000));
+ await new Promise((resolve) => setTimeout(resolve, 6000));
}
}
-module.exports = { run }
+module.exports = {run}
diff --git a/bridges/zombienet/helpers/only-mandatory-headers-synced-when-idle.js b/bridges/testing/framework/js-helpers/only-mandatory-headers-synced-when-idle.js
similarity index 88%
rename from bridges/zombienet/helpers/only-mandatory-headers-synced-when-idle.js
rename to bridges/testing/framework/js-helpers/only-mandatory-headers-synced-when-idle.js
index 3a3432cfaf38da93f3ea0e65657f266b66f84d74..979179245ebe9f5b250efca6f2e6199ef0ac86d7 100644
--- a/bridges/zombienet/helpers/only-mandatory-headers-synced-when-idle.js
+++ b/bridges/testing/framework/js-helpers/only-mandatory-headers-synced-when-idle.js
@@ -10,7 +10,7 @@ async function run(nodeName, networkInfo, args) {
// start listening to new blocks
let totalGrandpaHeaders = 0;
- let totalParachainHeaders = 0;
+ let initialParachainHeaderImported = false;
api.rpc.chain.subscribeNewHeads(async function (header) {
const apiAtParent = await api.at(header.parentHash);
const apiAtCurrent = await api.at(header.hash);
@@ -22,7 +22,7 @@ async function run(nodeName, networkInfo, args) {
apiAtCurrent,
currentEvents,
);
- totalParachainHeaders += await utils.ensureOnlyInitialParachainHeaderImported(
+ initialParachainHeaderImported = await utils.ensureOnlyInitialParachainHeaderImported(
bridgedChain,
apiAtParent,
apiAtCurrent,
@@ -36,7 +36,7 @@ async function run(nodeName, networkInfo, args) {
if (totalGrandpaHeaders == 0) {
throw new Error("No bridged relay chain headers imported");
}
- if (totalParachainHeaders == 0) {
+ if (!initialParachainHeaderImported) {
throw new Error("No bridged parachain headers imported");
}
}
diff --git a/bridges/zombienet/helpers/only-required-headers-synced-when-idle.js b/bridges/testing/framework/js-helpers/only-required-headers-synced-when-idle.js
similarity index 100%
rename from bridges/zombienet/helpers/only-required-headers-synced-when-idle.js
rename to bridges/testing/framework/js-helpers/only-required-headers-synced-when-idle.js
diff --git a/bridges/zombienet/helpers/relayer-rewards.js b/bridges/testing/framework/js-helpers/relayer-rewards.js
similarity index 93%
rename from bridges/zombienet/helpers/relayer-rewards.js
rename to bridges/testing/framework/js-helpers/relayer-rewards.js
index a5f567db797722e04d3bfae90745a728ff1abdff..5347c649604fc209042725c9cf269c9d3ca0290f 100644
--- a/bridges/zombienet/helpers/relayer-rewards.js
+++ b/bridges/testing/framework/js-helpers/relayer-rewards.js
@@ -21,7 +21,7 @@ async function run(nodeName, networkInfo, args) {
}
// else sleep and retry
- await new Promise((resolve) => setTimeout(resolve, 12000));
+ await new Promise((resolve) => setTimeout(resolve, 6000));
}
}
diff --git a/bridges/zombienet/helpers/utils.js b/bridges/testing/framework/js-helpers/utils.js
similarity index 98%
rename from bridges/zombienet/helpers/utils.js
rename to bridges/testing/framework/js-helpers/utils.js
index 5a5542b56dfc215a082fc6fbb8c1b9aa018de83e..f6e9f5623b47b3cb3c642245e86654ae9f65358a 100644
--- a/bridges/zombienet/helpers/utils.js
+++ b/bridges/testing/framework/js-helpers/utils.js
@@ -98,6 +98,6 @@ module.exports = {
throw new Error("Unexpected parachain header import: " + newParachainHeaders + " / " + maxNewParachainHeaders);
}
- return newParachainHeaders;
+ return hasBestBridgedParachainHeader;
},
}
diff --git a/bridges/zombienet/helpers/wait-hrmp-channel-opened.js b/bridges/testing/framework/js-helpers/wait-hrmp-channel-opened.js
similarity index 91%
rename from bridges/zombienet/helpers/wait-hrmp-channel-opened.js
rename to bridges/testing/framework/js-helpers/wait-hrmp-channel-opened.js
index e700cab1d7481d77631e55492e4b0032f4382028..765d48cc49848ab7a4389f6e0d9b9b3b8cb38f2b 100644
--- a/bridges/zombienet/helpers/wait-hrmp-channel-opened.js
+++ b/bridges/testing/framework/js-helpers/wait-hrmp-channel-opened.js
@@ -15,7 +15,7 @@ async function run(nodeName, networkInfo, args) {
}
// else sleep and retry
- await new Promise((resolve) => setTimeout(resolve, 12000));
+ await new Promise((resolve) => setTimeout(resolve, 6000));
}
}
diff --git a/bridges/zombienet/helpers/wrapped-assets-balance.js b/bridges/testing/framework/js-helpers/wrapped-assets-balance.js
similarity index 93%
rename from bridges/zombienet/helpers/wrapped-assets-balance.js
rename to bridges/testing/framework/js-helpers/wrapped-assets-balance.js
index bb3cea8858a850e551ba0380b1557ccad0761717..27287118547f702b3e94eb635f9e3855d1cab535 100644
--- a/bridges/zombienet/helpers/wrapped-assets-balance.js
+++ b/bridges/testing/framework/js-helpers/wrapped-assets-balance.js
@@ -19,7 +19,7 @@ async function run(nodeName, networkInfo, args) {
}
// else sleep and retry
- await new Promise((resolve) => setTimeout(resolve, 12000));
+ await new Promise((resolve) => setTimeout(resolve, 6000));
}
}
diff --git a/cumulus/scripts/bridges_common.sh b/bridges/testing/framework/utils/bridges.sh
similarity index 97%
rename from cumulus/scripts/bridges_common.sh
rename to bridges/testing/framework/utils/bridges.sh
index 029d4cd4ff74a5c88165913a48b2b369c0f185b8..7c8399461584a85e4e8eedf5f347d9d74725f1c9 100755
--- a/cumulus/scripts/bridges_common.sh
+++ b/bridges/testing/framework/utils/bridges.sh
@@ -2,7 +2,7 @@
function relayer_path() {
local default_path=~/local_bridge_testing/bin/substrate-relay
- local path="${SUBSTRATE_RELAY_PATH:-$default_path}"
+ local path="${SUBSTRATE_RELAY_BINARY:-$default_path}"
echo "$path"
}
@@ -41,8 +41,8 @@ function ensure_polkadot_js_api() {
echo ""
echo ""
echo "-------------------"
- echo "Installing (nodejs) sub module: $(dirname "$0")/generate_hex_encoded_call"
- pushd $(dirname "$0")/generate_hex_encoded_call
+ echo "Installing (nodejs) sub module: ${BASH_SOURCE%/*}/generate_hex_encoded_call"
+ pushd ${BASH_SOURCE%/*}/generate_hex_encoded_call
npm install
popd
fi
@@ -65,7 +65,7 @@ function generate_hex_encoded_call_data() {
shift
echo "Input params: $@"
- node $(dirname "$0")/generate_hex_encoded_call "$type" "$endpoint" "$output" "$@"
+ node ${BASH_SOURCE%/*}/../utils/generate_hex_encoded_call "$type" "$endpoint" "$output" "$@"
local retVal=$?
if [ $type != "check" ]; then
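
The switch from $(dirname "$0") to ${BASH_SOURCE%/*} matters because bridges.sh is now sourced rather than executed: in a sourced file, $0 still names the calling script, while BASH_SOURCE names the sourced file itself. A small illustration with hypothetical file names:

    # lib.sh (sourced): $0 is still the caller, BASH_SOURCE is lib.sh itself
    echo "invoked as:   $0"
    echo "sourced file: ${BASH_SOURCE[0]}"

    # main.sh: prints "invoked as: ./main.sh" and "sourced file: ./lib.sh"
    source ./lib.sh
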
diff --git a/bridges/testing/framework/utils/common.sh b/bridges/testing/framework/utils/common.sh
new file mode 100644
index 0000000000000000000000000000000000000000..06f41320be1353720fccc76b7b76e69ba56a3b94
--- /dev/null
+++ b/bridges/testing/framework/utils/common.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+function start_background_process() {
+ local command=$1
+ local log_file=$2
+ local __pid=$3
+
+ $command > $log_file 2>&1 &
+ eval $__pid="'$!'"
+}
+
+function wait_for_process_file() {
+ local pid=$1
+ local file=$2
+ local timeout=$3
+ local __found=$4
+
+ local time=0
+ until [ -e $file ]; do
+ if ! kill -0 $pid; then
+ echo "Process finished unsuccessfully"
+ return
+ fi
+ if (( time++ >= timeout )); then
+ echo "Timeout waiting for file $file: $timeout seconds"
+ eval $__found=0
+ return
+ fi
+ sleep 1
+ done
+
+ echo "File $file found after $time seconds"
+ eval $__found=1
+}
+
+function ensure_process_file() {
+ local pid=$1
+ local file=$2
+ local timeout=$3
+
+ wait_for_process_file $pid $file $timeout file_found
+ if [ "$file_found" != "1" ]; then
+ exit 1
+ fi
+}
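
Taken together, these helpers launch a command in the background and then block until it produces an expected file, aborting on timeout. A hedged usage sketch (the command, log and marker file names are made up):

    source bridges/testing/framework/utils/common.sh

    start_background_process "some-long-setup-cmd" /tmp/setup.log setup_pid
    ensure_process_file $setup_pid /tmp/setup-ready.json 60   # exits 1 after 60s
    echo "setup (pid $setup_pid) produced its marker file"
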
diff --git a/cumulus/scripts/generate_hex_encoded_call/index.js b/bridges/testing/framework/utils/generate_hex_encoded_call/index.js
similarity index 100%
rename from cumulus/scripts/generate_hex_encoded_call/index.js
rename to bridges/testing/framework/utils/generate_hex_encoded_call/index.js
diff --git a/cumulus/scripts/generate_hex_encoded_call/package-lock.json b/bridges/testing/framework/utils/generate_hex_encoded_call/package-lock.json
similarity index 100%
rename from cumulus/scripts/generate_hex_encoded_call/package-lock.json
rename to bridges/testing/framework/utils/generate_hex_encoded_call/package-lock.json
diff --git a/cumulus/scripts/generate_hex_encoded_call/package.json b/bridges/testing/framework/utils/generate_hex_encoded_call/package.json
similarity index 100%
rename from cumulus/scripts/generate_hex_encoded_call/package.json
rename to bridges/testing/framework/utils/generate_hex_encoded_call/package.json
diff --git a/bridges/testing/framework/utils/zombienet.sh b/bridges/testing/framework/utils/zombienet.sh
new file mode 100644
index 0000000000000000000000000000000000000000..bbcd1a30620252d8740473c3924e0988e5bff4d6
--- /dev/null
+++ b/bridges/testing/framework/utils/zombienet.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+source "${BASH_SOURCE%/*}/common.sh"
+
+function start_zombienet() {
+ local test_dir=$1
+ local definition_path=$2
+ local __zombienet_dir=$3
+ local __zombienet_pid=$4
+
+ local zombienet_name=`basename $definition_path .toml`
+ local zombienet_dir=$test_dir/$zombienet_name
+ eval $__zombienet_dir="'$zombienet_dir'"
+ mkdir -p $zombienet_dir
+ rm -rf $zombienet_dir
+
+ local logs_dir=$test_dir/logs
+ mkdir -p $logs_dir
+ local zombienet_log=$logs_dir/$zombienet_name.log
+
+ echo "Starting $zombienet_name zombienet. Logs available at: $zombienet_log"
+ start_background_process \
+ "$ZOMBIENET_BINARY spawn --dir $zombienet_dir --provider native $definition_path" \
+ "$zombienet_log" zombienet_pid
+
+ ensure_process_file $zombienet_pid "$zombienet_dir/zombie.json" 180
+ echo "$zombienet_name zombienet started successfully"
+
+ eval $__zombienet_pid="'$zombienet_pid'"
+}
+
+function run_zndsl() {
+ local zndsl_file=$1
+ local zombienet_dir=$2
+
+ echo "Running $zndsl_file."
+ $ZOMBIENET_BINARY test --dir $zombienet_dir --provider native $zndsl_file $zombienet_dir/zombie.json
+ echo
+}
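
A usage sketch mirroring spawn.sh: start_zombienet hands the network directory and pid back through its last two arguments, and run_zndsl then executes a test against that network (TEST_DIR and ZOMBIENET_BINARY are assumed to be exported by run-new-test.sh):

    source bridges/testing/framework/utils/zombienet.sh

    def=bridges/testing/environments/rococo-westend/bridge_hub_rococo_local_network.toml
    start_zombienet $TEST_DIR $def net_dir net_pid
    run_zndsl bridges/testing/environments/rococo-westend/rococo-init.zndsl $net_dir
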
diff --git a/bridges/testing/run-new-test.sh b/bridges/testing/run-new-test.sh
new file mode 100755
index 0000000000000000000000000000000000000000..7c84a69aa47de84439091cb7b908233d02238175
--- /dev/null
+++ b/bridges/testing/run-new-test.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+set -e
+
+trap 'kill -9 -$$ || echo "Environment already torn down"' SIGINT SIGTERM EXIT
+
+test=$1
+shift
+
+# whether to use paths for the zombienet+bridges tests container or for local testing
+ZOMBIENET_DOCKER_PATHS=0
+while [ $# -ne 0 ]
+do
+ arg="$1"
+ case "$arg" in
+ --docker)
+ ZOMBIENET_DOCKER_PATHS=1
+ ;;
+ esac
+ shift
+done
+
+export POLKADOT_SDK_PATH=`realpath ${BASH_SOURCE%/*}/../..`
+export FRAMEWORK_PATH=`realpath ${BASH_SOURCE%/*}/framework`
+
+# set path to binaries
+if [ "$ZOMBIENET_DOCKER_PATHS" -eq 1 ]; then
+ # otherwise zombienet uses some hardcoded paths
+ unset RUN_IN_CONTAINER
+ unset ZOMBIENET_IMAGE
+
+ export POLKADOT_BINARY=/usr/local/bin/polkadot
+ export POLKADOT_PARACHAIN_BINARY=/usr/local/bin/polkadot-parachain
+
+ export ZOMBIENET_BINARY=/usr/local/bin/zombie
+ export SUBSTRATE_RELAY_BINARY=/usr/local/bin/substrate-relay
+else
+ export POLKADOT_BINARY=$POLKADOT_SDK_PATH/target/release/polkadot
+ export POLKADOT_PARACHAIN_BINARY=$POLKADOT_SDK_PATH/target/release/polkadot-parachain
+
+ export ZOMBIENET_BINARY=~/local_bridge_testing/bin/zombienet-linux-x64
+ export SUBSTRATE_RELAY_BINARY=~/local_bridge_testing/bin/substrate-relay
+fi
+
+export TEST_DIR=`mktemp -d /tmp/bridges-tests-run-XXXXX`
+echo -e "Test folder: $TEST_DIR\n"
+
+${BASH_SOURCE%/*}/tests/$test/run.sh
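
The entrypoint takes the test folder name (a subdirectory of bridges/testing/tests) plus optional flags, for example:

    # run against locally built binaries
    ./bridges/testing/run-new-test.sh 0001-asset-transfer

    # run with the zombienet+bridges tests container paths
    ./bridges/testing/run-new-test.sh 0001-asset-transfer --docker
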
diff --git a/bridges/zombienet/run-tests.sh b/bridges/testing/run-tests.sh
similarity index 77%
rename from bridges/zombienet/run-tests.sh
rename to bridges/testing/run-tests.sh
index cf3b529e6a9d9823f875938d8603b363c6079136..6149d9912653c79968a0229759c8f1bf46f68a9f 100755
--- a/bridges/zombienet/run-tests.sh
+++ b/bridges/testing/run-tests.sh
@@ -27,34 +27,27 @@ done
 # assuming that we'll be using the native provider && all processes will be executing locally
# (we need absolute paths here, because they're used when scripts are called by zombienet from tmp folders)
-export POLKADOT_SDK_FOLDER=`realpath $(dirname "$0")/../..`
-export BRIDGE_TESTS_FOLDER=$POLKADOT_SDK_FOLDER/bridges/zombienet/tests
+export POLKADOT_SDK_PATH=`realpath $(dirname "$0")/../..`
+export BRIDGE_TESTS_FOLDER=$POLKADOT_SDK_PATH/bridges/testing/tests
 # set path to binaries
if [ "$ZOMBIENET_DOCKER_PATHS" -eq 1 ]; then
- export POLKADOT_BINARY_PATH=/usr/local/bin/polkadot
- export POLKADOT_PARACHAIN_BINARY_PATH=/usr/local/bin/polkadot-parachain
- export POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO=/usr/local/bin/polkadot-parachain
- export POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WESTEND=/usr/local/bin/polkadot-parachain
+ export POLKADOT_BINARY=/usr/local/bin/polkadot
+ export POLKADOT_PARACHAIN_BINARY=/usr/local/bin/polkadot-parachain
- export SUBSTRATE_RELAY_PATH=/usr/local/bin/substrate-relay
+ export SUBSTRATE_RELAY_BINARY=/usr/local/bin/substrate-relay
export ZOMBIENET_BINARY_PATH=/usr/local/bin/zombie
else
- export POLKADOT_BINARY_PATH=$POLKADOT_SDK_FOLDER/target/release/polkadot
- export POLKADOT_PARACHAIN_BINARY_PATH=$POLKADOT_SDK_FOLDER/target/release/polkadot-parachain
- export POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO=$POLKADOT_PARACHAIN_BINARY_PATH
- export POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WESTEND=$POLKADOT_PARACHAIN_BINARY_PATH
+ export POLKADOT_BINARY=$POLKADOT_SDK_PATH/target/release/polkadot
+ export POLKADOT_PARACHAIN_BINARY=$POLKADOT_SDK_PATH/target/release/polkadot-parachain
- export SUBSTRATE_RELAY_PATH=~/local_bridge_testing/bin/substrate-relay
+ export SUBSTRATE_RELAY_BINARY=~/local_bridge_testing/bin/substrate-relay
export ZOMBIENET_BINARY_PATH=~/local_bridge_testing/bin/zombienet-linux
fi
# check if `wait` supports -p flag
if [ `printf "$BASH_VERSION\n5.1" | sort -V | head -n 1` = "5.1" ]; then IS_BASH_5_1=1; else IS_BASH_5_1=0; fi
-# check if `wait` supports -p flag
-if [ `printf "$BASH_VERSION\n5.1" | sort -V | head -n 1` = "5.1" ]; then IS_BASH_5_1=1; else IS_BASH_5_1=0; fi
-
# bridge configuration
export LANE_ID="00000002"
diff --git a/bridges/zombienet/scripts/invoke-script.sh b/bridges/testing/scripts/invoke-script.sh
similarity index 62%
rename from bridges/zombienet/scripts/invoke-script.sh
rename to bridges/testing/scripts/invoke-script.sh
index 835b4fe500f01ea2968bcb8bff538491ec7149bc..cd0557b071bbadc41e056a2e50c9f1aa0b677312 100755
--- a/bridges/zombienet/scripts/invoke-script.sh
+++ b/bridges/testing/scripts/invoke-script.sh
@@ -2,6 +2,6 @@
INVOKE_LOG=`mktemp -p $TEST_FOLDER invoke.XXXXX`
-pushd $POLKADOT_SDK_FOLDER/cumulus/scripts
+pushd $POLKADOT_SDK_PATH/bridges/testing/environments/rococo-westend
./bridges_rococo_westend.sh $1 >$INVOKE_LOG 2>&1
popd
diff --git a/bridges/zombienet/scripts/start-relayer.sh b/bridges/testing/scripts/start-relayer.sh
similarity index 63%
rename from bridges/zombienet/scripts/start-relayer.sh
rename to bridges/testing/scripts/start-relayer.sh
index 2f72b5ee556bcc8a89b2de4c5d3c53db8ac072b1..38ea62fad524486c40cf88943c48a2e4df4b86e8 100755
--- a/bridges/zombienet/scripts/start-relayer.sh
+++ b/bridges/testing/scripts/start-relayer.sh
@@ -2,6 +2,6 @@
RELAY_LOG=`mktemp -p $TEST_FOLDER relay.XXXXX`
-pushd $POLKADOT_SDK_FOLDER/cumulus/scripts
+pushd $POLKADOT_SDK_PATH/bridges/testing/environments/rococo-westend
./bridges_rococo_westend.sh run-relay >$RELAY_LOG 2>&1&
popd
diff --git a/bridges/zombienet/scripts/sync-exit.sh b/bridges/testing/scripts/sync-exit.sh
similarity index 100%
rename from bridges/zombienet/scripts/sync-exit.sh
rename to bridges/testing/scripts/sync-exit.sh
diff --git a/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl b/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl
new file mode 100644
index 0000000000000000000000000000000000000000..a58520ccea65b50dd0db1f67a72f6f8a4c5cdb38
--- /dev/null
+++ b/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl
@@ -0,0 +1,12 @@
+Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Hub
+Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml
+Creds: config
+
+# send 5 ROC to //Alice from Rococo AH to Westend AH
+asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "reserve-transfer-assets-from-asset-hub-rococo-local 5000000000000" within 120 seconds
+
+# check that //Alice received at least 4.8 ROC on Westend AH
+asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,4800000000000,Rococo" within 300 seconds
+
+# check that the relayer //Charlie is rewarded by Westend AH
+bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726F,ThisChain,0" within 30 seconds
diff --git a/bridges/testing/tests/0001-asset-transfer/run.sh b/bridges/testing/tests/0001-asset-transfer/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a7bb122919b40187c49e89c489d2271d646bff40
--- /dev/null
+++ b/bridges/testing/tests/0001-asset-transfer/run.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+set -e
+
+source "${BASH_SOURCE%/*}/../../framework/utils/common.sh"
+source "${BASH_SOURCE%/*}/../../framework/utils/zombienet.sh"
+
+export ENV_PATH=`realpath ${BASH_SOURCE%/*}/../../environments/rococo-westend`
+
+$ENV_PATH/spawn.sh --init --start-relayer &
+env_pid=$!
+
+ensure_process_file $env_pid $TEST_DIR/rococo.env 600
+rococo_dir=`cat $TEST_DIR/rococo.env`
+echo
+
+ensure_process_file $env_pid $TEST_DIR/westend.env 300
+westend_dir=`cat $TEST_DIR/westend.env`
+echo
+
+run_zndsl ${BASH_SOURCE%/*}/roc-reaches-westend.zndsl $westend_dir
+run_zndsl ${BASH_SOURCE%/*}/wnd-reaches-rococo.zndsl $rococo_dir
+
+run_zndsl ${BASH_SOURCE%/*}/wroc-reaches-rococo.zndsl $rococo_dir
+run_zndsl ${BASH_SOURCE%/*}/wwnd-reaches-westend.zndsl $westend_dir
diff --git a/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl b/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl
new file mode 100644
index 0000000000000000000000000000000000000000..fedb78cc2103555a1d15c446dd2f08fca94643e1
--- /dev/null
+++ b/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl
@@ -0,0 +1,12 @@
+Description: User is able to transfer WND from Westend Asset Hub to Rococo Asset Hub
+Network: {{ENV_PATH}}/bridge_hub_rococo_local_network.toml
+Creds: config
+
+# send 5 WND to //Alice from Westend AH to Rococo AH
+asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "reserve-transfer-assets-from-asset-hub-westend-local 5000000000000" within 120 seconds
+
+# check that //Alice received at least 4.8 WND on Rococo AH
+asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,4800000000000,Westend" within 300 seconds
+
+# check that the relayer //Charlie is rewarded by Rococo AH
+bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,ThisChain,0" within 30 seconds
diff --git a/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl b/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl
new file mode 100644
index 0000000000000000000000000000000000000000..68b888b6858e86b8fe846b887bc101e221b2f21d
--- /dev/null
+++ b/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl
@@ -0,0 +1,10 @@
+Description: User is able to transfer wROC back from Westend Asset Hub to Rococo Asset Hub
+Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml
+Creds: config
+
+# send 3 wROC back to Alice from Westend AH to Rococo AH
+asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "withdraw-reserve-assets-from-asset-hub-westend-local 3000000000000" within 120 seconds
+
+# check that //Alice received at least 2.8 wROC on Rococo AH
+# (we wait until //Alice account increases here - there are no other transactions that may increase it)
+asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,2800000000000" within 300 seconds
diff --git a/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl b/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl
new file mode 100644
index 0000000000000000000000000000000000000000..1a8a161819542e281094aed0681d52167aaea8e6
--- /dev/null
+++ b/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl
@@ -0,0 +1,10 @@
+Description: User is able to transfer wWND back from Rococo Asset Hub to Westend Asset Hub
+Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml
+Creds: config
+
+# send 3 wWND back to Alice from Rococo AH to Westend AH
+asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "withdraw-reserve-assets-from-asset-hub-rococo-local 3000000000000" within 120 seconds
+
+# check that //Alice received at least 2.8 wWND on Westend AH
+# (we wait until //Alice account increases here - there are no other transactions that may increase it)
+asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,2800000000000" within 300 seconds
diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl
new file mode 100644
index 0000000000000000000000000000000000000000..6e381f5377329430c0d7a8723f9ea9081556bfeb
--- /dev/null
+++ b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl
@@ -0,0 +1,8 @@
+Description: While the relayer is idle, we only sync mandatory Rococo (and a single Rococo BH) headers to Westend BH.
+Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml
+Creds: config
+
+# ensure that the relayer is only syncing mandatory headers while idle. This includes both headers that were
+# generated while the relay was offline and those in the next 100 seconds while the script is active.
+bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/only-mandatory-headers-synced-when-idle.js with "300,rococo-at-westend" within 600 seconds
+
diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..7d5b8d9273664b0861e8ffe1c528e9e1718c4df4
--- /dev/null
+++ b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+set -e
+
+source "${BASH_SOURCE%/*}/../../framework/utils/common.sh"
+source "${BASH_SOURCE%/*}/../../framework/utils/zombienet.sh"
+
+export ENV_PATH=`realpath ${BASH_SOURCE%/*}/../../environments/rococo-westend`
+
+$ENV_PATH/spawn.sh &
+env_pid=$!
+
+ensure_process_file $env_pid $TEST_DIR/rococo.env 600
+rococo_dir=`cat $TEST_DIR/rococo.env`
+echo
+
+ensure_process_file $env_pid $TEST_DIR/westend.env 300
+westend_dir=`cat $TEST_DIR/westend.env`
+echo
+
+# Sleep for some time before starting the relayer. We want to sleep for at least 1 session,
+# which is expected to be 60 seconds for the test environment.
+echo -e "Sleeping 90s before starting relayer ...\n"
+sleep 90
+${BASH_SOURCE%/*}/../../environments/rococo-westend/start_relayer.sh $rococo_dir $westend_dir relayer_pid
+
+# Sometimes the relayer syncs multiple parachain heads in the beginning, leading to test failures.
+# See issue: https://github.com/paritytech/parity-bridges-common/issues/2838.
+# TODO: Remove this sleep after the issue is fixed.
+echo -e "Sleeping 180s before running the tests ...\n"
+sleep 180
+
+run_zndsl ${BASH_SOURCE%/*}/rococo-to-westend.zndsl $westend_dir
+run_zndsl ${BASH_SOURCE%/*}/westend-to-rococo.zndsl $rococo_dir
+
diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl
new file mode 100644
index 0000000000000000000000000000000000000000..b4b3e43679162feb8c3c5253f3f963d950f31d55
--- /dev/null
+++ b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl
@@ -0,0 +1,7 @@
+Description: While the relayer is idle, we only sync mandatory Westend (and a single Westend BH) headers to Rococo BH.
+Network: {{ENV_PATH}}/bridge_hub_rococo_local_network.toml
+Creds: config
+
+# ensure that the relayer is only syncing mandatory headers while idle. This includes both headers that were
+# generated while the relay was offline and those in the next 100 seconds while the script is active.
+bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/only-mandatory-headers-synced-when-idle.js with "300,westend-at-rococo" within 600 seconds
diff --git a/bridges/zombienet/tests/0003-required-headers-synced-while-active-rococo-to-westend.zndsl b/bridges/testing/tests/0003-required-headers-synced-while-active-rococo-to-westend.zndsl
similarity index 77%
rename from bridges/zombienet/tests/0003-required-headers-synced-while-active-rococo-to-westend.zndsl
rename to bridges/testing/tests/0003-required-headers-synced-while-active-rococo-to-westend.zndsl
index a4960344f0a03265d2accfa52cd9a4ab1d7117d6..07b91481dc7cf995b913a9bf84edd3728982eaae 100644
--- a/bridges/zombienet/tests/0003-required-headers-synced-while-active-rococo-to-westend.zndsl
+++ b/bridges/testing/tests/0003-required-headers-synced-while-active-rococo-to-westend.zndsl
@@ -1,5 +1,5 @@
Description: While relayer is active, we only sync mandatory and required Rococo (and Rococo BH) headers to Westend BH.
-Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml
+Network: ../environments/rococo-westend/bridge_hub_westend_local_network.toml
Creds: config
# step 1: initialize Westend AH
@@ -9,7 +9,7 @@ asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "init-asset-hu
bridge-hub-westend-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-westend-local" within 60 seconds
# step 3: ensure that initialization has completed
-asset-hub-westend-collator1: js-script ../helpers/wait-hrmp-channel-opened.js with "1002" within 600 seconds
+asset-hub-westend-collator1: js-script ../js-helpers/wait-hrmp-channel-opened.js with "1002" within 600 seconds
# step 4: send message from Westend to Rococo
asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-westend-local" within 60 seconds
@@ -20,7 +20,7 @@ asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "reserve-trans
# (it is started by sibling 0003-required-headers-synced-while-active-westend-to-rococo.zndsl)
# step 6: ensure that relayer won't sync any extra headers while delivering messages and confirmations
-bridge-hub-westend-collator1: js-script ../helpers/only-required-headers-synced-when-active.js with "500,rococo-at-westend" within 600 seconds
+bridge-hub-westend-collator1: js-script ../js-helpers/only-required-headers-synced-when-active.js with "500,rococo-at-westend" within 600 seconds
# wait until other network test has completed OR exit with an error too
asset-hub-westend-collator1: run ../scripts/sync-exit.sh within 600 seconds
diff --git a/bridges/zombienet/tests/0003-required-headers-synced-while-active-westend-to-rococo.zndsl b/bridges/testing/tests/0003-required-headers-synced-while-active-westend-to-rococo.zndsl
similarity index 77%
rename from bridges/zombienet/tests/0003-required-headers-synced-while-active-westend-to-rococo.zndsl
rename to bridges/testing/tests/0003-required-headers-synced-while-active-westend-to-rococo.zndsl
index 33c3ceebcf844cc6029d41deb289b1a1d8103132..a6b11fc24052aadf562bc34704aeda9ee115eccf 100644
--- a/bridges/zombienet/tests/0003-required-headers-synced-while-active-westend-to-rococo.zndsl
+++ b/bridges/testing/tests/0003-required-headers-synced-while-active-westend-to-rococo.zndsl
@@ -1,5 +1,5 @@
Description: While relayer is active, we only sync mandatory and required Westend (and Westend BH) headers to Rococo BH.
-Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml
+Network: ../environments/rococo-westend/bridge_hub_rococo_local_network.toml
Creds: config
# step 1: initialize Rococo AH
@@ -9,7 +9,7 @@ asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-asset-hub
bridge-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-rococo-local" within 60 seconds
# step 3: ensure that initialization has completed
-asset-hub-rococo-collator1: js-script ../helpers/wait-hrmp-channel-opened.js with "1013" within 600 seconds
+asset-hub-rococo-collator1: js-script ../js-helpers/wait-hrmp-channel-opened.js with "1013" within 600 seconds
# step 4: send message from Rococo to Westend
asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-rococo-local" within 60 seconds
@@ -20,7 +20,7 @@ asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "reserve-transf
bridge-hub-rococo-collator1: run ../scripts/start-relayer.sh within 60 seconds
# step 6: ensure that relayer won't sync any extra headers while delivering messages and confirmations
-bridge-hub-rococo-collator1: js-script ../helpers/only-required-headers-synced-when-active.js with "500,westend-at-rococo" within 600 seconds
+bridge-hub-rococo-collator1: js-script ../js-helpers/only-required-headers-synced-when-active.js with "500,westend-at-rococo" within 600 seconds
# wait until other network test has completed OR exit with an error too
asset-hub-rococo-collator1: run ../scripts/sync-exit.sh within 600 seconds
diff --git a/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-westend.zndsl b/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-westend.zndsl
deleted file mode 100644
index 82d1eee2f45cc12b60a85b829d4a4c17588fa9e7..0000000000000000000000000000000000000000
--- a/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-westend.zndsl
+++ /dev/null
@@ -1,39 +0,0 @@
-Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Hub and back
-Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml
-Creds: config
-
-# step 0: start relayer
-# (started by sibling 0001-asset-transfer-works-westend-to-rococo.zndsl test)
-
-# step 1: initialize Westend AH
-asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-westend-local" within 60 seconds
-
-# step 2: initialize Westend bridge hub
-bridge-hub-westend-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-westend-local" within 60 seconds
-
-# step 3: ensure that initialization has completed
-asset-hub-westend-collator1: js-script ../helpers/wait-hrmp-channel-opened.js with "1002" within 600 seconds
-
-# step 4: relay is already started - let's wait until with-Rococo GRANPDA pallet is initialized at Westend
-bridge-hub-westend-collator1: js-script ../helpers/best-finalized-header-at-bridged-chain.js with "Rococo,0" within 400 seconds
-
-# step 5: send WND to //Alice on Rococo AH
-# (that's a required part of a sibling 0001-asset-transfer-works-westend-to-rococo.zndsl test)
-asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-westend-local" within 60 seconds
-
-# step 6: elsewhere Rococo has sent ROC to //Alice - let's wait for it
-asset-hub-westend-collator1: js-script ../helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Rococo" within 600 seconds
-
-# step 7: check that the relayer //Charlie is rewarded by both our AH and target AH
-bridge-hub-westend-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726f,BridgedChain,0" within 300 seconds
-bridge-hub-westend-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726F,ThisChain,0" within 300 seconds
-
-# step 8: send wROC back to Alice at Rococo AH
-asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "withdraw-reserve-assets-from-asset-hub-westend-local" within 60 seconds
-
-# step 9: elsewhere Rococo has sent wWND to //Alice - let's wait for it
-# (we wait until //Alice account increases here - there are no other transactionc that may increase it)
-asset-hub-westend-collator1: js-script ../helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" within 600 seconds
-
-# wait until other network test has completed OR exit with an error too
-asset-hub-westend-collator1: run ../scripts/sync-exit.sh within 600 seconds
diff --git a/bridges/zombienet/tests/0001-asset-transfer-works-westend-to-rococo.zndsl b/bridges/zombienet/tests/0001-asset-transfer-works-westend-to-rococo.zndsl
deleted file mode 100644
index acfe0df03d26779abf0dd3c2aa3dfc8f37c0e3aa..0000000000000000000000000000000000000000
--- a/bridges/zombienet/tests/0001-asset-transfer-works-westend-to-rococo.zndsl
+++ /dev/null
@@ -1,39 +0,0 @@
-Description: User is able to transfer WND from Westend Asset Hub to Rococo Asset Hub and back
-Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml
-Creds: config
-
-# step 0: start relayer
-bridge-hub-rococo-collator1: run ../scripts/start-relayer.sh within 60 seconds
-
-# step 1: initialize Rococo AH
-asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-rococo-local" within 60 seconds
-
-# step 2: initialize Rococo bridge hub
-bridge-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-rococo-local" within 60 seconds
-
-# step 3: ensure that initialization has completed
-asset-hub-rococo-collator1: js-script ../helpers/wait-hrmp-channel-opened.js with "1013" within 600 seconds
-
-# step 4: relay is already started - let's wait until with-Westend GRANPDA pallet is initialized at Rococo
-bridge-hub-rococo-collator1: js-script ../helpers/best-finalized-header-at-bridged-chain.js with "Westend,0" within 400 seconds
-
-# step 5: send ROC to //Alice on Westend AH
-# (that's a required part of a sibling 0001-asset-transfer-works-rococo-to-westend.zndsl test)
-asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-rococo-local" within 60 seconds
-
-# step 6: elsewhere Westend has sent WND to //Alice - let's wait for it
-asset-hub-rococo-collator1: js-script ../helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Westend" within 600 seconds
-
-# step 7: check that the relayer //Charlie is rewarded by both our AH and target AH
-bridge-hub-rococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,BridgedChain,0" within 300 seconds
-bridge-hub-rococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,ThisChain,0" within 300 seconds
-
-# step 8: send wWND back to Alice at Westend AH
-asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "withdraw-reserve-assets-from-asset-hub-rococo-local" within 60 seconds
-
-# step 9: elsewhere Westend has sent wROC to //Alice - let's wait for it
-# (we wait until //Alice account increases here - there are no other transactionc that may increase it)
-asset-hub-rococo-collator1: js-script ../helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" within 600 seconds
-
-# wait until other network test has completed OR exit with an error too
-asset-hub-rococo-collator1: run ../scripts/sync-exit.sh within 600 seconds
diff --git a/bridges/zombienet/tests/0002-mandatory-headers-synced-while-idle-rococo-to-westend.zndsl b/bridges/zombienet/tests/0002-mandatory-headers-synced-while-idle-rococo-to-westend.zndsl
deleted file mode 100644
index eb6a75c373c7add04f895c01e332d40195150370..0000000000000000000000000000000000000000
--- a/bridges/zombienet/tests/0002-mandatory-headers-synced-while-idle-rococo-to-westend.zndsl
+++ /dev/null
@@ -1,26 +0,0 @@
-Description: While relayer is idle, we only sync mandatory Rococo (and a single Rococo BH) headers to Westend BH.
-Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml
-Creds: config
-
-# step 1: initialize Westend bridge hub
-bridge-hub-westend-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-westend-local" within 60 seconds
-
-# step 2: sleep some time before starting relayer. We want to sleep for at least 1 session, which is expected to
-# be 60 seconds for test environment.
-sleep 120 seconds
-
-# step 3: start relayer
-# (it is started by the sibling 0002-mandatory-headers-synced-while-idle-westend-to-rococo.zndsl test file)
-
-# it also takes some time for relayer to initialize bridge, so let's sleep for 5 minutes to be sure that parachain
-# header has been synced
-
-# step 4: ensure that relayer is only syncing mandatory headers while idle. This includes both headers that were
-# born while relay was offline and those in the next 100 seconds while script is active.
-bridge-hub-westend-collator1: js-script ../helpers/only-mandatory-headers-synced-when-idle.js with "300,rococo-at-westend" within 600 seconds
-
-# wait until other network test has completed OR exit with an error too
-asset-hub-westend-collator1: run ../scripts/sync-exit.sh within 600 seconds
-
-# wait until other network test has completed OR exit with an error too
-asset-hub-westend-collator1: run ../scripts/sync-exit.sh within 600 seconds
diff --git a/bridges/zombienet/tests/0002-mandatory-headers-synced-while-idle-westend-to-rococo.zndsl b/bridges/zombienet/tests/0002-mandatory-headers-synced-while-idle-westend-to-rococo.zndsl
deleted file mode 100644
index 728d54d586a9b46625e3db70251b68c6501db922..0000000000000000000000000000000000000000
--- a/bridges/zombienet/tests/0002-mandatory-headers-synced-while-idle-westend-to-rococo.zndsl
+++ /dev/null
@@ -1,26 +0,0 @@
-Description: While relayer is idle, we only sync mandatory Westend (and a single Westend BH) headers to Rococo BH.
-Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml
-Creds: config
-
-# step 1: initialize Rococo bridge hub
-bridge-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-rococo-local" within 60 seconds
-
-# step 2: sleep some time before starting relayer. We want to sleep for at least 1 session, which is expected to
-# be 60 seconds for test environment.
-sleep 120 seconds
-
-# step 3: start relayer
-bridge-hub-rococo-collator1: run ../scripts/start-relayer.sh within 60 seconds
-
-# it also takes some time for relayer to initialize bridge, so let's sleep for 5 minutes to be sure that parachain
-# header has been synced
-
-# step 4: ensure that relayer is only syncing mandatory headers while idle. This includes both headers that were
-# born while relay was offline and those in the next 100 seconds while script is active.
-bridge-hub-rococo-collator1: js-script ../helpers/only-mandatory-headers-synced-when-idle.js with "300,westend-at-rococo" within 600 seconds
-
-# wait until other network test has completed OR exit with an error too
-asset-hub-rococo-collator1: run ../scripts/sync-exit.sh within 600 seconds
-
-# wait until other network test has completed OR exit with an error too
-asset-hub-rococo-collator1: run ../scripts/sync-exit.sh within 600 seconds
diff --git a/cumulus/client/cli/Cargo.toml b/cumulus/client/cli/Cargo.toml
index 0495eab9bd5bc2711e256a7cbb0c06ae13b1f2bc..eaf0d5d5d7f78e578644bf35f83d9543ad9af4bd 100644
--- a/cumulus/client/cli/Cargo.toml
+++ b/cumulus/client/cli/Cargo.toml
@@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
workspace = true
[dependencies]
-clap = { version = "4.4.18", features = ["derive"] }
+clap = { version = "4.5.1", features = ["derive"] }
codec = { package = "parity-scale-codec", version = "3.0.0" }
url = "2.4.0"
diff --git a/cumulus/client/cli/src/lib.rs b/cumulus/client/cli/src/lib.rs
index 1807b8a1718e8b5c800b3bf27b58e0f39cd2948a..a7b2eb19de88a5c585ec3f6dfe5ad46ef0399b88 100644
--- a/cumulus/client/cli/src/lib.rs
+++ b/cumulus/client/cli/src/lib.rs
@@ -30,7 +30,7 @@ use codec::Encode;
use sc_chain_spec::ChainSpec;
use sc_client_api::HeaderBackend;
use sc_service::{
- config::{PrometheusConfig, TelemetryEndpoints},
+ config::{PrometheusConfig, RpcBatchRequestConfig, TelemetryEndpoints},
BasePath, TransactionPoolOptions,
};
use sp_core::hexdisplay::HexDisplay;
@@ -443,6 +443,14 @@ impl sc_cli::CliConfiguration for NormalizedRunCmd {
Ok(self.base.rpc_max_subscriptions_per_connection)
}
+ fn rpc_buffer_capacity_per_connection(&self) -> sc_cli::Result<u32> {
+ Ok(self.base.rpc_message_buffer_capacity_per_connection)
+ }
+
+ fn rpc_batch_config(&self) -> sc_cli::Result<RpcBatchRequestConfig> {
+ self.base.rpc_batch_config()
+ }
+
fn transaction_pool(&self, is_dev: bool) -> sc_cli::Result<TransactionPoolOptions> {
self.base.transaction_pool(is_dev)
}
diff --git a/cumulus/client/consensus/aura/src/collator.rs b/cumulus/client/consensus/aura/src/collator.rs
index db0799235bca27aaa4456da6c8649b0b76fef030..5b7669c88f473b8765b6b343d1797aa707ed5916 100644
--- a/cumulus/client/consensus/aura/src/collator.rs
+++ b/cumulus/client/consensus/aura/src/collator.rs
@@ -258,6 +258,7 @@ where
pub struct SlotClaim<Pub> {
author_pub: Pub,
pre_digest: DigestItem,
+ slot: Slot,
timestamp: Timestamp,
}
@@ -272,7 +273,7 @@ impl SlotClaim {
P::Public: Codec,
P::Signature: Codec,
{
- SlotClaim { author_pub, timestamp, pre_digest: aura_internal::pre_digest::<P>(slot) }
+ SlotClaim { author_pub, timestamp, pre_digest: aura_internal::pre_digest::<P>(slot), slot }
}
/// Get the author's public key.
@@ -285,6 +286,11 @@ impl SlotClaim {
&self.pre_digest
}
+ /// Get the slot assigned to this claim.
+ pub fn slot(&self) -> Slot {
+ self.slot
+ }
+
/// Get the timestamp corresponding to the relay-chain slot this claim was
/// generated against.
pub fn timestamp(&self) -> Timestamp {
diff --git a/cumulus/client/consensus/aura/src/collators/basic.rs b/cumulus/client/consensus/aura/src/collators/basic.rs
index 78f6b726aff0cb63cd08259c327bfbda71c05b8b..52b83254951f0e0ba0fd9ad5420d7faca2402066 100644
--- a/cumulus/client/consensus/aura/src/collators/basic.rs
+++ b/cumulus/client/consensus/aura/src/collators/basic.rs
@@ -33,12 +33,12 @@ use cumulus_relay_chain_interface::RelayChainInterface;
use polkadot_node_primitives::CollationResult;
use polkadot_overseer::Handle as OverseerHandle;
-use polkadot_primitives::{CollatorPair, Id as ParaId};
+use polkadot_primitives::{CollatorPair, Id as ParaId, ValidationCode};
use futures::{channel::mpsc::Receiver, prelude::*};
use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf};
use sc_consensus::BlockImport;
-use sp_api::ProvideRuntimeApi;
+use sp_api::{CallApiAt, ProvideRuntimeApi};
use sp_application_crypto::AppPublic;
use sp_blockchain::HeaderBackend;
use sp_consensus::SyncOracle;
@@ -47,6 +47,7 @@ use sp_core::crypto::Pair;
use sp_inherents::CreateInherentDataProviders;
use sp_keystore::KeystorePtr;
use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member};
+use sp_state_machine::Backend as _;
use std::{convert::TryFrom, sync::Arc, time::Duration};
use crate::collator as collator_util;
@@ -100,6 +101,7 @@ where
+ AuxStore
+ HeaderBackend<Block>
+ BlockBackend<Block>
+ + CallApiAt<Block>
+ Send
+ Sync
+ 'static,
@@ -141,6 +143,8 @@ where
collator_util::Collator::<Block, P, _, _, _, _, _>::new(params)
};
+ let mut last_processed_slot = 0;
+
while let Some(request) = collation_requests.next().await {
macro_rules! reject_with_error {
($err:expr) => {{
@@ -170,6 +174,22 @@ where
continue
}
+ let Ok(Some(code)) =
+ params.para_client.state_at(parent_hash).map_err(drop).and_then(|s| {
+ s.storage(&sp_core::storage::well_known_keys::CODE).map_err(drop)
+ })
+ else {
+ continue;
+ };
+
+ super::check_validation_code_or_log(
+ &ValidationCode::from(code).hash(),
+ params.para_id,
+ &params.relay_client,
+ *request.relay_parent(),
+ )
+ .await;
+
let relay_parent_header =
match params.relay_client.header(RBlockId::hash(*request.relay_parent())).await {
Err(e) => reject_with_error!(e),
@@ -192,6 +212,18 @@ where
Err(e) => reject_with_error!(e),
};
+ // With async backing this function will be called every relay chain block.
+ //
+ // Most parachains currently run with 12-second slots, so they would try to
+ // produce multiple blocks per slot, which would very likely fail on chain. Thus, we have
+ // this "hack" to only produce one block per slot.
+ //
+ // With https://github.com/paritytech/polkadot-sdk/issues/3168 this implementation will be
+ // obsolete, and the underlying issue will be fixed as well.
+ if last_processed_slot >= *claim.slot() {
+ continue
+ }
+
let (parachain_inherent_data, other_inherent_data) = try_request!(
collator
.create_inherent_data(
@@ -228,6 +260,8 @@ where
request.complete(None);
tracing::debug!(target: crate::LOG_TARGET, "No block proposal");
}
+
+ last_processed_slot = *claim.slot();
}
}
}
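The guard added above exists because, with async backing, the collation callback fires on every relay-chain block, i.e. roughly twice per 12-second parachain slot. A minimal standalone sketch of the same guard, using a hypothetical `Slot` stand-in rather than the real collator types:

```rust
// Minimal sketch of the one-block-per-slot guard; `Slot` is a hypothetical
// stand-in for `sp_consensus_slots::Slot`, not the real type.
#[derive(Clone, Copy)]
struct Slot(u64);

fn main() {
    let mut last_processed_slot = 0u64;
    // Pretend each relay-chain block yields a claimed parachain slot; with
    // 12-second parachain slots and 6-second relay blocks, every other
    // claim repeats the previous slot.
    for claim in [Slot(10), Slot(10), Slot(11)] {
        if last_processed_slot >= claim.0 {
            continue; // already produced a block in this slot
        }
        println!("authoring block for slot {}", claim.0);
        last_processed_slot = claim.0;
    }
}
```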
diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs
index e24b7f6f1c93b9bbe92cdf9ce5958194065862ae..161f10d55a193de35a2585e1a1f5725f30e19bf7 100644
--- a/cumulus/client/consensus/aura/src/collators/lookahead.rs
+++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs
@@ -59,7 +59,7 @@ use sp_api::ProvideRuntimeApi;
use sp_application_crypto::AppPublic;
use sp_blockchain::HeaderBackend;
use sp_consensus::SyncOracle;
-use sp_consensus_aura::{AuraApi, Slot, SlotDuration};
+use sp_consensus_aura::{AuraApi, Slot};
use sp_core::crypto::Pair;
use sp_inherents::CreateInherentDataProviders;
use sp_keystore::KeystorePtr;
@@ -95,8 +95,6 @@ pub struct Params {
pub para_id: ParaId,
/// A handle to the relay-chain client's "Overseer" or task orchestrator.
pub overseer_handle: OverseerHandle,
- /// The length of slots in this chain.
- pub slot_duration: SlotDuration,
/// The length of slots in the relay chain.
pub relay_chain_slot_duration: Duration,
/// The underlying block proposer this should call into.
@@ -214,26 +212,6 @@ where
},
};
- let (slot_now, timestamp) = match consensus_common::relay_slot_and_timestamp(
- &relay_parent_header,
- params.relay_chain_slot_duration,
- ) {
- None => continue,
- Some((r_s, t)) => {
- let our_slot = Slot::from_timestamp(t, params.slot_duration);
- tracing::debug!(
- target: crate::LOG_TARGET,
- relay_slot = ?r_s,
- para_slot = ?our_slot,
- timestamp = ?t,
- slot_duration = ?params.slot_duration,
- relay_chain_slot_duration = ?params.relay_chain_slot_duration,
- "Adjusted relay-chain slot to parachain slot"
- );
- (our_slot, t)
- },
- };
-
let parent_search_params = ParentSearchParams {
relay_parent,
para_id: params.para_id,
@@ -272,14 +250,39 @@ where
let para_client = &*params.para_client;
let keystore = &params.keystore;
let can_build_upon = |block_hash| {
- can_build_upon::<_, _, P>(
+ let slot_duration = match sc_consensus_aura::standalone::slot_duration_at(
+ &*params.para_client,
+ block_hash,
+ ) {
+ Ok(sd) => sd,
+ Err(err) => {
+ tracing::error!(target: crate::LOG_TARGET, ?err, "Failed to acquire parachain slot duration");
+ return None
+ },
+ };
+ tracing::debug!(target: crate::LOG_TARGET, ?slot_duration, ?block_hash, "Parachain slot duration acquired");
+ let (relay_slot, timestamp) = consensus_common::relay_slot_and_timestamp(
+ &relay_parent_header,
+ params.relay_chain_slot_duration,
+ )?;
+ let slot_now = Slot::from_timestamp(timestamp, slot_duration);
+ tracing::debug!(
+ target: crate::LOG_TARGET,
+ ?relay_slot,
+ para_slot = ?slot_now,
+ ?timestamp,
+ ?slot_duration,
+ relay_chain_slot_duration = ?params.relay_chain_slot_duration,
+ "Adjusted relay-chain slot to parachain slot"
+ );
+ Some(can_build_upon::<_, _, P>(
slot_now,
timestamp,
block_hash,
included_block,
para_client,
&keystore,
- )
+ ))
};
// Sort by depth, ascending, to choose the longest chain.
@@ -287,10 +290,7 @@ where
// If the longest chain has space, build upon that. Otherwise, don't
// build at all.
potential_parents.sort_by_key(|a| a.depth);
- let initial_parent = match potential_parents.pop() {
- None => continue,
- Some(p) => p,
- };
+ let Some(initial_parent) = potential_parents.pop() else { continue };
// Build in a loop until not allowed. Note that the authorities can change
// at any block, so we need to re-claim our slot every time.
@@ -298,12 +298,19 @@ where
let mut parent_header = initial_parent.header;
let overseer_handle = &mut params.overseer_handle;
+ // We mainly call this to inform users at genesis if there is a mismatch with the
+ // on-chain data.
+ collator.collator_service().check_block_status(parent_hash, &parent_header);
+
// This needs to change to support elastic scaling, but for continuously
// scheduled chains this ensures that the backlog will grow steadily.
for n_built in 0..2 {
- let slot_claim = match can_build_upon(parent_hash).await {
+ let slot_claim = match can_build_upon(parent_hash) {
+ Some(fut) => match fut.await {
+ None => break,
+ Some(c) => c,
+ },
None => break,
- Some(c) => c,
};
tracing::debug!(
@@ -347,6 +354,14 @@ where
Some(v) => v,
};
+ super::check_validation_code_or_log(
+ &validation_code_hash,
+ params.para_id,
+ &params.relay_client,
+ relay_parent,
+ )
+ .await;
+
match collator
.collate(
&parent_header,
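Two details of the loop above are worth spelling out. The slot duration is now re-read at every candidate block via `slot_duration_at`, since a runtime upgrade can change it; that is why `can_build_upon` became a closure returning an `Option` of a future. And parent selection simply sorts candidates by depth, ascending, and pops the last one, so the collator always extends the deepest known chain. A small self-contained sketch of that selection, with a simplified `PotentialParent` stand-in for the real type:

```rust
// Simplified sketch of the parent selection above. `PotentialParent` is a
// hypothetical stand-in; the real type also carries the header and flags.
struct PotentialParent {
    depth: u32,
    hash: u8,
}

fn main() {
    let mut potential_parents = vec![
        PotentialParent { depth: 1, hash: 0xa1 },
        PotentialParent { depth: 3, hash: 0xa3 },
        PotentialParent { depth: 2, hash: 0xa2 },
    ];
    // Sort by depth, ascending, so the deepest candidate ends up last ...
    potential_parents.sort_by_key(|p| p.depth);
    // ... and popping it selects the longest chain; with no candidate at
    // all, the collator skips this relay-chain block entirely.
    let Some(initial_parent) = potential_parents.pop() else { return };
    assert_eq!(initial_parent.depth, 3);
    println!("building on parent 0x{:x}", initial_parent.hash);
}
```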
diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs
index 4c7b759daf736f69de48b586b082a7d01534d7e3..6e0067d0cedb602face8943737f99f3cb1a201a3 100644
--- a/cumulus/client/consensus/aura/src/collators/mod.rs
+++ b/cumulus/client/consensus/aura/src/collators/mod.rs
@@ -20,5 +20,60 @@
//! included parachain block, as well as the [`lookahead`] collator, which prospectively
//! builds on parachain blocks which have not yet been included in the relay chain.
+use cumulus_relay_chain_interface::RelayChainInterface;
+use polkadot_primitives::{
+ Hash as RHash, Id as ParaId, OccupiedCoreAssumption, ValidationCodeHash,
+};
+
pub mod basic;
pub mod lookahead;
+
+/// Check the `local_validation_code_hash` against the validation code hash in the relay chain
+/// state.
+///
+/// If the code hashes do not match, it logs a warning.
+async fn check_validation_code_or_log(
+ local_validation_code_hash: &ValidationCodeHash,
+ para_id: ParaId,
+ relay_client: &impl RelayChainInterface,
+ relay_parent: RHash,
+) {
+ let state_validation_code_hash = match relay_client
+ .validation_code_hash(relay_parent, para_id, OccupiedCoreAssumption::Included)
+ .await
+ {
+ Ok(hash) => hash,
+ Err(error) => {
+ tracing::debug!(
+ target: super::LOG_TARGET,
+ %error,
+ ?relay_parent,
+ %para_id,
+ "Failed to fetch validation code hash",
+ );
+ return
+ },
+ };
+
+ match state_validation_code_hash {
+ Some(state) =>
+ if state != *local_validation_code_hash {
+ tracing::warn!(
+ target: super::LOG_TARGET,
+ %para_id,
+ ?relay_parent,
+ ?local_validation_code_hash,
+ relay_validation_code_hash = ?state,
+ "Parachain code doesn't match validation code stored in the relay chain state",
+ );
+ },
+ None => {
+ tracing::warn!(
+ target: super::LOG_TARGET,
+ %para_id,
+ ?relay_parent,
+ "Could not find validation code for parachain in the relay chain state.",
+ );
+ },
+ }
+}
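This helper only compares hashes; the caller derives the local hash itself. In `basic.rs` above, that is done by reading the raw runtime code from the parent state under the well-known `:code` key and hashing it. A short sketch of that derivation, assuming only that the `polkadot-primitives` crate is available:

```rust
// Sketch: deriving the local validation code hash from raw runtime code
// bytes (as read from the `:code` storage key), mirroring the call site
// in `basic.rs` above.
use polkadot_primitives::{ValidationCode, ValidationCodeHash};

fn local_code_hash(raw_code: Vec<u8>) -> ValidationCodeHash {
    // `ValidationCode` wraps the raw Wasm blob; `hash()` returns the hash
    // the relay chain stores for the parachain's registered code.
    ValidationCode::from(raw_code).hash()
}
```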
diff --git a/cumulus/client/consensus/aura/src/lib.rs b/cumulus/client/consensus/aura/src/lib.rs
index 6ededa7a92c11cb8c313f7da01017eeef256fb06..ed6f5bdd4d6984350c5f59a3753618c3a038f323 100644
--- a/cumulus/client/consensus/aura/src/lib.rs
+++ b/cumulus/client/consensus/aura/src/lib.rs
@@ -42,12 +42,22 @@ use sp_core::crypto::Pair;
use sp_inherents::CreateInherentDataProviders;
use sp_keystore::KeystorePtr;
use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member, NumberFor};
-use std::{convert::TryFrom, marker::PhantomData, sync::Arc};
+use std::{
+ convert::TryFrom,
+ marker::PhantomData,
+ sync::{
+ atomic::{AtomicU64, Ordering},
+ Arc,
+ },
+};
mod import_queue;
pub use import_queue::{build_verifier, import_queue, BuildVerifierParams, ImportQueueParams};
-pub use sc_consensus_aura::{slot_duration, AuraVerifier, BuildAuraWorkerParams, SlotProportion};
+pub use sc_consensus_aura::{
+ slot_duration, standalone::slot_duration_at, AuraVerifier, BuildAuraWorkerParams,
+ SlotProportion,
+};
pub use sc_consensus_slots::InherentDataProviderExt;
pub mod collator;
@@ -61,6 +71,7 @@ pub struct AuraConsensus {
create_inherent_data_providers: Arc<CIDP>,
aura_worker: Arc<Mutex<W>>,
slot_duration: SlotDuration,
+ last_slot_processed: Arc<AtomicU64>,
_phantom: PhantomData<B>,
}
@@ -70,6 +81,7 @@ impl Clone for AuraConsensus {
create_inherent_data_providers: self.create_inherent_data_providers.clone(),
aura_worker: self.aura_worker.clone(),
slot_duration: self.slot_duration,
+ last_slot_processed: self.last_slot_processed.clone(),
_phantom: PhantomData,
}
}
@@ -156,6 +168,7 @@ where
Box::new(AuraConsensus {
create_inherent_data_providers: Arc::new(create_inherent_data_providers),
aura_worker: Arc::new(Mutex::new(worker)),
+ last_slot_processed: Default::default(),
slot_duration,
_phantom: PhantomData,
})
@@ -221,6 +234,18 @@ where
Some((validation_data.max_pov_size / 2) as usize),
);
+ // With async backing this function will be called every relay chain block.
+ //
+ // Most parachains currently run with 12-second slots, so they would try to produce
+ // multiple blocks per slot, which would very likely fail on chain. Thus, we have this
+ // "hack" to only produce one block per slot.
+ //
+ // With https://github.com/paritytech/polkadot-sdk/issues/3168 this implementation will be
+ // obsolete, and the underlying issue will be fixed as well.
+ if self.last_slot_processed.fetch_max(*info.slot, Ordering::Relaxed) >= *info.slot {
+ return None
+ }
+
let res = self.aura_worker.lock().await.on_slot(info).await?;
Some(ParachainCandidate { block: res.block, proof: res.storage_proof })
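Unlike the basic collator, `AuraConsensus` is `Clone` and shares its counter through an `Arc`, so the guard here uses `AtomicU64::fetch_max`: it returns the previous maximum, so for any given slot only the first caller observes a smaller value and proceeds. A standalone sketch of the pattern:

```rust
// Standalone sketch of the `fetch_max` slot guard above. `fetch_max`
// atomically stores the maximum of the stored and given value and returns
// the previous value, so repeated claims for one slot are skipped.
use std::sync::{
    atomic::{AtomicU64, Ordering},
    Arc,
};

fn main() {
    let last_slot_processed = Arc::new(AtomicU64::new(0));
    for slot in [10u64, 10, 11] {
        if last_slot_processed.fetch_max(slot, Ordering::Relaxed) >= slot {
            println!("slot {slot}: already produced a block, skipping");
            continue;
        }
        println!("slot {slot}: producing a block");
    }
}
```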
diff --git a/cumulus/client/consensus/common/Cargo.toml b/cumulus/client/consensus/common/Cargo.toml
index 7fee51310d01e2e6696c4cbb20ba10dade106dbc..5a014b10e35f39b0a5e00ca01da7cfd3ecc50a5f 100644
--- a/cumulus/client/consensus/common/Cargo.toml
+++ b/cumulus/client/consensus/common/Cargo.toml
@@ -14,7 +14,7 @@ async-trait = "0.1.74"
codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] }
dyn-clone = "1.0.16"
futures = "0.3.28"
-log = "0.4.20"
+log = { workspace = true, default-features = true }
tracing = "0.1.37"
# Substrate
diff --git a/cumulus/client/consensus/common/src/tests.rs b/cumulus/client/consensus/common/src/tests.rs
index 597d1ab2acc2cff42d3230898c1129a7ba63b6f3..bfb95ae388ae3cd31f5035a9c6195631adbb8809 100644
--- a/cumulus/client/consensus/common/src/tests.rs
+++ b/cumulus/client/consensus/common/src/tests.rs
@@ -136,6 +136,15 @@ impl RelayChainInterface for Relaychain {
Ok(Some(PersistedValidationData { parent_head, ..Default::default() }))
}
+ async fn validation_code_hash(
+ &self,
+ _: PHash,
+ _: ParaId,
+ _: OccupiedCoreAssumption,
+ ) -> RelayChainResult<Option<ValidationCodeHash>>