// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Everything about sending outgoing messages.
use crate::{Config, LOG_TARGET};
use bp_messages::{DeliveredMessages, LaneId, MessageNonce, OutboundLaneData, UnrewardedRelayer};
use codec::{Decode, Encode};
use frame_support::{
weights::{RuntimeDbWeight, Weight},
BoundedVec, PalletError,
};
use num_traits::Zero;
use scale_info::TypeInfo;
use sp_runtime::RuntimeDebug;
use sp_std::collections::vec_deque::VecDeque;
/// Outbound lane storage.
pub trait OutboundLaneStorage {
/// Stored message payload type.
type StoredMessagePayload;
/// Lane id.
fn id(&self) -> LaneId;
/// Get lane data from the storage.
fn data(&self) -> OutboundLaneData;
/// Update lane data in the storage.
fn set_data(&mut self, data: OutboundLaneData);
/// Returns saved outbound message payload.
#[cfg(test)]
fn message(&self, nonce: &MessageNonce) -> Option<Self::StoredMessagePayload>;
/// Save outbound message in the storage.
fn save_message(&mut self, nonce: MessageNonce, message_payload: Self::StoredMessagePayload);
/// Remove outbound message from the storage.
fn remove_message(&mut self, nonce: &MessageNonce);
}
/// Outbound message data wrapper that implements `MaxEncodedLen`.
pub type StoredMessagePayload<T, I> = BoundedVec<u8, <T as Config<I>>::MaximalOutboundPayloadSize>;
/// Error that may happen during messages receival confirmation.
#[derive(Encode, Decode, RuntimeDebug, PartialEq, Eq, PalletError, TypeInfo)]
pub enum ReceivalConfirmationError {
/// Bridged chain is trying to confirm more messages than we have generated. May be a result
/// of invalid bridged chain storage.
FailedToConfirmFutureMessages,
/// The unrewarded relayers vec contains an empty entry. May be a result of invalid bridged
/// chain storage.
EmptyUnrewardedRelayerEntry,
/// The unrewarded relayers vec contains non-consecutive entries. May be a result of invalid
/// bridged chain storage.
NonConsecutiveUnrewardedRelayerEntries,
/// The chain has more messages that need to be confirmed than there are in the proof.
TryingToConfirmMoreMessagesThanExpected,
}
/// Outbound messages lane.
pub struct OutboundLane<S> {
storage: S,
}
impl<S: OutboundLaneStorage> OutboundLane<S> {
/// Create new outbound lane backed by given storage.
pub fn new(storage: S) -> Self {
OutboundLane { storage }
}
/// Get this lane data.
pub fn data(&self) -> OutboundLaneData {
self.storage.data()
}
/// Send message over lane.
///
/// Returns new message nonce.
pub fn send_message(&mut self, message_payload: S::StoredMessagePayload) -> MessageNonce {
let mut data = self.storage.data();
let nonce = data.latest_generated_nonce + 1;
data.latest_generated_nonce = nonce;
self.storage.save_message(nonce, message_payload);
self.storage.set_data(data);
nonce
}
/// Confirm messages delivery.
pub fn confirm_delivery<RelayerId>(
&mut self,
max_allowed_messages: MessageNonce,
latest_delivered_nonce: MessageNonce,
relayers: &VecDeque<UnrewardedRelayer<RelayerId>>,
) -> Result<Option<DeliveredMessages>, ReceivalConfirmationError> {
let mut data = self.storage.data();
let confirmed_messages = DeliveredMessages {
begin: data.latest_received_nonce.saturating_add(1),
end: latest_delivered_nonce,
};
if confirmed_messages.total_messages() == 0 {
return Ok(None)
}
if confirmed_messages.end > data.latest_generated_nonce {
return Err(ReceivalConfirmationError::FailedToConfirmFutureMessages)
}
if confirmed_messages.total_messages() > max_allowed_messages {
// The relayer has declared the correct number of messages that the proof contains (this
// is checked outside of this function). But it may happen (only if this/bridged
// chain storage is corrupted) that the actual number of confirmed messages is
// larger than declared. This would mean that the 'reward loop' would take more time than
// the weight formula accounts for, so we can't allow that.
log::trace!(
target: LOG_TARGET,
"Messages delivery proof contains too many messages to confirm: {} vs declared {}",
confirmed_messages.total_messages(),
max_allowed_messages,
);
return Err(ReceivalConfirmationError::TryingToConfirmMoreMessagesThanExpected)
}
ensure_unrewarded_relayers_are_correct(confirmed_messages.end, relayers)?;
data.latest_received_nonce = confirmed_messages.end;
self.storage.set_data(data);
Ok(Some(confirmed_messages))
}
/// Prune messages that have already been received, as long as `remaining_weight` allows it.
///
/// Returns the weight consumed by message pruning and the lane state update.
pub fn prune_messages(
&mut self,
db_weight: RuntimeDbWeight,
mut remaining_weight: Weight,
) -> Weight {
let write_weight = db_weight.writes(1);
let two_writes_weight = write_weight + write_weight;
let mut spent_weight = Weight::zero();
let mut data = self.storage.data();
while remaining_weight.all_gte(two_writes_weight) &&
data.oldest_unpruned_nonce <= data.latest_received_nonce
{
self.storage.remove_message(&data.oldest_unpruned_nonce);
spent_weight += write_weight;
remaining_weight -= write_weight;
data.oldest_unpruned_nonce += 1;
}
if !spent_weight.is_zero() {
spent_weight += write_weight;
self.storage.set_data(data);
}
spent_weight
}
}
/// Verifies unrewarded relayers vec.
///
/// Returns `Err(_)` if unrewarded relayers vec contains invalid data, meaning that the bridged
/// chain has invalid runtime storage.
fn ensure_unrewarded_relayers_are_correct<RelayerId>(
latest_received_nonce: MessageNonce,
relayers: &VecDeque<UnrewardedRelayer<RelayerId>>,
) -> Result<(), ReceivalConfirmationError> {
let mut expected_entry_begin = relayers.front().map(|entry| entry.messages.begin);
for entry in relayers {
// unrewarded relayer entry must have at least 1 unconfirmed message
// (guaranteed by the `InboundLane::receive_message()`)
if entry.messages.end < entry.messages.begin {
return Err(ReceivalConfirmationError::EmptyUnrewardedRelayerEntry)
}
// every entry must confirm range of messages that follows previous entry range
// (guaranteed by the `InboundLane::receive_message()`)
if expected_entry_begin != Some(entry.messages.begin) {
return Err(ReceivalConfirmationError::NonConsecutiveUnrewardedRelayerEntries)
}
expected_entry_begin = entry.messages.end.checked_add(1);
// entry can't confirm messages larger than `inbound_lane_data.latest_received_nonce()`
// (guaranteed by the `InboundLane::receive_message()`)
if entry.messages.end > latest_received_nonce {
return Err(ReceivalConfirmationError::FailedToConfirmFutureMessages)
}
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
mock::{
outbound_message_data, run_test, unrewarded_relayer, TestRelayer, TestRuntime,
REGULAR_PAYLOAD, TEST_LANE_ID,
},
outbound_lane,
};
use frame_support::weights::constants::RocksDbWeight;
use sp_std::ops::RangeInclusive;
fn unrewarded_relayers(
nonces: RangeInclusive<MessageNonce>,
) -> VecDeque<UnrewardedRelayer<TestRelayer>> {
vec![unrewarded_relayer(*nonces.start(), *nonces.end(), 0)]
.into_iter()
.collect()
}
fn delivered_messages(nonces: RangeInclusive<MessageNonce>) -> DeliveredMessages {
DeliveredMessages { begin: *nonces.start(), end: *nonces.end() }
}
fn assert_3_messages_confirmation_fails(
latest_received_nonce: MessageNonce,
relayers: &VecDeque<UnrewardedRelayer<TestRelayer>>,
) -> Result<Option<DeliveredMessages>, ReceivalConfirmationError> {
run_test(|| {
let mut lane = outbound_lane::<TestRuntime, _>(TEST_LANE_ID);
lane.send_message(outbound_message_data(REGULAR_PAYLOAD));
lane.send_message(outbound_message_data(REGULAR_PAYLOAD));
lane.send_message(outbound_message_data(REGULAR_PAYLOAD));
assert_eq!(lane.storage.data().latest_generated_nonce, 3);
assert_eq!(lane.storage.data().latest_received_nonce, 0);
let result = lane.confirm_delivery(3, latest_received_nonce, relayers);
assert_eq!(lane.storage.data().latest_generated_nonce, 3);
assert_eq!(lane.storage.data().latest_received_nonce, 0);
result
})
}
#[test]
fn send_message_works() {
run_test(|| {
let mut lane = outbound_lane::<TestRuntime, _>(TEST_LANE_ID);
assert_eq!(lane.storage.data().latest_generated_nonce, 0);
assert_eq!(lane.send_message(outbound_message_data(REGULAR_PAYLOAD)), 1);
assert!(lane.storage.message(&1).is_some());
assert_eq!(lane.storage.data().latest_generated_nonce, 1);
});
}
#[test]
fn confirm_delivery_works() {
run_test(|| {
let mut lane = outbound_lane::<TestRuntime, _>(TEST_LANE_ID);
assert_eq!(lane.send_message(outbound_message_data(REGULAR_PAYLOAD)), 1);
assert_eq!(lane.send_message(outbound_message_data(REGULAR_PAYLOAD)), 2);
assert_eq!(lane.send_message(outbound_message_data(REGULAR_PAYLOAD)), 3);
assert_eq!(lane.storage.data().latest_generated_nonce, 3);
assert_eq!(lane.storage.data().latest_received_nonce, 0);
assert_eq!(
lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)),
Ok(Some(delivered_messages(1..=3))),
);
assert_eq!(lane.storage.data().latest_generated_nonce, 3);
assert_eq!(lane.storage.data().latest_received_nonce, 3);
});
}
#[test]
fn confirm_delivery_rejects_nonce_lesser_than_latest_received() {
run_test(|| {
let mut lane = outbound_lane::<TestRuntime, _>(TEST_LANE_ID);
lane.send_message(outbound_message_data(REGULAR_PAYLOAD));
lane.send_message(outbound_message_data(REGULAR_PAYLOAD));
lane.send_message(outbound_message_data(REGULAR_PAYLOAD));
assert_eq!(lane.storage.data().latest_generated_nonce, 3);
assert_eq!(lane.storage.data().latest_received_nonce, 0);
assert_eq!(
lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)),
Ok(Some(delivered_messages(1..=3))),
);
assert_eq!(lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), Ok(None),);
assert_eq!(lane.storage.data().latest_generated_nonce, 3);
assert_eq!(lane.storage.data().latest_received_nonce, 3);
assert_eq!(lane.confirm_delivery(1, 2, &unrewarded_relayers(1..=1)), Ok(None),);
assert_eq!(lane.storage.data().latest_generated_nonce, 3);
assert_eq!(lane.storage.data().latest_received_nonce, 3);
});
}
#[test]
fn confirm_delivery_rejects_nonce_larger_than_last_generated() {
assert_eq!(
assert_3_messages_confirmation_fails(10, &unrewarded_relayers(1..=10),),
Err(ReceivalConfirmationError::FailedToConfirmFutureMessages),
);
}
#[test]
fn confirm_delivery_fails_if_entry_confirms_future_messages() {
assert_eq!(
assert_3_messages_confirmation_fails(
3,
&unrewarded_relayers(1..=1)
.into_iter()
.chain(unrewarded_relayers(2..=30).into_iter())
.chain(unrewarded_relayers(3..=3).into_iter())
.collect(),
),
Err(ReceivalConfirmationError::FailedToConfirmFutureMessages),
);
}
#[test]
#[allow(clippy::reversed_empty_ranges)]
fn confirm_delivery_fails_if_entry_is_empty() {
assert_eq!(
assert_3_messages_confirmation_fails(
3,
&unrewarded_relayers(1..=1)
.into_iter()
.chain(unrewarded_relayers(2..=1).into_iter())
.chain(unrewarded_relayers(2..=3).into_iter())
.collect(),
),
Err(ReceivalConfirmationError::EmptyUnrewardedRelayerEntry),
);
}
#[test]
fn confirm_delivery_fails_if_entries_are_non_consecutive() {
assert_eq!(
assert_3_messages_confirmation_fails(
3,
&unrewarded_relayers(1..=1)
.into_iter()
.chain(unrewarded_relayers(3..=3).into_iter())
.chain(unrewarded_relayers(2..=2).into_iter())
.collect(),
),
Err(ReceivalConfirmationError::NonConsecutiveUnrewardedRelayerEntries),
);
}
#[test]
fn prune_messages_works() {
run_test(|| {
let mut lane = outbound_lane::<TestRuntime, _>(TEST_LANE_ID);
// when lane is empty, nothing is pruned
assert_eq!(
lane.prune_messages(RocksDbWeight::get(), RocksDbWeight::get().writes(101)),
Weight::zero()
);
assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1);
// when nothing is confirmed, nothing is pruned
lane.send_message(outbound_message_data(REGULAR_PAYLOAD));
lane.send_message(outbound_message_data(REGULAR_PAYLOAD));
lane.send_message(outbound_message_data(REGULAR_PAYLOAD));
assert!(lane.storage.message(&1).is_some());
assert!(lane.storage.message(&2).is_some());
assert!(lane.storage.message(&3).is_some());
assert_eq!(
lane.prune_messages(RocksDbWeight::get(), RocksDbWeight::get().writes(101)),
Weight::zero()
);
assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1);
// after confirmation, some messages are received
assert_eq!(
lane.confirm_delivery(2, 2, &unrewarded_relayers(1..=2)),
Ok(Some(delivered_messages(1..=2))),
);
assert_eq!(
lane.prune_messages(RocksDbWeight::get(), RocksDbWeight::get().writes(101)),
RocksDbWeight::get().writes(3),
);
assert!(lane.storage.message(&1).is_none());
assert!(lane.storage.message(&2).is_none());
assert!(lane.storage.message(&3).is_some());
assert_eq!(lane.storage.data().oldest_unpruned_nonce, 3);
// after last message is confirmed, everything is pruned
assert_eq!(
lane.confirm_delivery(1, 3, &unrewarded_relayers(3..=3)),
Ok(Some(delivered_messages(3..=3))),
);
assert_eq!(
lane.prune_messages(RocksDbWeight::get(), RocksDbWeight::get().writes(101)),
RocksDbWeight::get().writes(2),
);
assert!(lane.storage.message(&1).is_none());
assert!(lane.storage.message(&2).is_none());
assert!(lane.storage.message(&3).is_none());
assert_eq!(lane.storage.data().oldest_unpruned_nonce, 4);
});
}
#[test]
fn confirm_delivery_detects_when_more_than_expected_messages_are_confirmed() {
run_test(|| {
let mut lane = outbound_lane::<TestRuntime, _>(TEST_LANE_ID);
lane.send_message(outbound_message_data(REGULAR_PAYLOAD));
lane.send_message(outbound_message_data(REGULAR_PAYLOAD));
lane.send_message(outbound_message_data(REGULAR_PAYLOAD));
assert_eq!(
lane.confirm_delivery(0, 3, &unrewarded_relayers(1..=3)),
Err(ReceivalConfirmationError::TryingToConfirmMoreMessagesThanExpected),
);
assert_eq!(
lane.confirm_delivery(2, 3, &unrewarded_relayers(1..=3)),
Err(ReceivalConfirmationError::TryingToConfirmMoreMessagesThanExpected),
);
assert_eq!(
lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)),
Ok(Some(delivered_messages(1..=3))),
);
});
}
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Autogenerated weights for pallet_bridge_messages
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
//! DATE: 2023-03-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
//! HOSTNAME: `covid`, CPU: `11th Gen Intel(R) Core(TM) i7-11800H @ 2.30GHz`
//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024
// Executed Command:
// target/release/unknown-bridge-node
// benchmark
// pallet
// --chain=dev
// --steps=50
// --repeat=20
// --pallet=pallet_bridge_messages
// --extrinsic=*
// --execution=wasm
// --wasm-execution=Compiled
// --heap-pages=4096
// --output=./modules/messages/src/weights.rs
// --template=./.maintain/bridge-weight-template.hbs
#![allow(clippy::all)]
#![allow(unused_parens)]
#![allow(unused_imports)]
#![allow(missing_docs)]
use frame_support::{
traits::Get,
weights::{constants::RocksDbWeight, Weight},
};
use sp_std::marker::PhantomData;
/// Weight functions needed for pallet_bridge_messages.
pub trait WeightInfo {
fn receive_single_message_proof() -> Weight;
fn receive_two_messages_proof() -> Weight;
fn receive_single_message_proof_with_outbound_lane_state() -> Weight;
fn receive_single_message_proof_1_kb() -> Weight;
fn receive_single_message_proof_16_kb() -> Weight;
fn receive_delivery_proof_for_single_message() -> Weight;
fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight;
fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight;
fn receive_single_message_proof_with_dispatch(i: u32) -> Weight;
}
/// Weights for `pallet_bridge_messages` that are generated using one of the Bridge testnets.
///
/// Those weights are test only and must never be used in production.
pub struct BridgeWeight<T>(PhantomData<T>);
impl<T: frame_system::Config> WeightInfo for BridgeWeight<T> {
/// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2),
/// added: 497, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1)
///
/// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added:
/// 51655, mode: MaxEncodedLen)
fn receive_single_message_proof() -> Weight {
// Proof Size summary in bytes:
// Measured: `618`
// Estimated: `57170`
// Minimum execution time: 52_321 nanoseconds.
Weight::from_parts(54_478_000, 57170)
.saturating_add(T::DbWeight::get().reads(3_u64))
.saturating_add(T::DbWeight::get().writes(1_u64))
}
/// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2),
/// added: 497, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1)
///
/// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added:
/// 51655, mode: MaxEncodedLen)
fn receive_two_messages_proof() -> Weight {
// Proof Size summary in bytes:
// Measured: `618`
// Estimated: `57170`
// Minimum execution time: 64_597 nanoseconds.
Weight::from_parts(69_267_000, 57170)
.saturating_add(T::DbWeight::get().reads(3_u64))
.saturating_add(T::DbWeight::get().writes(1_u64))
}
/// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2),
/// added: 497, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1)
///
/// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added:
/// 51655, mode: MaxEncodedLen)
fn receive_single_message_proof_with_outbound_lane_state() -> Weight {
// Proof Size summary in bytes:
// Measured: `618`
// Estimated: `57170`
// Minimum execution time: 64_079 nanoseconds.
Weight::from_parts(65_905_000, 57170)
.saturating_add(T::DbWeight::get().reads(3_u64))
.saturating_add(T::DbWeight::get().writes(1_u64))
}
/// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2),
/// added: 497, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1)
///
/// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added:
/// 51655, mode: MaxEncodedLen)
fn receive_single_message_proof_1_kb() -> Weight {
// Proof Size summary in bytes:
// Measured: `618`
// Estimated: `57170`
// Minimum execution time: 50_588 nanoseconds.
Weight::from_parts(53_544_000, 57170)
.saturating_add(T::DbWeight::get().reads(3_u64))
.saturating_add(T::DbWeight::get().writes(1_u64))
}
/// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2),
/// added: 497, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1)
///
/// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added:
/// 51655, mode: MaxEncodedLen)
fn receive_single_message_proof_16_kb() -> Weight {
// Proof Size summary in bytes:
// Measured: `618`
// Estimated: `57170`
// Minimum execution time: 78_269 nanoseconds.
Weight::from_parts(81_748_000, 57170)
.saturating_add(T::DbWeight::get().reads(3_u64))
.saturating_add(T::DbWeight::get().writes(1_u64))
}
/// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2),
/// added: 497, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1)
///
/// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added:
/// 539, mode: MaxEncodedLen)
///
/// Storage: BridgeRelayers RelayerRewards (r:1 w:1)
///
/// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540,
/// mode: MaxEncodedLen)
fn receive_delivery_proof_for_single_message() -> Weight {
// Proof Size summary in bytes:
// Measured: `579`
// Estimated: `9584`
// Minimum execution time: 45_786 nanoseconds.
Weight::from_parts(47_382_000, 9584)
.saturating_add(T::DbWeight::get().reads(4_u64))
.saturating_add(T::DbWeight::get().writes(2_u64))
}
/// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2),
/// added: 497, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1)
///
/// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added:
/// 539, mode: MaxEncodedLen)
///
/// Storage: BridgeRelayers RelayerRewards (r:1 w:1)
///
/// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540,
/// mode: MaxEncodedLen)
fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight {
// Proof Size summary in bytes:
// Measured: `596`
// Estimated: `9584`
// Minimum execution time: 44_544 nanoseconds.
Weight::from_parts(45_451_000, 9584)
.saturating_add(T::DbWeight::get().reads(4_u64))
.saturating_add(T::DbWeight::get().writes(2_u64))
}
/// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2),
/// added: 497, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1)
///
/// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added:
/// 539, mode: MaxEncodedLen)
///
/// Storage: BridgeRelayers RelayerRewards (r:2 w:2)
///
/// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540,
/// mode: MaxEncodedLen)
fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight {
// Proof Size summary in bytes:
// Measured: `596`
// Estimated: `12124`
// Minimum execution time: 47_344 nanoseconds.
Weight::from_parts(48_311_000, 12124)
.saturating_add(T::DbWeight::get().reads(5_u64))
.saturating_add(T::DbWeight::get().writes(3_u64))
}
/// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2),
/// added: 497, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1)
///
/// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added:
/// 51655, mode: MaxEncodedLen)
///
/// The range of component `i` is `[128, 2048]`.
fn receive_single_message_proof_with_dispatch(i: u32) -> Weight {
// Proof Size summary in bytes:
// Measured: `618`
// Estimated: `57170`
// Minimum execution time: 52_385 nanoseconds.
Weight::from_parts(54_919_468, 57170)
// Standard Error: 108
.saturating_add(Weight::from_parts(3_286, 0).saturating_mul(i.into()))
.saturating_add(T::DbWeight::get().reads(3_u64))
.saturating_add(T::DbWeight::get().writes(1_u64))
}
}
// For backwards compatibility and tests
impl WeightInfo for () {
/// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2),
/// added: 497, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1)
///
/// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added:
/// 51655, mode: MaxEncodedLen)
fn receive_single_message_proof() -> Weight {
// Proof Size summary in bytes:
// Measured: `618`
// Estimated: `57170`
// Minimum execution time: 52_321 nanoseconds.
Weight::from_parts(54_478_000, 57170)
.saturating_add(RocksDbWeight::get().reads(3_u64))
.saturating_add(RocksDbWeight::get().writes(1_u64))
}
/// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2),
/// added: 497, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1)
///
/// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added:
/// 51655, mode: MaxEncodedLen)
fn receive_two_messages_proof() -> Weight {
// Proof Size summary in bytes:
// Measured: `618`
// Estimated: `57170`
// Minimum execution time: 64_597 nanoseconds.
Weight::from_parts(69_267_000, 57170)
.saturating_add(RocksDbWeight::get().reads(3_u64))
.saturating_add(RocksDbWeight::get().writes(1_u64))
}
/// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2),
/// added: 497, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1)
///
/// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added:
/// 51655, mode: MaxEncodedLen)
fn receive_single_message_proof_with_outbound_lane_state() -> Weight {
// Proof Size summary in bytes:
// Measured: `618`
// Estimated: `57170`
// Minimum execution time: 64_079 nanoseconds.
Weight::from_parts(65_905_000, 57170)
.saturating_add(RocksDbWeight::get().reads(3_u64))
.saturating_add(RocksDbWeight::get().writes(1_u64))
}
/// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2),
/// added: 497, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1)
///
/// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added:
/// 51655, mode: MaxEncodedLen)
fn receive_single_message_proof_1_kb() -> Weight {
// Proof Size summary in bytes:
// Measured: `618`
// Estimated: `57170`
// Minimum execution time: 50_588 nanoseconds.
Weight::from_parts(53_544_000, 57170)
.saturating_add(RocksDbWeight::get().reads(3_u64))
.saturating_add(RocksDbWeight::get().writes(1_u64))
}
/// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2),
/// added: 497, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1)
///
/// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added:
/// 51655, mode: MaxEncodedLen)
fn receive_single_message_proof_16_kb() -> Weight {
// Proof Size summary in bytes:
// Measured: `618`
// Estimated: `57170`
// Minimum execution time: 78_269 nanoseconds.
Weight::from_parts(81_748_000, 57170)
.saturating_add(RocksDbWeight::get().reads(3_u64))
.saturating_add(RocksDbWeight::get().writes(1_u64))
}
/// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2),
/// added: 497, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1)
///
/// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added:
/// 539, mode: MaxEncodedLen)
///
/// Storage: BridgeRelayers RelayerRewards (r:1 w:1)
///
/// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540,
/// mode: MaxEncodedLen)
fn receive_delivery_proof_for_single_message() -> Weight {
// Proof Size summary in bytes:
// Measured: `579`
// Estimated: `9584`
// Minimum execution time: 45_786 nanoseconds.
Weight::from_parts(47_382_000, 9584)
.saturating_add(RocksDbWeight::get().reads(4_u64))
.saturating_add(RocksDbWeight::get().writes(2_u64))
}
/// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2),
/// added: 497, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1)
///
/// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added:
/// 539, mode: MaxEncodedLen)
///
/// Storage: BridgeRelayers RelayerRewards (r:1 w:1)
///
/// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540,
/// mode: MaxEncodedLen)
fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight {
// Proof Size summary in bytes:
// Measured: `596`
// Estimated: `9584`
// Minimum execution time: 44_544 nanoseconds.
Weight::from_parts(45_451_000, 9584)
.saturating_add(RocksDbWeight::get().reads(4_u64))
.saturating_add(RocksDbWeight::get().writes(2_u64))
}
/// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2),
/// added: 497, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1)
///
/// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added:
/// 539, mode: MaxEncodedLen)
///
/// Storage: BridgeRelayers RelayerRewards (r:2 w:2)
///
/// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540,
/// mode: MaxEncodedLen)
fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight {
// Proof Size summary in bytes:
// Measured: `596`
// Estimated: `12124`
// Minimum execution time: 47_344 nanoseconds.
Weight::from_parts(48_311_000, 12124)
.saturating_add(RocksDbWeight::get().reads(5_u64))
.saturating_add(RocksDbWeight::get().writes(3_u64))
}
/// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2),
/// added: 497, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1)
///
/// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added:
/// 51655, mode: MaxEncodedLen)
///
/// The range of component `i` is `[128, 2048]`.
fn receive_single_message_proof_with_dispatch(i: u32) -> Weight {
// Proof Size summary in bytes:
// Measured: `618`
// Estimated: `57170`
// Minimum execution time: 52_385 nanoseconds.
Weight::from_parts(54_919_468, 57170)
// Standard Error: 108
.saturating_add(Weight::from_parts(3_286, 0).saturating_mul(i.into()))
.saturating_add(RocksDbWeight::get().reads(3_u64))
.saturating_add(RocksDbWeight::get().writes(1_u64))
}
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Weight-related utilities.
use crate::weights::WeightInfo;
use bp_messages::{MessageNonce, UnrewardedRelayersState};
use bp_runtime::{PreComputedSize, Size};
use frame_support::weights::Weight;
/// Size of the message being delivered in benchmarks.
pub const EXPECTED_DEFAULT_MESSAGE_LENGTH: u32 = 128;
/// We assume that the size of signed extensions on all our chains, plus the size of all 'small'
/// arguments of the calls we're checking here, fits into 1KB.
const SIGNED_EXTENSIONS_SIZE: u32 = 1024;
/// Number of extra bytes (excluding the size of the storage value itself) of the storage proof.
/// This mostly depends on the number of entries (and their density) in the storage trie.
/// Some reserve is included to account for future chain growth.
pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024;
/// Ensure that the weights from the `WeightInfoExt` implementation look correct.
pub fn ensure_weights_are_correct<W: WeightInfoExt>() {
// all components of weight formulae must have zero `proof_size`, because the `proof_size` is
// benchmarked using `MaxEncodedLen` approach and there are no components that cause additional
// db reads
// verify `receive_messages_proof` weight components
assert_ne!(W::receive_messages_proof_overhead().ref_time(), 0);
assert_ne!(W::receive_messages_proof_overhead().proof_size(), 0);
// W::receive_messages_proof_messages_overhead(1).ref_time() may be zero because:
// the message processing code (`InboundLane::receive_message`) is minimal and may not be
// accounted by our benchmarks
assert_eq!(W::receive_messages_proof_messages_overhead(1).proof_size(), 0);
// W::receive_messages_proof_outbound_lane_state_overhead().ref_time() may be zero because:
// the outbound lane state processing code (`InboundLane::receive_state_update`) is minimal and
// may not be accounted by our benchmarks
assert_eq!(W::receive_messages_proof_outbound_lane_state_overhead().proof_size(), 0);
assert_ne!(W::storage_proof_size_overhead(1).ref_time(), 0);
assert_eq!(W::storage_proof_size_overhead(1).proof_size(), 0);
// verify `receive_messages_delivery_proof` weight components
assert_ne!(W::receive_messages_delivery_proof_overhead().ref_time(), 0);
assert_ne!(W::receive_messages_delivery_proof_overhead().proof_size(), 0);
// W::receive_messages_delivery_proof_messages_overhead(1).ref_time() may be zero because:
// there's no code that iterates over confirmed messages in confirmation transaction
assert_eq!(W::receive_messages_delivery_proof_messages_overhead(1).proof_size(), 0);
// W::receive_messages_delivery_proof_relayers_overhead(1).ref_time() may be zero because:
// runtime **can** choose not to pay any rewards to relayers
// W::receive_messages_delivery_proof_relayers_overhead(1).proof_size() is an exception
// it may or may not cause additional db reads, so proof size may vary
assert_ne!(W::storage_proof_size_overhead(1).ref_time(), 0);
assert_eq!(W::storage_proof_size_overhead(1).proof_size(), 0);
// verify `receive_message_proof` weight
let receive_messages_proof_weight =
W::receive_messages_proof_weight(&PreComputedSize(1), 10, Weight::zero());
assert_ne!(receive_messages_proof_weight.ref_time(), 0);
assert_ne!(receive_messages_proof_weight.proof_size(), 0);
messages_proof_size_does_not_affect_proof_size::<W>();
messages_count_does_not_affect_proof_size::<W>();
// verify `receive_message_proof` weight
let receive_messages_delivery_proof_weight = W::receive_messages_delivery_proof_weight(
&PreComputedSize(1),
&UnrewardedRelayersState::default(),
);
assert_ne!(receive_messages_delivery_proof_weight.ref_time(), 0);
assert_ne!(receive_messages_delivery_proof_weight.proof_size(), 0);
messages_delivery_proof_size_does_not_affect_proof_size::<W>();
total_messages_in_delivery_proof_does_not_affect_proof_size::<W>();
}
/// Ensure that we're able to receive maximal (by-size and by-weight) message from other chain.
pub fn ensure_able_to_receive_message<W: WeightInfoExt>(
max_extrinsic_size: u32,
max_extrinsic_weight: Weight,
max_incoming_message_proof_size: u32,
max_incoming_message_dispatch_weight: Weight,
) {
// verify that we're able to receive proof of maximal-size message
let max_delivery_transaction_size =
max_incoming_message_proof_size.saturating_add(SIGNED_EXTENSIONS_SIZE);
assert!(
max_delivery_transaction_size <= max_extrinsic_size,
"Size of maximal message delivery transaction {max_incoming_message_proof_size} + {SIGNED_EXTENSIONS_SIZE} is larger than maximal possible transaction size {max_extrinsic_size}",
);
// verify that we're able to receive proof of maximal-size message with maximal dispatch weight
let max_delivery_transaction_dispatch_weight = W::receive_messages_proof_weight(
&PreComputedSize(
(max_incoming_message_proof_size + W::expected_extra_storage_proof_size()) as usize,
),
1,
max_incoming_message_dispatch_weight,
);
assert!(
max_delivery_transaction_dispatch_weight.all_lte(max_extrinsic_weight),
"Weight of maximal message delivery transaction + {max_delivery_transaction_dispatch_weight} is larger than maximal possible transaction weight {max_extrinsic_weight}",
);
}
/// Ensure that we're able to receive maximal confirmation from other chain.
pub fn ensure_able_to_receive_confirmation<W: WeightInfoExt>(
max_extrinsic_size: u32,
max_extrinsic_weight: Weight,
max_inbound_lane_data_proof_size_from_peer_chain: u32,
max_unrewarded_relayer_entries_at_peer_inbound_lane: MessageNonce,
max_unconfirmed_messages_at_inbound_lane: MessageNonce,
) {
// verify that we're able to receive confirmation of maximal-size
let max_confirmation_transaction_size =
max_inbound_lane_data_proof_size_from_peer_chain.saturating_add(SIGNED_EXTENSIONS_SIZE);
assert!(
max_confirmation_transaction_size <= max_extrinsic_size,
"Size of maximal message delivery confirmation transaction {max_inbound_lane_data_proof_size_from_peer_chain} + {SIGNED_EXTENSIONS_SIZE} is larger than maximal possible transaction size {max_extrinsic_size}",
);
// verify that we're able to reward maximal number of relayers that have delivered maximal
// number of messages
let max_confirmation_transaction_dispatch_weight = W::receive_messages_delivery_proof_weight(
&PreComputedSize(max_inbound_lane_data_proof_size_from_peer_chain as usize),
&UnrewardedRelayersState {
unrewarded_relayer_entries: max_unrewarded_relayer_entries_at_peer_inbound_lane,
total_messages: max_unconfirmed_messages_at_inbound_lane,
..Default::default()
},
);
assert!(
max_confirmation_transaction_dispatch_weight.all_lte(max_extrinsic_weight),
"Weight of maximal confirmation transaction {max_confirmation_transaction_dispatch_weight} is larger than maximal possible transaction weight {max_extrinsic_weight}",
);
}
/// Panics if `proof_size` of message delivery call depends on the message proof size.
fn messages_proof_size_does_not_affect_proof_size<W: WeightInfoExt>() {
let dispatch_weight = Weight::zero();
let weight_when_proof_size_is_8k =
W::receive_messages_proof_weight(&PreComputedSize(8 * 1024), 1, dispatch_weight);
let weight_when_proof_size_is_16k =
W::receive_messages_proof_weight(&PreComputedSize(16 * 1024), 1, dispatch_weight);
ensure_weight_components_are_not_zero(weight_when_proof_size_is_8k);
ensure_weight_components_are_not_zero(weight_when_proof_size_is_16k);
ensure_proof_size_is_the_same(
weight_when_proof_size_is_8k,
weight_when_proof_size_is_16k,
"Messages proof size does not affect values that we read from our storage",
);
}
/// Panics if `proof_size` of message delivery call depends on the messages count.
///
/// In practice, it will depend on the messages count, because most probably every
/// message will read something from db during dispatch. But this must be accounted
/// by the `dispatch_weight`.
fn messages_count_does_not_affect_proof_size<W: WeightInfoExt>() {
let messages_proof_size = PreComputedSize(8 * 1024);
let dispatch_weight = Weight::zero();
let weight_of_one_incoming_message =
W::receive_messages_proof_weight(&messages_proof_size, 1, dispatch_weight);
let weight_of_two_incoming_messages =
W::receive_messages_proof_weight(&messages_proof_size, 2, dispatch_weight);
ensure_weight_components_are_not_zero(weight_of_one_incoming_message);
ensure_weight_components_are_not_zero(weight_of_two_incoming_messages);
ensure_proof_size_is_the_same(
weight_of_one_incoming_message,
weight_of_two_incoming_messages,
"Number of same-lane incoming messages does not affect values that we read from our storage",
);
}
/// Panics if `proof_size` of delivery confirmation call depends on the delivery proof size.
fn messages_delivery_proof_size_does_not_affect_proof_size<W: WeightInfoExt>() {
let relayers_state = UnrewardedRelayersState {
unrewarded_relayer_entries: 1,
messages_in_oldest_entry: 1,
total_messages: 1,
last_delivered_nonce: 1,
};
let weight_when_proof_size_is_8k =
W::receive_messages_delivery_proof_weight(&PreComputedSize(8 * 1024), &relayers_state);
let weight_when_proof_size_is_16k =
W::receive_messages_delivery_proof_weight(&PreComputedSize(16 * 1024), &relayers_state);
ensure_weight_components_are_not_zero(weight_when_proof_size_is_8k);
ensure_weight_components_are_not_zero(weight_when_proof_size_is_16k);
ensure_proof_size_is_the_same(
weight_when_proof_size_is_8k,
weight_when_proof_size_is_16k,
"Messages delivery proof size does not affect values that we read from our storage",
);
}
/// Panics if `proof_size` of delivery confirmation call depends on the number of confirmed
/// messages.
fn total_messages_in_delivery_proof_does_not_affect_proof_size<W: WeightInfoExt>() {
let proof_size = PreComputedSize(8 * 1024);
let weight_when_1k_messages_confirmed = W::receive_messages_delivery_proof_weight(
&proof_size,
&UnrewardedRelayersState {
unrewarded_relayer_entries: 1,
messages_in_oldest_entry: 1,
total_messages: 1024,
last_delivered_nonce: 1,
},
);
let weight_when_2k_messages_confirmed = W::receive_messages_delivery_proof_weight(
&proof_size,
&UnrewardedRelayersState {
unrewarded_relayer_entries: 1,
messages_in_oldest_entry: 1,
total_messages: 2048,
last_delivered_nonce: 1,
},
);
ensure_weight_components_are_not_zero(weight_when_1k_messages_confirmed);
ensure_weight_components_are_not_zero(weight_when_2k_messages_confirmed);
ensure_proof_size_is_the_same(
weight_when_1k_messages_confirmed,
weight_when_2k_messages_confirmed,
"More messages in delivery proof does not affect values that we read from our storage",
);
}
/// Panics if either the `Weight`'s `proof_size` or `ref_time` is zero.
fn ensure_weight_components_are_not_zero(weight: Weight) {
assert_ne!(weight.ref_time(), 0);
assert_ne!(weight.proof_size(), 0);
}
/// Panics if `proof_size` of `weight1` is not equal to `proof_size` of `weight2`.
fn ensure_proof_size_is_the_same(weight1: Weight, weight2: Weight, msg: &str) {
assert_eq!(
weight1.proof_size(),
weight2.proof_size(),
"{msg}: {} must be equal to {}",
weight1.proof_size(),
weight2.proof_size(),
);
}
/// Extended weight info.
pub trait WeightInfoExt: WeightInfo {
/// Size of proof that is already included in the single message delivery weight.
///
/// The message submitter (at source chain) has already covered this cost. But there are two
/// factors that may increase proof size: (1) the message size may be larger than predefined
/// and (2) relayer may add extra trie nodes to the proof. So if proof size is larger than
/// this value, we're going to charge relayer for that.
fn expected_extra_storage_proof_size() -> u32;
// Our configuration assumes that the runtime has special signed extensions used to:
//
// 1) reject obsolete delivery and confirmation transactions;
//
// 2) refund the transaction cost to the relayer and register its rewards.
//
// The checks in (1) are trivial and only touch storage values that are read during the call
// anyway, so we may ignore their weight.
//
// However, during (2) we read and update storage values of other pallets
// (`pallet-bridge-relayers` and the balances/assets pallet). So we need to add this weight to the
// weight of our call. Hence the two following methods.
/// Extra weight that is added to the `receive_messages_proof` call weight by signed extensions
/// that are declared at runtime level.
fn receive_messages_proof_overhead_from_runtime() -> Weight;
/// Extra weight that is added to the `receive_messages_delivery_proof` call weight by signed
/// extensions that are declared at runtime level.
fn receive_messages_delivery_proof_overhead_from_runtime() -> Weight;
// Functions that are directly mapped to extrinsics weights.
/// Weight of message delivery extrinsic.
fn receive_messages_proof_weight(
proof: &impl Size,
messages_count: u32,
dispatch_weight: Weight,
) -> Weight {
// basic components of extrinsic weight
let transaction_overhead = Self::receive_messages_proof_overhead();
let transaction_overhead_from_runtime =
Self::receive_messages_proof_overhead_from_runtime();
let outbound_state_delivery_weight =
Self::receive_messages_proof_outbound_lane_state_overhead();
let messages_delivery_weight =
Self::receive_messages_proof_messages_overhead(MessageNonce::from(messages_count));
let messages_dispatch_weight = dispatch_weight;
// proof size overhead weight
let expected_proof_size = EXPECTED_DEFAULT_MESSAGE_LENGTH
.saturating_mul(messages_count.saturating_sub(1))
.saturating_add(Self::expected_extra_storage_proof_size());
let actual_proof_size = proof.size();
let proof_size_overhead = Self::storage_proof_size_overhead(
actual_proof_size.saturating_sub(expected_proof_size),
);
transaction_overhead
.saturating_add(transaction_overhead_from_runtime)
.saturating_add(outbound_state_delivery_weight)
.saturating_add(messages_delivery_weight)
.saturating_add(messages_dispatch_weight)
.saturating_add(proof_size_overhead)
}
/// Weight of confirmation delivery extrinsic.
fn receive_messages_delivery_proof_weight(
proof: &impl Size,
relayers_state: &UnrewardedRelayersState,
) -> Weight {
// basic components of extrinsic weight
let transaction_overhead = Self::receive_messages_delivery_proof_overhead();
let transaction_overhead_from_runtime =
Self::receive_messages_delivery_proof_overhead_from_runtime();
let messages_overhead =
Self::receive_messages_delivery_proof_messages_overhead(relayers_state.total_messages);
let relayers_overhead = Self::receive_messages_delivery_proof_relayers_overhead(
relayers_state.unrewarded_relayer_entries,
);
// proof size overhead weight
let expected_proof_size = Self::expected_extra_storage_proof_size();
let actual_proof_size = proof.size();
let proof_size_overhead = Self::storage_proof_size_overhead(
actual_proof_size.saturating_sub(expected_proof_size),
);
transaction_overhead
.saturating_add(transaction_overhead_from_runtime)
.saturating_add(messages_overhead)
.saturating_add(relayers_overhead)
.saturating_add(proof_size_overhead)
}
// Functions that are used by extrinsics weights formulas.
/// Returns weight overhead of message delivery transaction (`receive_messages_proof`).
fn receive_messages_proof_overhead() -> Weight {
let weight_of_two_messages_and_two_tx_overheads =
Self::receive_single_message_proof().saturating_mul(2);
let weight_of_two_messages_and_single_tx_overhead = Self::receive_two_messages_proof();
weight_of_two_messages_and_two_tx_overheads
.saturating_sub(weight_of_two_messages_and_single_tx_overhead)
}
/// Returns the weight that needs to be accounted for when receiving a given number of messages
/// with the message delivery transaction (`receive_messages_proof`).
fn receive_messages_proof_messages_overhead(messages: MessageNonce) -> Weight {
let weight_of_two_messages_and_single_tx_overhead = Self::receive_two_messages_proof();
let weight_of_single_message_and_single_tx_overhead = Self::receive_single_message_proof();
weight_of_two_messages_and_single_tx_overhead
.saturating_sub(weight_of_single_message_and_single_tx_overhead)
.saturating_mul(messages as _)
}
/// Returns the weight that needs to be accounted for when the message delivery transaction
/// (`receive_messages_proof`) is carrying the outbound lane state proof.
fn receive_messages_proof_outbound_lane_state_overhead() -> Weight {
let weight_of_single_message_and_lane_state =
Self::receive_single_message_proof_with_outbound_lane_state();
let weight_of_single_message = Self::receive_single_message_proof();
weight_of_single_message_and_lane_state.saturating_sub(weight_of_single_message)
}
/// Returns weight overhead of delivery confirmation transaction
/// (`receive_messages_delivery_proof`).
fn receive_messages_delivery_proof_overhead() -> Weight {
let weight_of_two_messages_and_two_tx_overheads =
Self::receive_delivery_proof_for_single_message().saturating_mul(2);
let weight_of_two_messages_and_single_tx_overhead =
Self::receive_delivery_proof_for_two_messages_by_single_relayer();
weight_of_two_messages_and_two_tx_overheads
.saturating_sub(weight_of_two_messages_and_single_tx_overhead)
}
/// Returns the weight that needs to be accounted for when receiving confirmations for a given
/// number of messages with the delivery confirmation transaction (`receive_messages_delivery_proof`).
fn receive_messages_delivery_proof_messages_overhead(messages: MessageNonce) -> Weight {
let weight_of_two_messages =
Self::receive_delivery_proof_for_two_messages_by_single_relayer();
let weight_of_single_message = Self::receive_delivery_proof_for_single_message();
weight_of_two_messages
.saturating_sub(weight_of_single_message)
.saturating_mul(messages as _)
}
/// Returns the weight that needs to be accounted for when receiving confirmations for a given
/// number of relayer entries with the delivery confirmation transaction
/// (`receive_messages_delivery_proof`).
fn receive_messages_delivery_proof_relayers_overhead(relayers: MessageNonce) -> Weight {
let weight_of_two_messages_by_two_relayers =
Self::receive_delivery_proof_for_two_messages_by_two_relayers();
let weight_of_two_messages_by_single_relayer =
Self::receive_delivery_proof_for_two_messages_by_single_relayer();
weight_of_two_messages_by_two_relayers
.saturating_sub(weight_of_two_messages_by_single_relayer)
.saturating_mul(relayers as _)
}
/// Returns weight that needs to be accounted when storage proof of given size is received
/// (either in `receive_messages_proof` or `receive_messages_delivery_proof`).
///
/// **IMPORTANT**: this overhead is already included in the 'base' transaction cost - e.g. proof
/// size depends on messages count or number of entries in the unrewarded relayers set. So this
/// shouldn't be added to cost of transaction, but instead should act as a minimal cost that the
/// relayer must pay when it relays proof of given size (even if cost based on other parameters
/// is less than that cost).
fn storage_proof_size_overhead(proof_size: u32) -> Weight {
let proof_size_in_bytes = proof_size;
let byte_weight = (Self::receive_single_message_proof_16_kb() -
Self::receive_single_message_proof_1_kb()) /
(15 * 1024);
proof_size_in_bytes * byte_weight
}
// Functions that may be used by runtime developers.
/// Returns the dispatch weight of a message of the given size.
///
/// This function returns the correct value only if your runtime is configured to run the
/// `receive_single_message_proof_with_dispatch` benchmark. See its requirements for
/// details.
fn message_dispatch_weight(message_size: u32) -> Weight {
// There may be a tiny overweight/underweight here, because we don't account how message
// size affects all steps before dispatch. But the effect should be small enough and we
// may ignore it.
Self::receive_single_message_proof_with_dispatch(message_size)
.saturating_sub(Self::receive_single_message_proof())
}
}
impl WeightInfoExt for () {
fn expected_extra_storage_proof_size() -> u32 {
EXTRA_STORAGE_PROOF_SIZE
}
fn receive_messages_proof_overhead_from_runtime() -> Weight {
Weight::zero()
}
fn receive_messages_delivery_proof_overhead_from_runtime() -> Weight {
Weight::zero()
}
}
impl<T: frame_system::Config> WeightInfoExt for crate::weights::BridgeWeight<T> {
fn expected_extra_storage_proof_size() -> u32 {
EXTRA_STORAGE_PROOF_SIZE
}
fn receive_messages_proof_overhead_from_runtime() -> Weight {
Weight::zero()
}
fn receive_messages_delivery_proof_overhead_from_runtime() -> Weight {
Weight::zero()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{mock::TestRuntime, weights::BridgeWeight};
#[test]
fn ensure_default_weights_are_correct() {
ensure_weights_are_correct::<BridgeWeight<TestRuntime>>();
}
}
[package]
name = "pallet-bridge-parachains"
version = "0.7.0"
description = "Module that allows bridged relay chains to exchange information on their parachains' heads."
authors.workspace = true
edition.workspace = true
license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
[lints]
workspace = true
[dependencies]
codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false }
log = { workspace = true }
scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
# Bridge Dependencies
bp-header-chain = { path = "../../primitives/header-chain", default-features = false }
bp-parachains = { path = "../../primitives/parachains", default-features = false }
bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false }
bp-runtime = { path = "../../primitives/runtime", default-features = false }
pallet-bridge-grandpa = { path = "../grandpa", default-features = false }
# Substrate Dependencies
frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false, optional = true }
frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-trie = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
[dev-dependencies]
bp-header-chain = { path = "../../primitives/header-chain" }
bp-test-utils = { path = "../../primitives/test-utils" }
sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" }
sp-io = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" }
[features]
default = ["std"]
std = [
"bp-header-chain/std",
"bp-parachains/std",
"bp-polkadot-core/std",
"bp-runtime/std",
"codec/std",
"frame-benchmarking/std",
"frame-support/std",
"frame-system/std",
"log/std",
"pallet-bridge-grandpa/std",
"scale-info/std",
"sp-runtime/std",
"sp-std/std",
"sp-trie/std",
]
runtime-benchmarks = [
"frame-benchmarking/runtime-benchmarks",
"frame-support/runtime-benchmarks",
"frame-system/runtime-benchmarks",
"pallet-bridge-grandpa/runtime-benchmarks",
"sp-runtime/runtime-benchmarks",
]
try-runtime = [
"frame-support/try-runtime",
"frame-system/try-runtime",
"pallet-bridge-grandpa/try-runtime",
"sp-runtime/try-runtime",
]
# Bridge Parachains Pallet
The bridge parachains pallet is a light client for one or several parachains of the bridged relay chain.
It serves as a source of finalized parachain headers and is used when you need to build a bridge with
a parachain.
The pallet requires the [bridge GRANDPA pallet](../grandpa/) to be deployed on the same chain - it is used
to verify storage proofs generated at the bridged relay chain.
## A Brief Introduction into Parachains Finality
You can find detailed information on parachains finality in the
[Polkadot-SDK](https://github.com/paritytech/polkadot-sdk) repository. This section gives a brief overview of how
parachain finality works and how to build a light client for a parachain.
The key point is that a parachain produces blocks on its own, but it can't achieve finality without the
help of its relay chain. The parachain collators create a block and hand it over to the relay chain
validators. Validators validate the block and register the new parachain head in the
[`Heads` map](https://github.com/paritytech/polkadot-sdk/blob/bc5005217a8c2e7c95b9011c96d7e619879b1200/polkadot/runtime/parachains/src/paras/mod.rs#L683-L686)
of the [`paras`](https://github.com/paritytech/polkadot-sdk/tree/master/polkadot/runtime/parachains/src/paras) pallet,
deployed at the relay chain. Keep in mind that this pallet, deployed at the relay chain, is **NOT** a bridge pallet,
even though the names are similar.
What the bridge parachains pallet does is verify storage proofs of parachain heads within that
`Heads` map. It does that using a relay chain header that has been previously imported by the
[bridge GRANDPA pallet](../grandpa/). Once the proof is verified, the pallet knows that the given parachain
header has been finalized by the relay chain. The parachain header fields may then be used to verify storage
proofs coming from the parachain. This allows the pallet to be used e.g. as a source of finality for the messages
pallet.
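For illustration, the storage key of a single `Heads` entry can be computed with the same `bp-parachains`
helper that this pallet uses when reading heads from the proof. A minimal sketch - the `"Paras"` pallet name
and the `2000` parachain id are assumptions, use the values of the bridged relay chain:

```rust
use bp_parachains::parachain_head_storage_key_at_source;
use bp_polkadot_core::parachains::ParaId;

fn main() {
    // The pallet name must match the name of the `paras` pallet in the bridged
    // relay chain's `construct_runtime!` call.
    let key = parachain_head_storage_key_at_source("Paras", ParaId(2000));
    println!("`Heads` storage key for parachain 2000: {:x?}", key.0);
}
```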
## Pallet Operations
The main entrypoint of the pallet is the `submit_parachain_heads` call. It has three arguments:
- storage proof of parachain heads from the `Heads` map;
- parachain identifiers and hashes of their heads from the storage proof;
- the relay block, at which the storage proof has been generated.
The pallet may track multiple parachains, and those parachains may use different primitives - one may use 128-bit
block numbers, another 32-bit. To avoid extra decode operations, the pallet uses the relay chain block number to
order parachain headers: any finalized descendant of a finalized relay block `RB`, which has parachain block `PB` in
its `Heads` map, is guaranteed to have either `PB` or its descendant. So the parachain block number grows with the
relay block number.
The pallet may reject a parachain head if it already knows a better (or the same) head. In addition, the pallet
rejects heads of untracked parachains.
The pallet doesn't track anything apart from parachain heads, so it requires no initialization - it is ready to
accept headers right after deployment.
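As an illustration of the call shape (not a complete relayer), here is a minimal sketch that builds a
single-head update. The `2000` parachain id is an assumption; the relay block id, head hash and storage
proof would normally be fetched from the bridged relay chain:

```rust
use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId};
use pallet_bridge_parachains::{Call, Config, RelayBlockHash, RelayBlockNumber};

// Builds the `submit_parachain_heads` call data for a single parachain head.
fn single_head_update<T: Config<I>, I: 'static>(
    at_relay_block: (RelayBlockNumber, RelayBlockHash),
    para_head_hash: ParaHash,
    parachain_heads_proof: ParaHeadsProof,
) -> Call<T, I> {
    Call::submit_parachain_heads {
        at_relay_block,
        parachains: vec![(ParaId(2000), para_head_hash)],
        parachain_heads_proof,
    }
}
```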
## Non-Essential Functionality
There may be a special account in every runtime where the bridge parachains module is deployed. This
account, named 'module owner', is like a module-level sudo account - it is able to halt and
resume all module operations without requiring a runtime upgrade. Calls that are related to this
account are:
- `fn set_owner()`: the current module owner may call it to transfer "ownership" to another account;
- `fn set_operating_mode()`: the module owner (or sudo account) may call this function to stop all
module operations. After this call, all finality proofs will be rejected until a further `set_operating_mode` call.
This call may be used when something extraordinary happens with the bridge.
If the pallet owner is not defined, governance may be used to make those calls.
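For example, halting the pallet boils down to dispatching `set_operating_mode` with the `Halted` mode.
A minimal sketch of building that call (dispatch plumbing omitted):

```rust
use bp_runtime::BasicOperatingMode;
use pallet_bridge_parachains::{Call, Config};

// Returns the call data for halting the pallet; it must be dispatched by the
// pallet owner or by the root/governance origin.
fn halt_pallet<T: Config<I>, I: 'static>() -> Call<T, I> {
    Call::set_operating_mode { operating_mode: BasicOperatingMode::Halted }
}
```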
## Signed Extension to Reject Obsolete Headers
It is better for everyone (the chain and the submitters) to reject all transactions that submit
already-known parachain heads to the pallet. This way, we leave block space for other useful transactions and
we don't charge concurrent submitters for their honest actions.
To deal with that, we have a [signed extension](./src/call_ext) that may be added to the runtime.
It does exactly what is required - it rejects all transactions with already-known heads. The submitter
pays nothing for such transactions - they're simply removed from the transaction pool when the block
is built.
The signed extension, however, is a bit limited - it only works with transactions that provide a single
parachain head, so it won't work with transactions that submit multiple parachain heads. This fits our needs
for the [Kusama <> Polkadot bridge](../../docs/polkadot-kusama-bridge-overview.md). If you need to deal
with other transaction formats, you may implement a similar extension for your runtime.
You may also take a look at the [`generate_bridge_reject_obsolete_headers_and_messages`](../../bin/runtime-common/src/lib.rs)
macro that bundles several similar signed extensions into a single one.
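If you go the custom route, the building block is the `CallSubType` trait exposed by this pallet. A minimal
sketch of using it from your own extension's validation logic - `RuntimeCall` is a placeholder for the
runtime's outer call type and the surrounding extension plumbing is omitted:

```rust
use pallet_bridge_parachains::CallSubType;
use sp_runtime::transaction_validity::TransactionValidityError;

// Errors for a transaction that carries a single, already-known parachain head;
// calls updating multiple parachains are passed through unchecked (`Ok`).
fn reject_obsolete_single_head(call: &RuntimeCall) -> Result<(), TransactionValidityError> {
    call.check_obsolete_submit_parachain_heads().map(drop)
}
```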
## Parachains Finality Relay
We have an offchain actor that watches for new parachain heads and submits them to the bridged chain.
It is the parachains relay - you may look at the [crate-level documentation and the code](../../relays/parachains/).
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Parachains finality pallet benchmarking.
use crate::{
weights_ext::DEFAULT_PARACHAIN_HEAD_SIZE, Call, RelayBlockHash, RelayBlockHasher,
RelayBlockNumber,
};
use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId};
use bp_runtime::StorageProofSize;
use frame_benchmarking::{account, benchmarks_instance_pallet};
use frame_system::RawOrigin;
use sp_std::prelude::*;
/// Pallet we're benchmarking here.
pub struct Pallet<T: Config<I>, I: 'static = ()>(crate::Pallet<T, I>);
/// Trait that must be implemented by runtime to benchmark the parachains finality pallet.
pub trait Config<I: 'static>: crate::Config<I> {
/// Returns vector of supported parachains.
fn parachains() -> Vec<ParaId>;
/// Generate parachain heads proof and prepare environment for verifying this proof.
fn prepare_parachain_heads_proof(
parachains: &[ParaId],
parachain_head_size: u32,
proof_size: StorageProofSize,
) -> (RelayBlockNumber, RelayBlockHash, ParaHeadsProof, Vec<(ParaId, ParaHash)>);
}
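// Below is a minimal sketch of how a runtime may implement this trait. The `TestRuntime`
// name is illustrative and the proof-crafting part is runtime-specific:
//
// impl Config<()> for TestRuntime {
// 	fn parachains() -> Vec<ParaId> {
// 		vec![ParaId(1), ParaId(2)]
// 	}
//
// 	fn prepare_parachain_heads_proof(
// 		parachains: &[ParaId],
// 		parachain_head_size: u32,
// 		proof_size: StorageProofSize,
// 	) -> (RelayBlockNumber, RelayBlockHash, ParaHeadsProof, Vec<(ParaId, ParaHash)>) {
// 		// craft heads of `parachain_head_size` bytes for every parachain, put them
// 		// into a storage trie of (at least) `proof_size`, import the relay header
// 		// committing to that trie and return the generated storage proof
// 		unimplemented!("runtime-specific")
// 	}
// }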
benchmarks_instance_pallet! {
where_clause {
where
<T as pallet_bridge_grandpa::Config<T::BridgesGrandpaPalletInstance>>::BridgedChain:
bp_runtime::Chain<
BlockNumber = RelayBlockNumber,
Hash = RelayBlockHash,
Hasher = RelayBlockHasher,
>,
}
// Benchmark `submit_parachain_heads` extrinsic with different number of parachains.
submit_parachain_heads_with_n_parachains {
let p in 1..(T::parachains().len() + 1) as u32;
let sender = account("sender", 0, 0);
let mut parachains = T::parachains();
let _ = if p <= parachains.len() as u32 {
parachains.split_off(p as usize)
} else {
Default::default()
};
log::trace!(target: crate::LOG_TARGET, "=== {:?}", parachains.len());
let (relay_block_number, relay_block_hash, parachain_heads_proof, parachains_heads) = T::prepare_parachain_heads_proof(
&parachains,
DEFAULT_PARACHAIN_HEAD_SIZE,
StorageProofSize::Minimal(0),
);
let at_relay_block = (relay_block_number, relay_block_hash);
}: submit_parachain_heads(RawOrigin::Signed(sender), at_relay_block, parachains_heads, parachain_heads_proof)
verify {
for parachain in parachains {
assert!(crate::Pallet::<T, I>::best_parachain_head(parachain).is_some());
}
}
// Benchmark `submit_parachain_heads` extrinsic with 1kb proof size.
submit_parachain_heads_with_1kb_proof {
let sender = account("sender", 0, 0);
let parachains = vec![T::parachains()[0]];
let (relay_block_number, relay_block_hash, parachain_heads_proof, parachains_heads) = T::prepare_parachain_heads_proof(
&parachains,
DEFAULT_PARACHAIN_HEAD_SIZE,
StorageProofSize::HasLargeLeaf(1024),
);
let at_relay_block = (relay_block_number, relay_block_hash);
}: submit_parachain_heads(RawOrigin::Signed(sender), at_relay_block, parachains_heads, parachain_heads_proof)
verify {
for parachain in parachains {
assert!(crate::Pallet::<T, I>::best_parachain_head(parachain).is_some());
}
}
// Benchmark `submit_parachain_heads` extrinsic with 16kb proof size.
submit_parachain_heads_with_16kb_proof {
let sender = account("sender", 0, 0);
let parachains = vec![T::parachains()[0]];
let (relay_block_number, relay_block_hash, parachain_heads_proof, parachains_heads) = T::prepare_parachain_heads_proof(
&parachains,
DEFAULT_PARACHAIN_HEAD_SIZE,
StorageProofSize::HasLargeLeaf(16 * 1024),
);
let at_relay_block = (relay_block_number, relay_block_hash);
}: submit_parachain_heads(RawOrigin::Signed(sender), at_relay_block, parachains_heads, parachain_heads_proof)
verify {
for parachain in parachains {
assert!(crate::Pallet::<T, I>::best_parachain_head(parachain).is_some());
}
}
impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::TestRuntime)
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
use crate::{Config, GrandpaPalletOf, Pallet, RelayBlockHash, RelayBlockNumber};
use bp_header_chain::HeaderChain;
use bp_parachains::BestParaHeadHash;
use bp_polkadot_core::parachains::{ParaHash, ParaId};
use bp_runtime::OwnedBridgeModule;
use frame_support::{
dispatch::CallableCallFor,
traits::{Get, IsSubType},
};
use pallet_bridge_grandpa::SubmitFinalityProofHelper;
use sp_runtime::{
traits::Zero,
transaction_validity::{InvalidTransaction, TransactionValidityError},
RuntimeDebug,
};
/// Info about a `SubmitParachainHeads` call which tries to update a single parachain.
#[derive(PartialEq, RuntimeDebug)]
pub struct SubmitParachainHeadsInfo {
/// Number and hash of the finalized relay block that has been used to prove parachain
/// finality.
pub at_relay_block: (RelayBlockNumber, RelayBlockHash),
/// Parachain identifier.
pub para_id: ParaId,
/// Hash of the bundled parachain head.
pub para_head_hash: ParaHash,
/// If `true`, then the call must be free (assuming that everything else is valid) to
/// be treated as valid.
pub is_free_execution_expected: bool,
}
/// Verified `SubmitParachainHeadsInfo`.
#[derive(PartialEq, RuntimeDebug)]
pub struct VerifiedSubmitParachainHeadsInfo {
/// Base call information.
pub base: SubmitParachainHeadsInfo,
/// The difference between the bundled bridged relay chain header number and the relay chain
/// header number that was used to prove the best bridged parachain header known to us before
/// the call.
}
/// Helper struct that provides methods for working with the `SubmitParachainHeads` call.
pub struct SubmitParachainHeadsHelper<T: Config<I>, I: 'static> {
_phantom_data: sp_std::marker::PhantomData<(T, I)>,
}
impl<T: Config<I>, I: 'static> SubmitParachainHeadsHelper<T, I> {
/// Check that is called from signed extension and takes the `is_free_execution_expected`
/// into account.
pub fn check_obsolete_from_extension(
update: &SubmitParachainHeadsInfo,
) -> Result<RelayBlockNumber, TransactionValidityError> {
// first do all base checks
let improved_by = Self::check_obsolete(update)?;
// if we don't expect free execution - no more checks
if !update.is_free_execution_expected {
return Ok(improved_by);
}
// reject if no more free slots remaining in the block
if !SubmitFinalityProofHelper::<T, T::BridgesGrandpaPalletInstance>::can_import_anything_for_free() {
log::trace!(
target: crate::LOG_TARGET,
"The free parachain {:?} head can't be updated: no more free slots \
left in the block.",
update.para_id,
);
return Err(InvalidTransaction::Call.into());
}
// reject if we are importing parachain headers too often
if let Some(free_headers_interval) = T::FreeHeadersInterval::get() {
let reject = improved_by < free_headers_interval;
if reject {
log::trace!(
target: crate::LOG_TARGET,
"The free parachain {:?} head can't be updated: it improves previous
best head by {} while at least {} is expected.",
update.para_id,
improved_by,
free_headers_interval,
);
return Err(InvalidTransaction::Stale.into());
}
} else {
// free headers interval is not configured and call is expected to execute
// for free => it is a relayer error, it should've been able to detect that
}
Ok(improved_by)
}
/// Check if the para head provided by the `SubmitParachainHeads` is better than the best one
/// we know.
pub fn check_obsolete(
update: &SubmitParachainHeadsInfo,
) -> Result<RelayBlockNumber, TransactionValidityError> {
// check if we know better parachain head already
let improved_by = match crate::ParasInfo::<T, I>::get(update.para_id) {
Some(stored_best_head) => {
let improved_by = match update
.at_relay_block
.0
.checked_sub(stored_best_head.best_head_hash.at_relay_block_number)
{
Some(improved_by) if improved_by > Zero::zero() => improved_by,
_ => {
log::trace!(
target: crate::LOG_TARGET,
"The parachain head can't be updated. The parachain head for {:?} \
was already updated at better relay chain block {} >= {}.",
update.para_id,
stored_best_head.best_head_hash.at_relay_block_number,
update.at_relay_block.0
);
return Err(InvalidTransaction::Stale.into())
},
};
if stored_best_head.best_head_hash.head_hash == update.para_head_hash {
log::trace!(
target: crate::LOG_TARGET,
"The parachain head can't be updated. The parachain head hash for {:?} \
was already updated to {} at block {} < {}.",
update.para_id,
update.para_head_hash,
stored_best_head.best_head_hash.at_relay_block_number,
update.at_relay_block.0
);
return Err(InvalidTransaction::Stale.into())
}
improved_by
},
None => RelayBlockNumber::MAX,
};
// let's check if our chain had no reorgs and we still know the relay chain header
// used to craft the proof
if GrandpaPalletOf::<T, I>::finalized_header_state_root(update.at_relay_block.1).is_none() {
log::trace!(
target: crate::LOG_TARGET,
"The parachain {:?} head can't be updated. Relay chain header {}/{} used to create \
parachain proof is missing from the storage.",
update.para_id,
update.at_relay_block.0,
update.at_relay_block.1,
);
return Err(InvalidTransaction::Call.into())
}
Ok(improved_by)
}
/// Check if the `SubmitParachainHeads` was successfully executed.
pub fn was_successful(update: &SubmitParachainHeadsInfo) -> bool {
match crate::ParasInfo::<T, I>::get(update.para_id) {
Some(stored_best_head) =>
stored_best_head.best_head_hash ==
BestParaHeadHash {
at_relay_block_number: update.at_relay_block.0,
head_hash: update.para_head_hash,
},
None => false,
}
}
}
/// Trait representing a call that is a sub type of this pallet's call.
pub trait CallSubType<T: Config<I, RuntimeCall = Self>, I: 'static>:
IsSubType<CallableCallFor<Pallet<T, I>, T>>
{
/// Create a new instance of `SubmitParachainHeadsInfo` from a `SubmitParachainHeads` call with
/// one single parachain entry.
fn one_entry_submit_parachain_heads_info(&self) -> Option<SubmitParachainHeadsInfo> {
match self.is_sub_type() {
Some(crate::Call::<T, I>::submit_parachain_heads {
ref at_relay_block,
ref parachains,
..
}) => match &parachains[..] {
&[(para_id, para_head_hash)] => Some(SubmitParachainHeadsInfo {
at_relay_block: *at_relay_block,
para_id,
para_head_hash,
is_free_execution_expected: false,
}),
_ => None,
},
Some(crate::Call::<T, I>::submit_parachain_heads_ex {
ref at_relay_block,
ref parachains,
is_free_execution_expected,
..
}) => match &parachains[..] {
&[(para_id, para_head_hash)] => Some(SubmitParachainHeadsInfo {
at_relay_block: *at_relay_block,
para_id,
para_head_hash,
is_free_execution_expected: *is_free_execution_expected,
}),
_ => None,
},
_ => None,
}
}
/// Create a new instance of `SubmitParachainHeadsInfo` from a `SubmitParachainHeads` call with
/// one single parachain entry, if the entry is for the provided parachain id.
fn submit_parachain_heads_info_for(&self, para_id: u32) -> Option<SubmitParachainHeadsInfo> {
self.one_entry_submit_parachain_heads_info()
.filter(|update| update.para_id.0 == para_id)
}
/// Validate parachain heads in order to avoid "mining" transactions that provide
/// outdated bridged parachain heads. Without this validation, even honest relayers
/// may lose their funds if there are multiple relays running and submitting the
/// same information.
///
/// This validation only works with transactions that are updating a single parachain
/// head. We can't use unbounded validation - it may take too long and either break
/// block production, or "eat" a significant portion of the block production time literally
/// for nothing. In addition, single-parachain-head-per-transaction is how the
/// pallet will be used in our environment.
fn check_obsolete_submit_parachain_heads(
&self,
) -> Result<Option<VerifiedSubmitParachainHeadsInfo>, TransactionValidityError>
where
Self: Sized,
{
let update = match self.one_entry_submit_parachain_heads_info() {
Some(update) => update,
None => return Ok(None),
};
if Pallet::<T, I>::ensure_not_halted().is_err() {
return Err(InvalidTransaction::Call.into())
}
SubmitParachainHeadsHelper::<T, I>::check_obsolete_from_extension(&update)
.map(|improved_by| Some(VerifiedSubmitParachainHeadsInfo { base: update, improved_by }))
}
}
impl<T, I: 'static> CallSubType<T, I> for T::RuntimeCall
where
T: Config<I>,
T::RuntimeCall: IsSubType<CallableCallFor<Pallet<T, I>, T>>,
{
}
#[cfg(test)]
mod tests {
use crate::{
mock::{run_test, FreeHeadersInterval, RuntimeCall, TestRuntime},
CallSubType, PalletOperatingMode, ParaInfo, ParasInfo, RelayBlockHash, RelayBlockNumber,
};
use bp_header_chain::StoredHeaderData;
use bp_parachains::BestParaHeadHash;
use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId};
use bp_runtime::BasicOperatingMode;
fn validate_submit_parachain_heads(
num: RelayBlockNumber,
parachains: Vec<(ParaId, ParaHash)>,
) -> bool {
RuntimeCall::Parachains(crate::Call::<TestRuntime, ()>::submit_parachain_heads_ex {
at_relay_block: (num, [num as u8; 32].into()),
parachains,
parachain_heads_proof: ParaHeadsProof { storage_proof: Vec::new() },
is_free_execution_expected: false,
})
.check_obsolete_submit_parachain_heads()
.is_ok()
}
fn validate_free_submit_parachain_heads(
num: RelayBlockNumber,
parachains: Vec<(ParaId, ParaHash)>,
) -> bool {
RuntimeCall::Parachains(crate::Call::<TestRuntime, ()>::submit_parachain_heads_ex {
at_relay_block: (num, [num as u8; 32].into()),
parachains,
parachain_heads_proof: ParaHeadsProof { storage_proof: Vec::new() },
is_free_execution_expected: true,
})
.check_obsolete_submit_parachain_heads()
.is_ok()
}
fn insert_relay_block(num: RelayBlockNumber) {
pallet_bridge_grandpa::ImportedHeaders::<TestRuntime, crate::Instance1>::insert(
RelayBlockHash::from([num as u8; 32]),
StoredHeaderData { number: num, state_root: RelayBlockHash::from([10u8; 32]) },
);
}
fn sync_to_relay_header_10() {
ParasInfo::<TestRuntime, ()>::insert(
ParaId(1),
ParaInfo {
best_head_hash: BestParaHeadHash {
at_relay_block_number: 10,
head_hash: [1u8; 32].into(),
},
next_imported_hash_position: 0,
},
);
}
#[test]
fn extension_rejects_header_from_the_obsolete_relay_block() {
run_test(|| {
// when current best finalized is #10 and we're trying to import header#5 => tx is
// rejected
sync_to_relay_header_10();
assert!(!validate_submit_parachain_heads(5, vec![(ParaId(1), [1u8; 32].into())]));
});
}
#[test]
fn extension_rejects_header_from_the_same_relay_block() {
run_test(|| {
// when current best finalized is #10 and we're trying to import header#10 => tx is
// rejected
sync_to_relay_header_10();
assert!(!validate_submit_parachain_heads(10, vec![(ParaId(1), [1u8; 32].into())]));
});
}
#[test]
fn extension_rejects_header_from_new_relay_block_with_same_hash() {
run_test(|| {
// when current best finalized is #10 and we're trying to import the same parachain head
// at relay block #20 => tx is rejected
sync_to_relay_header_10();
assert!(!validate_submit_parachain_heads(20, vec![(ParaId(1), [1u8; 32].into())]));
});
}
#[test]
fn extension_rejects_header_if_pallet_is_halted() {
run_test(|| {
// when pallet is halted => tx is rejected
sync_to_relay_header_10();
PalletOperatingMode::<TestRuntime, ()>::put(BasicOperatingMode::Halted);
assert!(!validate_submit_parachain_heads(15, vec![(ParaId(1), [2u8; 32].into())]));
});
}
#[test]
fn extension_accepts_new_header() {
run_test(|| {
// when current best finalized is #10 and we're trying to import header#15 => tx is
// accepted
sync_to_relay_header_10();
insert_relay_block(15);
assert!(validate_submit_parachain_heads(15, vec![(ParaId(1), [2u8; 32].into())]));
});
}
#[test]
fn extension_accepts_if_more_than_one_parachain_is_submitted() {
run_test(|| {
// when current best finalized is #10 and we're trying to import header#5, but another
// parachain head is also supplied => tx is accepted
sync_to_relay_header_10();
assert!(validate_submit_parachain_heads(
5,
vec![(ParaId(1), [1u8; 32].into()), (ParaId(2), [1u8; 32].into())]
));
});
}
#[test]
fn extension_rejects_initial_parachain_head_if_missing_relay_chain_header() {
run_test(|| {
// when relay chain header is unknown => "obsolete"
assert!(!validate_submit_parachain_heads(10, vec![(ParaId(1), [1u8; 32].into())]));
// when relay chain header is unknown => "ok"
insert_relay_block(10);
assert!(validate_submit_parachain_heads(10, vec![(ParaId(1), [1u8; 32].into())]));
});
}
#[test]
fn extension_rejects_free_parachain_head_if_missing_relay_chain_header() {
run_test(|| {
sync_to_relay_header_10();
// when relay chain header is unknown => "obsolete"
assert!(!validate_submit_parachain_heads(15, vec![(ParaId(2), [15u8; 32].into())]));
// when relay chain header is unknown => "ok"
insert_relay_block(15);
assert!(validate_submit_parachain_heads(15, vec![(ParaId(2), [15u8; 32].into())]));
});
}
#[test]
fn extension_rejects_free_parachain_head_if_no_free_slots_remaining() {
run_test(|| {
// when current best finalized is #10 and we're trying to import header#15 => tx should
// be accepted
sync_to_relay_header_10();
insert_relay_block(15);
// ... but since we have specified `is_free_execution_expected = true`, it'll be
// rejected
assert!(!validate_free_submit_parachain_heads(15, vec![(ParaId(1), [2u8; 32].into())]));
// ... if we specify `is_free_execution_expected = false`, it'll be accepted
assert!(validate_submit_parachain_heads(15, vec![(ParaId(1), [2u8; 32].into())]));
});
}
#[test]
fn extension_rejects_free_parachain_head_if_improves_by_is_below_expected() {
run_test(|| {
// when current best finalized is #10 and we're trying to import header#15 => tx should
// be accepted
sync_to_relay_header_10();
insert_relay_block(10 + FreeHeadersInterval::get() - 1);
insert_relay_block(10 + FreeHeadersInterval::get());
// try to submit at 10 + FreeHeadersInterval::get() - 1 => failure
let relay_header = 10 + FreeHeadersInterval::get() - 1;
assert!(!validate_free_submit_parachain_heads(
relay_header,
vec![(ParaId(1), [2u8; 32].into())]
));
// try to submit at 10 + FreeHeadersInterval::get() => ok
let relay_header = 10 + FreeHeadersInterval::get();
assert!(validate_free_submit_parachain_heads(
relay_header,
vec![(ParaId(1), [2u8; 32].into())]
));
});
}
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Parachains finality module.
//!
//! This module needs to be deployed with the GRANDPA module, which syncs relay
//! chain blocks. The main entry point of this module is `submit_parachain_heads`, which
//! accepts a storage proof of some parachain `Heads` entries from the bridged relay chain.
//! It requires the corresponding relay headers to be already synced.
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
pub use weights::WeightInfo;
pub use weights_ext::WeightInfoExt;
use bp_header_chain::{HeaderChain, HeaderChainError};
use bp_parachains::{parachain_head_storage_key_at_source, ParaInfo, ParaStoredHeaderData};
use bp_polkadot_core::parachains::{ParaHash, ParaHead, ParaHeadsProof, ParaId};
use bp_runtime::{Chain, HashOf, HeaderId, HeaderIdOf, Parachain, StorageProofError};
use frame_support::{dispatch::PostDispatchInfo, DefaultNoBound};
use pallet_bridge_grandpa::SubmitFinalityProofHelper;
use sp_std::{marker::PhantomData, vec::Vec};
#[cfg(feature = "runtime-benchmarks")]
use bp_parachains::ParaStoredHeaderDataBuilder;
#[cfg(feature = "runtime-benchmarks")]
use bp_runtime::HeaderOf;
#[cfg(feature = "runtime-benchmarks")]
use codec::Encode;
// Re-export in crate namespace for `construct_runtime!`.
pub use call_ext::*;
pub use pallet::*;
pub mod weights;
pub mod weights_ext;
#[cfg(feature = "runtime-benchmarks")]
pub mod benchmarking;
mod call_ext;
#[cfg(test)]
mod mock;
/// The target that will be used when publishing logs related to this pallet.
pub const LOG_TARGET: &str = "runtime::bridge-parachains";
/// Block hash of the bridged relay chain.
pub type RelayBlockHash = bp_polkadot_core::Hash;
/// Block number of the bridged relay chain.
pub type RelayBlockNumber = bp_polkadot_core::BlockNumber;
/// Hasher of the bridged relay chain.
pub type RelayBlockHasher = bp_polkadot_core::Hasher;
/// Artifacts of the parachains head update.
struct UpdateParachainHeadArtifacts {
/// New best head of the parachain.
pub best_head: ParaInfo,
/// If `true`, some old parachain head has been pruned during update.
pub prune_happened: bool,
}
#[frame_support::pallet]
pub mod pallet {
use super::*;
use bp_parachains::{
BestParaHeadHash, ImportedParaHeadsKeyProvider, ParaStoredHeaderDataBuilder,
ParasInfoKeyProvider,
};
use bp_runtime::{
BasicOperatingMode, BoundedStorageValue, OwnedBridgeModule, StorageDoubleMapKeyProvider,
StorageMapKeyProvider,
};
use frame_support::pallet_prelude::*;
use frame_system::pallet_prelude::*;
/// Stored parachain head data of given parachains pallet.
pub type StoredParaHeadDataOf<T, I> =
BoundedStorageValue<<T as Config<I>>::MaxParaHeadDataSize, ParaStoredHeaderData>;
/// Weight info of the given parachains pallet.
pub type WeightInfoOf<T, I> = <T as Config<I>>::WeightInfo;
/// Bridge GRANDPA pallet that is used to verify parachain proofs.
pub type GrandpaPalletOf<T, I> =
pallet_bridge_grandpa::Pallet<T, <T as Config<I>>::BridgesGrandpaPalletInstance>;
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event<T: Config<I>, I: 'static = ()> {
/// The caller has provided head of parachain that the pallet is not configured to track.
UntrackedParachainRejected {
/// Identifier of the parachain that is not tracked by the pallet.
parachain: ParaId,
},
/// The caller has declared that they have provided the given parachain head, but it is
/// missing from the storage proof.
MissingParachainHead {
/// Identifier of the parachain with missing head.
parachain: ParaId,
},
/// The caller has provided a parachain head hash that does not match the hash read from the
/// storage proof.
IncorrectParachainHeadHash {
/// Identifier of the parachain with incorrect head hash.
parachain: ParaId,
/// Specified parachain head hash.
parachain_head_hash: ParaHash,
/// Actual parachain head hash.
actual_parachain_head_hash: ParaHash,
},
/// The caller has provided obsolete parachain head, which is already known to the pallet.
RejectedObsoleteParachainHead {
/// Identifier of the parachain with obsolete head.
parachain: ParaId,
/// Obsolete parachain head hash.
parachain_head_hash: ParaHash,
},
/// The caller has provided parachain head that exceeds the maximal configured head size.
RejectedLargeParachainHead {
/// Identifier of the parachain with rejected head.
parachain: ParaId,
/// Parachain head hash.
parachain_head_hash: ParaHash,
/// Parachain head size.
parachain_head_size: u32,
},
/// Parachain head has been updated.
UpdatedParachainHead {
/// Identifier of the parachain that has been updated.
parachain: ParaId,
/// Parachain head hash.
parachain_head_hash: ParaHash,
},
}
#[pallet::error]
pub enum Error<T, I = ()> {
/// Relay chain block hash is unknown to us.
UnknownRelayChainBlock,
/// The number of the stored relay block is different from the one provided by the relayer.
InvalidRelayChainBlockNumber,
/// Parachain heads storage proof is invalid.
HeaderChainStorageProof(HeaderChainError),
/// Error generated by the `OwnedBridgeModule` trait.
BridgeModule(bp_runtime::OwnedBridgeModuleError),
}
/// Convenience trait for defining `BridgedChain` bounds.
pub trait BoundedBridgeGrandpaConfig<I: 'static>:
pallet_bridge_grandpa::Config<I, BridgedChain = Self::BridgedRelayChain>
{
/// Type of the bridged relay chain.
type BridgedRelayChain: Chain<
BlockNumber = RelayBlockNumber,
Hash = RelayBlockHash,
Hasher = RelayBlockHasher,
>;
}
impl<T, I: 'static> BoundedBridgeGrandpaConfig<I> for T
where
T: pallet_bridge_grandpa::Config<I>,
T::BridgedChain:
Chain<BlockNumber = RelayBlockNumber, Hash = RelayBlockHash, Hasher = RelayBlockHasher>,
{
type BridgedRelayChain = T::BridgedChain;
}
#[pallet::config]
#[pallet::disable_frame_system_supertrait_check]
pub trait Config<I: 'static = ()>:
BoundedBridgeGrandpaConfig<Self::BridgesGrandpaPalletInstance>
{
/// The overarching event type.
type RuntimeEvent: From<Event<Self, I>>
+ IsType<<Self as frame_system::Config>::RuntimeEvent>;
/// Benchmarks results from runtime we're plugged into.
type WeightInfo: WeightInfoExt;
/// Instance of bridges GRANDPA pallet (within this runtime) that this pallet is linked to.
///
/// The GRANDPA pallet instance must be configured to import headers of relay chain that
/// we're interested in.
///
/// The associated GRANDPA pallet is also used to configure free parachain heads
/// submissions. The parachain head submission will be free if:
///
/// 1) the submission contains exactly one parachain head update that succeeds;
///
/// 2) the difference between the relay chain block number used to prove the new parachain head
/// and the one used to prove the previous best parachain head is at least the
/// `FreeHeadersInterval`, configured at the associated GRANDPA pallet;
///
/// 3) there are free submission slots remaining in the block. This is also configured at the
/// associated GRANDPA pallet using the `MaxFreeHeadersPerBlock` parameter.
///
/// The first parachain head submission is also free for the submitter, if free submissions
/// are still accepted in this block.
type BridgesGrandpaPalletInstance: 'static;
/// Name of the original `paras` pallet in the `construct_runtime!()` call at the bridged
/// chain.
///
/// Please keep in mind that this should be the name of the `runtime_parachains::paras`
/// pallet from polkadot repository, not the `pallet-bridge-parachains`.
#[pallet::constant]
type ParasPalletName: Get<&'static str>;
/// Parachain head data builder.
///
/// We never store parachain heads here, since they may be too big (e.g. because of large
/// digest items). Instead we're using the same approach as the `pallet-bridge-grandpa`
/// pallet - we only store `bp_header_chain::StoredHeaderData` (number and state root),
/// which is enough for our applications. However, we work with different parachains here
/// and they can use different primitives (for block numbers and hashes). So we can't store
/// it directly. Instead, we store the `StoredHeaderData` in SCALE-encoded form,
/// wrapped into `bp_parachains::ParaStoredHeaderData`.
///
/// This builder helps to convert from `HeadData` to `bp_parachains::ParaStoredHeaderData`.
type ParaStoredHeaderDataBuilder: ParaStoredHeaderDataBuilder;
/// Maximal number of single parachain heads to keep in the storage.
///
/// The setting is there to prevent growing the on-chain state indefinitely. Note that
/// the setting does not relate to parachain block numbers - we will simply keep that many
/// items in the storage, so it doesn't guarantee any fixed timeframe for heads.
///
/// Incautious change of this constant may lead to orphan entries in the runtime storage.
#[pallet::constant]
type HeadsToKeep: Get<u32>;
/// Maximal size (in bytes) of the SCALE-encoded parachain head data
/// (`bp_parachains::ParaStoredHeaderData`).
///
/// Keep in mind that the size of any tracked parachain header data must not exceed this
/// value. So if you're going to track multiple parachains, one of which is using large
/// hashes, you should choose this value to fit the largest of them.
///
/// There are no mandatory headers in this pallet, so it can't stall if some header
/// exceeds this bound.
#[pallet::constant]
type MaxParaHeadDataSize: Get<u32>;
}
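// Below is a minimal sketch of a runtime configuration. `Runtime`, the instance names,
// the bridged `RialtoParachain` type and the constant values are all illustrative:
//
// parameter_types! {
// 	pub const ParasPalletName: &'static str = "Paras";
// 	pub const HeadsToKeep: u32 = 1_024;
// 	pub const MaxParaHeadDataSize: u32 = 1_024;
// }
//
// impl pallet_bridge_parachains::Config<WithRialtoParachainsInstance> for Runtime {
// 	type RuntimeEvent = RuntimeEvent;
// 	type WeightInfo = pallet_bridge_parachains::weights::BridgeWeight<Runtime>;
// 	type BridgesGrandpaPalletInstance = WithRialtoGrandpaInstance;
// 	type ParasPalletName = ParasPalletName;
// 	type ParaStoredHeaderDataBuilder =
// 		bp_parachains::SingleParaStoredHeaderDataBuilder<RialtoParachain>;
// 	type HeadsToKeep = HeadsToKeep;
// 	type MaxParaHeadDataSize = MaxParaHeadDataSize;
// }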
/// Optional pallet owner.
///
/// Pallet owner has the right to halt all pallet operations and then resume them. If it is
/// `None`, then there are no direct ways to halt/resume pallet operations, but other
/// runtime methods may still be used to do that (e.g. a democracy referendum to update the
/// operating mode directly or to call `set_operating_mode`).
#[pallet::storage]
pub type PalletOwner<T: Config<I>, I: 'static = ()> =
StorageValue<_, T::AccountId, OptionQuery>;
/// The current operating mode of the pallet.
///
/// Depending on the mode either all, or no transactions will be allowed.
#[pallet::storage]
pub type PalletOperatingMode<T: Config<I>, I: 'static = ()> =
StorageValue<_, BasicOperatingMode, ValueQuery>;
/// Parachains info.
///
/// Contains the following info:
/// - best parachain head hash
/// - the head of the `ImportedParaHashes` ring buffer
#[pallet::storage]
pub type ParasInfo<T: Config<I>, I: 'static = ()> = StorageMap<
Hasher = <ParasInfoKeyProvider as StorageMapKeyProvider>::Hasher,
Key = <ParasInfoKeyProvider as StorageMapKeyProvider>::Key,
Value = <ParasInfoKeyProvider as StorageMapKeyProvider>::Value,
QueryKind = OptionQuery,
OnEmpty = GetDefault,
MaxValues = MaybeMaxParachains<T, I>,
>;
/// State roots of parachain heads which have been imported into the pallet.
#[pallet::storage]
pub type ImportedParaHeads<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
Hasher1 = <ImportedParaHeadsKeyProvider as StorageDoubleMapKeyProvider>::Hasher1,
Key1 = <ImportedParaHeadsKeyProvider as StorageDoubleMapKeyProvider>::Key1,
Hasher2 = <ImportedParaHeadsKeyProvider as StorageDoubleMapKeyProvider>::Hasher2,
Key2 = <ImportedParaHeadsKeyProvider as StorageDoubleMapKeyProvider>::Key2,
Value = StoredParaHeadDataOf<T, I>,
QueryKind = OptionQuery,
OnEmpty = GetDefault,
MaxValues = MaybeMaxTotalParachainHashes<T, I>,
>;
/// A ring buffer of imported parachain head hashes. Ordered by the insertion time.
#[pallet::storage]
pub(super) type ImportedParaHashes<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
Hasher1 = Blake2_128Concat,
Key1 = ParaId,
Hasher2 = Twox64Concat,
Key2 = u32,
Value = ParaHash,
QueryKind = OptionQuery,
OnEmpty = GetDefault,
MaxValues = MaybeMaxTotalParachainHashes<T, I>,
>;
#[pallet::pallet]
pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);
impl<T: Config<I>, I: 'static> OwnedBridgeModule<T> for Pallet<T, I> {
const LOG_TARGET: &'static str = LOG_TARGET;
type OwnerStorage = PalletOwner<T, I>;
type OperatingMode = BasicOperatingMode;
type OperatingModeStorage = PalletOperatingMode<T, I>;
}
#[pallet::call]
impl<T: Config<I>, I: 'static> Pallet<T, I> {
/// Submit proof of one or several parachain heads.
///
/// The proof is supposed to be proof of some `Heads` entries from the
/// `polkadot-runtime-parachains::paras` pallet instance, deployed at the bridged chain.
/// The proof is supposed to be crafted at the `relay_header_hash` that must already be
/// imported by corresponding GRANDPA pallet at this chain.
///
/// The call fails if:
///
/// - the pallet is halted;
///
/// - the relay chain block `at_relay_block` is not imported by the associated bridge
/// GRANDPA pallet.
///
/// The call may succeed, but some heads may not be updated e.g. because the pallet already
/// knows a better head or because the parachain isn't tracked by the pallet.
#[pallet::call_index(0)]
#[pallet::weight(WeightInfoOf::<T, I>::submit_parachain_heads_weight(
T::DbWeight::get(),
parachain_heads_proof,
parachains.len() as _,
))]
pub fn submit_parachain_heads(
origin: OriginFor<T>,
at_relay_block: (RelayBlockNumber, RelayBlockHash),
parachains: Vec<(ParaId, ParaHash)>,
parachain_heads_proof: ParaHeadsProof,
) -> DispatchResultWithPostInfo {
Self::submit_parachain_heads_ex(
origin,
at_relay_block,
parachains,
parachain_heads_proof,
false,
)
}
/// Change `PalletOwner`.
///
/// May only be called either by root, or by `PalletOwner`.
#[pallet::call_index(1)]
#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
pub fn set_owner(origin: OriginFor<T>, new_owner: Option<T::AccountId>) -> DispatchResult {
<Self as OwnedBridgeModule<_>>::set_owner(origin, new_owner)
}
/// Halt or resume all pallet operations.
///
/// May only be called either by root, or by `PalletOwner`.
#[pallet::call_index(2)]
#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
pub fn set_operating_mode(
origin: OriginFor<T>,
operating_mode: BasicOperatingMode,
) -> DispatchResult {
<Self as OwnedBridgeModule<_>>::set_operating_mode(origin, operating_mode)
}
/// Submit proof of one or several parachain heads.
///
/// The proof is supposed to be proof of some `Heads` entries from the
/// `polkadot-runtime-parachains::paras` pallet instance, deployed at the bridged chain.
/// The proof is supposed to be crafted at the `relay_header_hash` that must already be
/// imported by corresponding GRANDPA pallet at this chain.
///
/// The call fails if:
///
/// - the pallet is halted;
///
/// - the relay chain block `at_relay_block` is not imported by the associated bridge
/// GRANDPA pallet.
///
/// The call may succeed, but some heads may not be updated e.g. because the pallet already
/// knows a better head or because the parachain isn't tracked by the pallet.
///
/// The `is_free_execution_expected` parameter is not really used inside the call. It is
/// used by the transaction extension, which should be registered at the runtime level. If
/// this parameter is `true`, the transaction will be treated as invalid if the call won't
/// be executed for free. If the transaction extension is not used by the runtime, this
/// parameter is not used at all.
#[pallet::call_index(3)]
#[pallet::weight(WeightInfoOf::<T, I>::submit_parachain_heads_weight(
T::DbWeight::get(),
parachain_heads_proof,
parachains.len() as _,
))]
pub fn submit_parachain_heads_ex(
origin: OriginFor<T>,
at_relay_block: (RelayBlockNumber, RelayBlockHash),
parachains: Vec<(ParaId, ParaHash)>,
parachain_heads_proof: ParaHeadsProof,
_is_free_execution_expected: bool,
) -> DispatchResultWithPostInfo {
Self::ensure_not_halted().map_err(Error::<T, I>::BridgeModule)?;
ensure_signed(origin)?;
let total_parachains = parachains.len();
let free_headers_interval =
T::FreeHeadersInterval::get().unwrap_or(RelayBlockNumber::MAX);
// the pallet allows two kinds of free submissions
// 1) if the distance between all parachain heads is at least [`T::FreeHeadersInterval`]
// 2) if all heads are the first heads of their parachains
let mut free_parachain_heads = 0;
// we'll need the relay chain header to verify that parachain heads are always increasing.
let (relay_block_number, relay_block_hash) = at_relay_block;
let relay_block = pallet_bridge_grandpa::ImportedHeaders::<
T,
T::BridgesGrandpaPalletInstance,
>::get(relay_block_hash)
.ok_or(Error::<T, I>::UnknownRelayChainBlock)?;
ensure!(
relay_block.number == relay_block_number,
Error::<T, I>::InvalidRelayChainBlockNumber,
);
// now parse storage proof and read parachain heads
let mut actual_weight = WeightInfoOf::<T, I>::submit_parachain_heads_weight(
T::DbWeight::get(),
&parachain_heads_proof,
parachains.len() as _,
);
let mut is_updated_something = false;
let mut storage = GrandpaPalletOf::<T, I>::storage_proof_checker(
relay_block_hash,
parachain_heads_proof.storage_proof,
)
.map_err(Error::<T, I>::HeaderChainStorageProof)?;
for (parachain, parachain_head_hash) in parachains {
let parachain_head = match Self::read_parachain_head(&mut storage, parachain) {
Ok(Some(parachain_head)) => parachain_head,
Ok(None) => {
log::trace!(
target: LOG_TARGET,
"The head of parachain {:?} is None. {}",
parachain,
if ParasInfo::<T, I>::contains_key(parachain) {
"Looks like it is not yet registered at the source relay chain"
} else {
"Looks like it has been deregistered from the source relay chain"
},
);
Self::deposit_event(Event::MissingParachainHead { parachain });
continue
},
Err(e) => {
log::trace!(
target: LOG_TARGET,
"The read of head of parachain {:?} has failed: {:?}",
parachain,
e,
);
Self::deposit_event(Event::MissingParachainHead { parachain });
continue
},
};
// if relayer has specified invalid parachain head hash, ignore the head
// (this isn't strictly necessary, but better safe than sorry)
let actual_parachain_head_hash = parachain_head.hash();
if parachain_head_hash != actual_parachain_head_hash {
log::trace!(
target: LOG_TARGET,
"The submitter has specified invalid parachain {:?} head hash: \
{:?} vs {:?}",
parachain,
parachain_head_hash,
actual_parachain_head_hash,
);
Self::deposit_event(Event::IncorrectParachainHeadHash {
parachain,
parachain_head_hash,
actual_parachain_head_hash,
});
continue
}
// convert from parachain head into stored parachain head data
let parachain_head_size = parachain_head.0.len();
let parachain_head_data =
match T::ParaStoredHeaderDataBuilder::try_build(parachain, &parachain_head) {
Some(parachain_head_data) => parachain_head_data,
None => {
log::trace!(
target: LOG_TARGET,
"The head of parachain {:?} has been provided, but it is not tracked by the pallet",
parachain,
);
Self::deposit_event(Event::UntrackedParachainRejected { parachain });
continue
},
};
let update_result: Result<_, ()> =
ParasInfo::<T, I>::try_mutate(parachain, |stored_best_head| {
let is_free = parachain_head_size <
T::ParaStoredHeaderDataBuilder::max_free_head_size() as usize &&
match stored_best_head {
Some(ref best_head)
if at_relay_block.0.saturating_sub(
best_head.best_head_hash.at_relay_block_number,
) >= free_headers_interval =>
true,
Some(_) => false,
None => true,
};
let artifacts = Pallet::<T, I>::update_parachain_head(
parachain,
stored_best_head.take(),
(relay_block_number, relay_block_hash),
parachain_head_data,
parachain_head_hash,
)?;
is_updated_something = true;
if is_free {
free_parachain_heads = free_parachain_heads + 1;
}
*stored_best_head = Some(artifacts.best_head);
Ok(artifacts.prune_happened)
});
// we refund the head-write weight if the update has not happened, and the pruning weight if
// the pruning has not happened
let is_update_happened = update_result.is_ok();
if !is_update_happened {
actual_weight = actual_weight.saturating_sub(
WeightInfoOf::<T, I>::parachain_head_storage_write_weight(
T::DbWeight::get(),
),
);
}
let is_prune_happened = matches!(update_result, Ok(true));
if !is_prune_happened {
actual_weight = actual_weight.saturating_sub(
WeightInfoOf::<T, I>::parachain_head_pruning_weight(T::DbWeight::get()),
);
}
}
// even though we may have accepted some parachain heads, we can't allow relayers to
// submit proof with unused trie nodes
// => treat this as an error
//
// (we can throw error here, because now all our calls are transactional)
storage.ensure_no_unused_nodes().map_err(|e| {
Error::<T, I>::HeaderChainStorageProof(HeaderChainError::StorageProof(e))
})?;
// check if we allow this submission for free
let is_free = total_parachains == 1
&& free_parachain_heads == total_parachains
&& SubmitFinalityProofHelper::<T, T::BridgesGrandpaPalletInstance>::can_import_anything_for_free();
let pays_fee = if is_free {
log::trace!(target: LOG_TARGET, "Parachain heads update transaction is free");
pallet_bridge_grandpa::on_free_header_imported::<T, T::BridgesGrandpaPalletInstance>(
);
Pays::No
} else {
log::trace!(target: LOG_TARGET, "Parachain heads update transaction is paid");
Pays::Yes
};
Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee })
}
}
impl<T: Config<I>, I: 'static> Pallet<T, I> {
/// Get stored parachain info.
pub fn best_parachain_info(parachain: ParaId) -> Option<ParaInfo> {
ParasInfo::<T, I>::get(parachain)
}
/// Get best finalized head data of the given parachain.
pub fn best_parachain_head(parachain: ParaId) -> Option<ParaStoredHeaderData> {
let best_para_head_hash = ParasInfo::<T, I>::get(parachain)?.best_head_hash.head_hash;
ImportedParaHeads::<T, I>::get(parachain, best_para_head_hash).map(|h| h.into_inner())
}
/// Get best finalized head hash of the given parachain.
pub fn best_parachain_head_hash(parachain: ParaId) -> Option<ParaHash> {
Some(ParasInfo::<T, I>::get(parachain)?.best_head_hash.head_hash)
}
/// Get best finalized head id of the given parachain.
pub fn best_parachain_head_id<C: Chain<Hash = ParaHash> + Parachain>(
) -> Result<Option<HeaderIdOf<C>>, codec::Error> {
let parachain = ParaId(C::PARACHAIN_ID);
let best_head_hash = match Self::best_parachain_head_hash(parachain) {
Some(best_head_hash) => best_head_hash,
None => return Ok(None),
};
let encoded_head = match Self::parachain_head(parachain, best_head_hash) {
Some(encoded_head) => encoded_head,
None => return Ok(None),
};
encoded_head
.decode_parachain_head_data::<C>()
.map(|data| Some(HeaderId(data.number, best_head_hash)))
}
/// Get parachain head data with given hash.
pub fn parachain_head(parachain: ParaId, hash: ParaHash) -> Option<ParaStoredHeaderData> {
ImportedParaHeads::<T, I>::get(parachain, hash).map(|h| h.into_inner())
}
/// Read parachain head from storage proof.
fn read_parachain_head(
storage: &mut bp_runtime::StorageProofChecker<RelayBlockHasher>,
parachain: ParaId,
) -> Result<Option<ParaHead>, StorageProofError> {
let parachain_head_key =
parachain_head_storage_key_at_source(T::ParasPalletName::get(), parachain);
storage.read_and_decode_value(parachain_head_key.0.as_ref())
}
/// Try to update parachain head.
pub(super) fn update_parachain_head(
parachain: ParaId,
stored_best_head: Option<ParaInfo>,
new_at_relay_block: (RelayBlockNumber, RelayBlockHash),
new_head_data: ParaStoredHeaderData,
new_head_hash: ParaHash,
) -> Result<UpdateParachainHeadArtifacts, ()> {
// check if head has been already updated at better relay chain block. Without this
// check, we may import heads in random order
let update = SubmitParachainHeadsInfo {
at_relay_block: new_at_relay_block,
para_id: parachain,
para_head_hash: new_head_hash,
// doesn't actually matter here
is_free_execution_expected: false,
};
if SubmitParachainHeadsHelper::<T, I>::check_obsolete(&update).is_err() {
Self::deposit_event(Event::RejectedObsoleteParachainHead {
parachain,
parachain_head_hash: new_head_hash,
});
return Err(())
}
// verify that the parachain head data size is <= `MaxParaHeadDataSize`
let updated_head_data =
match StoredParaHeadDataOf::<T, I>::try_from_inner(new_head_data) {
Ok(updated_head_data) => updated_head_data,
Err(e) => {
log::trace!(
target: LOG_TARGET,
"The parachain head can't be updated. The parachain head data size \
for {:?} is {}. It exceeds maximal configured size {}.",
parachain,
e.value_size,
e.maximal_size,
);
Self::deposit_event(Event::RejectedLargeParachainHead {
parachain,
parachain_head_hash: new_head_hash,
parachain_head_size: e.value_size as _,
});
return Err(())
},
};
let next_imported_hash_position = stored_best_head
.map_or(0, |stored_best_head| stored_best_head.next_imported_hash_position);
// insert updated best parachain head
let head_hash_to_prune =
ImportedParaHashes::<T, I>::try_get(parachain, next_imported_hash_position);
let updated_best_para_head = ParaInfo {
best_head_hash: BestParaHeadHash {
at_relay_block_number: new_at_relay_block.0,
head_hash: new_head_hash,
},
next_imported_hash_position: (next_imported_hash_position + 1) %
T::HeadsToKeep::get(),
};
ImportedParaHashes::<T, I>::insert(
parachain,
next_imported_hash_position,
new_head_hash,
);
ImportedParaHeads::<T, I>::insert(parachain, new_head_hash, updated_head_data);
log::trace!(
target: LOG_TARGET,
"Updated head of parachain {:?} to {} at relay block {}",
parachain,
new_head_hash,
new_at_relay_block.0,
);
// remove old head
let prune_happened = head_hash_to_prune.is_ok();
if let Ok(head_hash_to_prune) = head_hash_to_prune {
log::trace!(
target: LOG_TARGET,
"Pruning old head of parachain {:?}: {}",
parachain,
head_hash_to_prune,
);
ImportedParaHeads::<T, I>::remove(parachain, head_hash_to_prune);
}
Self::deposit_event(Event::UpdatedParachainHead {
parachain,
parachain_head_hash: new_head_hash,
});
Ok(UpdateParachainHeadArtifacts { best_head: updated_best_para_head, prune_happened })
}
}
#[pallet::genesis_config]
#[derive(DefaultNoBound)]
pub struct GenesisConfig<T: Config<I>, I: 'static = ()> {
/// Initial pallet operating mode.
pub operating_mode: BasicOperatingMode,
/// Initial pallet owner.
pub owner: Option<T::AccountId>,
/// Dummy marker.
pub phantom: sp_std::marker::PhantomData<I>,
}
#[pallet::genesis_build]
impl<T: Config<I>, I: 'static> BuildGenesisConfig for GenesisConfig<T, I> {
fn build(&self) {
PalletOperatingMode::<T, I>::put(self.operating_mode);
if let Some(ref owner) = self.owner {
PalletOwner::<T, I>::put(owner);
}
}
}
/// Returns maximal number of parachains, supported by the pallet.
pub struct MaybeMaxParachains<T, I>(PhantomData<(T, I)>);
impl<T: Config<I>, I: 'static> Get<Option<u32>> for MaybeMaxParachains<T, I> {
fn get() -> Option<u32> {
Some(T::ParaStoredHeaderDataBuilder::supported_parachains())
}
}
/// Returns total number of all parachains hashes/heads, stored by the pallet.
pub struct MaybeMaxTotalParachainHashes<T, I>(PhantomData<(T, I)>);
impl<T: Config<I>, I: 'static> Get<Option<u32>> for MaybeMaxTotalParachainHashes<T, I> {
fn get() -> Option<u32> {
Some(
T::ParaStoredHeaderDataBuilder::supported_parachains()
.saturating_mul(T::HeadsToKeep::get()),
)
}
}
}
/// Single parachain header chain adapter.
pub struct ParachainHeaders<T, I, C>(PhantomData<(T, I, C)>);
impl<T: Config<I>, I: 'static, C: Parachain<Hash = ParaHash>> HeaderChain<C>
for ParachainHeaders<T, I, C>
{
fn finalized_header_state_root(hash: HashOf<C>) -> Option<HashOf<C>> {
Pallet::<T, I>::parachain_head(ParaId(C::PARACHAIN_ID), hash)
.and_then(|head| head.decode_parachain_head_data::<C>().ok())
.map(|h| h.state_root)
}
}
/// (Re)initialize pallet with given header for using it in `pallet-bridge-messages` benchmarks.
#[cfg(feature = "runtime-benchmarks")]
pub fn initialize_for_benchmarks<T: Config<I>, I: 'static, PC: Parachain<Hash = ParaHash>>(
header: HeaderOf<PC>,
) {
let parachain = ParaId(PC::PARACHAIN_ID);
let parachain_head = ParaHead(header.encode());
let updated_head_data = T::ParaStoredHeaderDataBuilder::try_build(parachain, &parachain_head)
.expect("failed to build stored parachain head in benchmarks");
Pallet::<T, I>::update_parachain_head(
parachain,
None,
(0, Default::default()),
updated_head_data,
parachain_head.hash(),
)
.expect("failed to insert parachain head in benchmarks");
}
#[cfg(test)]
pub(crate) mod tests {
use super::*;
use crate::mock::{
run_test, test_relay_header, BigParachain, BigParachainHeader, FreeHeadersInterval,
RegularParachainHasher, RegularParachainHeader, RelayBlockHeader,
RuntimeEvent as TestEvent, RuntimeOrigin, TestRuntime, UNTRACKED_PARACHAIN_ID,
};
use bp_test_utils::prepare_parachain_heads_proof;
use codec::Encode;
use bp_header_chain::{justification::GrandpaJustification, StoredHeaderGrandpaInfo};
use bp_parachains::{
BestParaHeadHash, BridgeParachainCall, ImportedParaHeadsKeyProvider, ParasInfoKeyProvider,
};
use bp_runtime::{
BasicOperatingMode, OwnedBridgeModuleError, StorageDoubleMapKeyProvider,
StorageMapKeyProvider,
};
use bp_test_utils::{
authority_list, generate_owned_bridge_module_tests, make_default_justification,
TEST_GRANDPA_SET_ID,
};
use frame_support::{
assert_noop, assert_ok,
dispatch::DispatchResultWithPostInfo,
pallet_prelude::Pays,
storage::generator::{StorageDoubleMap, StorageMap},
traits::Get,
weights::Weight,
};
use frame_system::{EventRecord, Pallet as System, Phase};
use sp_core::Hasher;
use sp_runtime::{traits::Header as HeaderT, DispatchError};
type BridgesGrandpaPalletInstance = pallet_bridge_grandpa::Instance1;
type WeightInfo = <TestRuntime as Config>::WeightInfo;
type DbWeight = <TestRuntime as frame_system::Config>::DbWeight;
pub(crate) fn initialize(state_root: RelayBlockHash) -> RelayBlockHash {
pallet_bridge_grandpa::FreeHeadersRemaining::<TestRuntime, BridgesGrandpaPalletInstance>::set(Some(100));
pallet_bridge_grandpa::Pallet::<TestRuntime, BridgesGrandpaPalletInstance>::initialize(
RuntimeOrigin::root(),
bp_header_chain::InitializationData {
header: Box::new(test_relay_header(0, state_root)),
authority_list: authority_list(),
set_id: 1,
operating_mode: BasicOperatingMode::Normal,
},
)
.unwrap();
System::<TestRuntime>::set_block_number(1);
System::<TestRuntime>::reset_events();
test_relay_header(0, state_root).hash()
}
fn proceed(
num: RelayBlockNumber,
state_root: RelayBlockHash,
) -> (ParaHash, GrandpaJustification<RelayBlockHeader>) {
let header = test_relay_header(num, state_root);
let hash = header.hash();
let justification = make_default_justification(&header);
assert_ok!(
pallet_bridge_grandpa::Pallet::<TestRuntime, BridgesGrandpaPalletInstance>::submit_finality_proof_ex(
RuntimeOrigin::signed(1),
Box::new(header),
justification.clone(),
TEST_GRANDPA_SET_ID,
false,
)
);
(hash, justification)
}
fn initial_best_head(parachain: u32) -> ParaInfo {
ParaInfo {
best_head_hash: BestParaHeadHash {
at_relay_block_number: 0,
head_hash: head_data(parachain, 0).hash(),
},
next_imported_hash_position: 1,
}
}
pub(crate) fn head_data(parachain: u32, head_number: u32) -> ParaHead {
ParaHead(
RegularParachainHeader::new(
head_number as _,
Default::default(),
RegularParachainHasher::hash(&(parachain, head_number).encode()),
Default::default(),
Default::default(),
)
.encode(),
)
}
fn stored_head_data(parachain: u32, head_number: u32) -> ParaStoredHeaderData {
ParaStoredHeaderData(
(head_number as u64, RegularParachainHasher::hash(&(parachain, head_number).encode()))
.encode(),
)
}
fn big_head_data(parachain: u32, head_number: u32) -> ParaHead {
ParaHead(
BigParachainHeader::new(
head_number as _,
Default::default(),
RegularParachainHasher::hash(&(parachain, head_number).encode()),
Default::default(),
Default::default(),
)
.encode(),
)
}
fn big_stored_head_data(parachain: u32, head_number: u32) -> ParaStoredHeaderData {
ParaStoredHeaderData(
(head_number as u128, RegularParachainHasher::hash(&(parachain, head_number).encode()))
.encode(),
)
}
fn head_hash(parachain: u32, head_number: u32) -> ParaHash {
head_data(parachain, head_number).hash()
}
fn import_parachain_1_head(
relay_chain_block: RelayBlockNumber,
relay_state_root: RelayBlockHash,
parachains: Vec<(ParaId, ParaHash)>,
proof: ParaHeadsProof,
) -> DispatchResultWithPostInfo {
Pallet::<TestRuntime>::submit_parachain_heads(
RuntimeOrigin::signed(1),
(relay_chain_block, test_relay_header(relay_chain_block, relay_state_root).hash()),
parachains,
proof,
)
}
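// Expected post-dispatch weight of importing a single parachain#1 head: the full call weight
// includes potential pruning weight, which is refunded (subtracted here) when no pruning is
// expected.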
fn weight_of_import_parachain_1_head(proof: &ParaHeadsProof, prune_expected: bool) -> Weight {
let db_weight = <TestRuntime as frame_system::Config>::DbWeight::get();
WeightInfoOf::<TestRuntime, ()>::submit_parachain_heads_weight(db_weight, proof, 1)
.saturating_sub(if prune_expected {
Weight::zero()
} else {
WeightInfoOf::<TestRuntime, ()>::parachain_head_pruning_weight(db_weight)
})
}
#[test]
fn submit_parachain_heads_checks_operating_mode() {
let (state_root, proof, parachains) =
prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(1, head_data(1, 0))]);
run_test(|| {
initialize(state_root);
// `submit_parachain_heads()` should fail when the pallet is halted.
PalletOperatingMode::<TestRuntime>::put(BasicOperatingMode::Halted);
assert_noop!(
Pallet::<TestRuntime>::submit_parachain_heads(
RuntimeOrigin::signed(1),
(0, test_relay_header(0, state_root).hash()),
parachains.clone(),
proof.clone(),
),
Error::<TestRuntime>::BridgeModule(OwnedBridgeModuleError::Halted)
);
// `submit_parachain_heads()` should succeed now that the pallet is resumed.
PalletOperatingMode::<TestRuntime>::put(BasicOperatingMode::Normal);
assert_ok!(Pallet::<TestRuntime>::submit_parachain_heads(
RuntimeOrigin::signed(1),
(0, test_relay_header(0, state_root).hash()),
parachains,
proof,
),);
});
}
#[test]
fn imports_initial_parachain_heads() {
let (state_root, proof, parachains) =
prepare_parachain_heads_proof::<RegularParachainHeader>(vec![
(1, head_data(1, 0)),
(3, head_data(3, 10)),
]);
run_test(|| {
initialize(state_root);
// we're trying to update heads of parachains 1 and 3
let expected_weight =
WeightInfo::submit_parachain_heads_weight(DbWeight::get(), &proof, 2);
let result = Pallet::<TestRuntime>::submit_parachain_heads(
RuntimeOrigin::signed(1),
(0, test_relay_header(0, state_root).hash()),
parachains,
proof,
);
assert_ok!(result);
assert_eq!(result.expect("checked above").pays_fee, Pays::Yes);
assert_eq!(result.expect("checked above").actual_weight, Some(expected_weight));
// parachains 1 and 3 are updated; parachain 2 is not, because its head is missing from the proof
assert_eq!(ParasInfo::<TestRuntime>::get(ParaId(1)), Some(initial_best_head(1)));
assert_eq!(ParasInfo::<TestRuntime>::get(ParaId(2)), None);
assert_eq!(
ParasInfo::<TestRuntime>::get(ParaId(3)),
Some(ParaInfo {
best_head_hash: BestParaHeadHash {
at_relay_block_number: 0,
head_hash: head_data(3, 10).hash()
},
next_imported_hash_position: 1,
})
);
assert_eq!(
ImportedParaHeads::<TestRuntime>::get(
ParaId(1),
initial_best_head(1).best_head_hash.head_hash
)
.map(|h| h.into_inner()),
Some(stored_head_data(1, 0))
);
assert_eq!(
ImportedParaHeads::<TestRuntime>::get(
ParaId(2),
initial_best_head(2).best_head_hash.head_hash
)
.map(|h| h.into_inner()),
None
);
assert_eq!(
ImportedParaHeads::<TestRuntime>::get(ParaId(3), head_hash(3, 10))
.map(|h| h.into_inner()),
Some(stored_head_data(3, 10))
);
assert_eq!(
System::<TestRuntime>::events(),
vec![
EventRecord {
phase: Phase::Initialization,
event: TestEvent::Parachains(Event::UpdatedParachainHead {
parachain: ParaId(1),
parachain_head_hash: initial_best_head(1).best_head_hash.head_hash,
}),
topics: vec![],
},
EventRecord {
phase: Phase::Initialization,
event: TestEvent::Parachains(Event::UpdatedParachainHead {
parachain: ParaId(3),
parachain_head_hash: head_data(3, 10).hash(),
}),
topics: vec![],
}
],
);
});
}
#[test]
fn imports_parachain_heads_is_able_to_progress() {
let (state_root_5, proof_5, parachains_5) =
prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(1, head_data(1, 5))]);
let (state_root_10, proof_10, parachains_10) =
prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(1, head_data(1, 10))]);
run_test(|| {
// start with relay block #0 and import head#5 of parachain#1
initialize(state_root_5);
let result = import_parachain_1_head(0, state_root_5, parachains_5, proof_5);
// first parachain head is imported for free
assert_eq!(result.unwrap().pays_fee, Pays::No);
assert_eq!(
ParasInfo::<TestRuntime>::get(ParaId(1)),
Some(ParaInfo {
best_head_hash: BestParaHeadHash {
at_relay_block_number: 0,
head_hash: head_data(1, 5).hash()
},
next_imported_hash_position: 1,
})
);
assert_eq!(
ImportedParaHeads::<TestRuntime>::get(ParaId(1), head_data(1, 5).hash())
.map(|h| h.into_inner()),
Some(stored_head_data(1, 5))
);
assert_eq!(
ImportedParaHeads::<TestRuntime>::get(ParaId(1), head_data(1, 10).hash())
.map(|h| h.into_inner()),
None
);
assert_eq!(
System::<TestRuntime>::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: TestEvent::Parachains(Event::UpdatedParachainHead {
parachain: ParaId(1),
parachain_head_hash: head_data(1, 5).hash(),
}),
topics: vec![],
}],
);
// import head#10 of parachain#1 at relay block #1
let (relay_1_hash, justification) = proceed(1, state_root_10);
let result = import_parachain_1_head(1, state_root_10, parachains_10, proof_10);
// second parachain head is NOT imported for free
assert_eq!(result.unwrap().pays_fee, Pays::Yes);
assert_eq!(
ParasInfo::<TestRuntime>::get(ParaId(1)),
Some(ParaInfo {
best_head_hash: BestParaHeadHash {
at_relay_block_number: 1,
head_hash: head_data(1, 10).hash()
},
next_imported_hash_position: 2,
})
);
assert_eq!(
ImportedParaHeads::<TestRuntime>::get(ParaId(1), head_data(1, 5).hash())
.map(|h| h.into_inner()),
Some(stored_head_data(1, 5))
);
assert_eq!(
ImportedParaHeads::<TestRuntime>::get(ParaId(1), head_data(1, 10).hash())
.map(|h| h.into_inner()),
Some(stored_head_data(1, 10))
);
assert_eq!(
System::<TestRuntime>::events(),
vec![
EventRecord {
phase: Phase::Initialization,
event: TestEvent::Parachains(Event::UpdatedParachainHead {
parachain: ParaId(1),
parachain_head_hash: head_data(1, 5).hash(),
}),
topics: vec![],
},
EventRecord {
phase: Phase::Initialization,
event: TestEvent::Grandpa1(
pallet_bridge_grandpa::Event::UpdatedBestFinalizedHeader {
number: 1,
hash: relay_1_hash,
grandpa_info: StoredHeaderGrandpaInfo {
finality_proof: justification,
new_verification_context: None,
},
}
),
topics: vec![],
},
EventRecord {
phase: Phase::Initialization,
event: TestEvent::Parachains(Event::UpdatedParachainHead {
parachain: ParaId(1),
parachain_head_hash: head_data(1, 10).hash(),
}),
topics: vec![],
}
],
);
});
}
#[test]
fn ignores_untracked_parachain() {
let (state_root, proof, parachains) =
prepare_parachain_heads_proof::<RegularParachainHeader>(vec![
(1, head_data(1, 5)),
(UNTRACKED_PARACHAIN_ID, head_data(1, 5)),
(2, head_data(1, 5)),
]);
run_test(|| {
// start with relay block #0 and try to import head#5 of parachain#1 and untracked
// parachain
let expected_weight =
WeightInfo::submit_parachain_heads_weight(DbWeight::get(), &proof, 3)
.saturating_sub(WeightInfo::parachain_head_storage_write_weight(
DbWeight::get(),
));
initialize(state_root);
let result = Pallet::<TestRuntime>::submit_parachain_heads(
RuntimeOrigin::signed(1),
(0, test_relay_header(0, state_root).hash()),
parachains,
proof,
);
assert_ok!(result);
assert_eq!(result.expect("checked above").actual_weight, Some(expected_weight));
assert_eq!(
ParasInfo::<TestRuntime>::get(ParaId(1)),
Some(ParaInfo {
best_head_hash: BestParaHeadHash {
at_relay_block_number: 0,
head_hash: head_data(1, 5).hash()
},
next_imported_hash_position: 1,
})
);
assert_eq!(ParasInfo::<TestRuntime>::get(ParaId(UNTRACKED_PARACHAIN_ID)), None,);
assert_eq!(
ParasInfo::<TestRuntime>::get(ParaId(2)),
Some(ParaInfo {
best_head_hash: BestParaHeadHash {
at_relay_block_number: 0,
head_hash: head_data(1, 5).hash()
},
next_imported_hash_position: 1,
})
);
assert_eq!(
System::<TestRuntime>::events(),
vec![
EventRecord {
phase: Phase::Initialization,
event: TestEvent::Parachains(Event::UpdatedParachainHead {
parachain: ParaId(1),
parachain_head_hash: head_data(1, 5).hash(),
}),
topics: vec![],
},
EventRecord {
phase: Phase::Initialization,
event: TestEvent::Parachains(Event::UntrackedParachainRejected {
parachain: ParaId(UNTRACKED_PARACHAIN_ID),
}),
topics: vec![],
},
EventRecord {
phase: Phase::Initialization,
event: TestEvent::Parachains(Event::UpdatedParachainHead {
parachain: ParaId(2),
parachain_head_hash: head_data(1, 5).hash(),
}),
topics: vec![],
}
],
);
});
}
#[test]
fn does_nothing_when_already_imported_this_head_at_previous_relay_header() {
let (state_root, proof, parachains) =
prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(1, head_data(1, 0))]);
run_test(|| {
// import head#0 of parachain#1 at relay block#0
initialize(state_root);
assert_ok!(import_parachain_1_head(0, state_root, parachains.clone(), proof.clone()));
assert_eq!(ParasInfo::<TestRuntime>::get(ParaId(1)), Some(initial_best_head(1)));
assert_eq!(
System::<TestRuntime>::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: TestEvent::Parachains(Event::UpdatedParachainHead {
parachain: ParaId(1),
parachain_head_hash: initial_best_head(1).best_head_hash.head_hash,
}),
topics: vec![],
}],
);
// try to import head#0 of parachain#1 at relay block#1
// => call succeeds, but nothing is changed
let (relay_1_hash, justification) = proceed(1, state_root);
assert_ok!(import_parachain_1_head(1, state_root, parachains, proof));
assert_eq!(ParasInfo::<TestRuntime>::get(ParaId(1)), Some(initial_best_head(1)));
assert_eq!(
System::<TestRuntime>::events(),
vec![
EventRecord {
phase: Phase::Initialization,
event: TestEvent::Parachains(Event::UpdatedParachainHead {
parachain: ParaId(1),
parachain_head_hash: initial_best_head(1).best_head_hash.head_hash,
}),
topics: vec![],
},
EventRecord {
phase: Phase::Initialization,
event: TestEvent::Grandpa1(
pallet_bridge_grandpa::Event::UpdatedBestFinalizedHeader {
number: 1,
hash: relay_1_hash,
grandpa_info: StoredHeaderGrandpaInfo {
finality_proof: justification,
new_verification_context: None,
}
}
),
topics: vec![],
},
EventRecord {
phase: Phase::Initialization,
event: TestEvent::Parachains(Event::RejectedObsoleteParachainHead {
parachain: ParaId(1),
parachain_head_hash: initial_best_head(1).best_head_hash.head_hash,
}),
topics: vec![],
}
],
);
});
}
#[test]
fn does_nothing_when_already_imported_head_at_better_relay_header() {
let (state_root_5, proof_5, parachains_5) =
prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(1, head_data(1, 5))]);
let (state_root_10, proof_10, parachains_10) =
prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(1, head_data(1, 10))]);
run_test(|| {
// start with relay block #0
initialize(state_root_5);
// head#10 of parachain#1 at relay block#1
let (relay_1_hash, justification) = proceed(1, state_root_10);
assert_ok!(import_parachain_1_head(1, state_root_10, parachains_10, proof_10));
assert_eq!(
ParasInfo::<TestRuntime>::get(ParaId(1)),
Some(ParaInfo {
best_head_hash: BestParaHeadHash {
at_relay_block_number: 1,
head_hash: head_data(1, 10).hash()
},
next_imported_hash_position: 1,
})
);
assert_eq!(
System::<TestRuntime>::events(),
vec![
EventRecord {
phase: Phase::Initialization,
event: TestEvent::Grandpa1(
pallet_bridge_grandpa::Event::UpdatedBestFinalizedHeader {
number: 1,
hash: relay_1_hash,
grandpa_info: StoredHeaderGrandpaInfo {
finality_proof: justification.clone(),
new_verification_context: None,
}
}
),
topics: vec![],
},
EventRecord {
phase: Phase::Initialization,
event: TestEvent::Parachains(Event::UpdatedParachainHead {
parachain: ParaId(1),
parachain_head_hash: head_data(1, 10).hash(),
}),
topics: vec![],
}
],
);
// now try to import head#5 at relay block#0
// => nothing is changed, because better head has already been imported
assert_ok!(import_parachain_1_head(0, state_root_5, parachains_5, proof_5));
assert_eq!(
ParasInfo::<TestRuntime>::get(ParaId(1)),
Some(ParaInfo {
best_head_hash: BestParaHeadHash {
at_relay_block_number: 1,
head_hash: head_data(1, 10).hash()
},
next_imported_hash_position: 1,
})
);
assert_eq!(
System::<TestRuntime>::events(),
vec![
EventRecord {
phase: Phase::Initialization,
event: TestEvent::Grandpa1(
pallet_bridge_grandpa::Event::UpdatedBestFinalizedHeader {
number: 1,
hash: relay_1_hash,
grandpa_info: StoredHeaderGrandpaInfo {
finality_proof: justification,
new_verification_context: None,
}
}
),
topics: vec![],
},
EventRecord {
phase: Phase::Initialization,
event: TestEvent::Parachains(Event::UpdatedParachainHead {
parachain: ParaId(1),
parachain_head_hash: head_data(1, 10).hash(),
}),
topics: vec![],
},
EventRecord {
phase: Phase::Initialization,
event: TestEvent::Parachains(Event::RejectedObsoleteParachainHead {
parachain: ParaId(1),
parachain_head_hash: head_data(1, 5).hash(),
}),
topics: vec![],
}
],
);
});
}
#[test]
fn does_nothing_when_parachain_head_is_too_large() {
let (state_root, proof, parachains) =
prepare_parachain_heads_proof::<RegularParachainHeader>(vec![
(1, head_data(1, 5)),
(4, big_head_data(1, 5)),
]);
run_test(|| {
// start with relay block #0 and try to import head#5 of parachain#1 and big parachain
initialize(state_root);
let result = Pallet::<TestRuntime>::submit_parachain_heads(
RuntimeOrigin::signed(1),
(0, test_relay_header(0, state_root).hash()),
parachains,
proof,
);
assert_ok!(result);
assert_eq!(
ParasInfo::<TestRuntime>::get(ParaId(1)),
Some(ParaInfo {
best_head_hash: BestParaHeadHash {
at_relay_block_number: 0,
head_hash: head_data(1, 5).hash()
},
next_imported_hash_position: 1,
})
);
assert_eq!(ParasInfo::<TestRuntime>::get(ParaId(4)), None);
assert_eq!(
System::<TestRuntime>::events(),
vec![
EventRecord {
phase: Phase::Initialization,
event: TestEvent::Parachains(Event::UpdatedParachainHead {
parachain: ParaId(1),
parachain_head_hash: head_data(1, 5).hash(),
}),
topics: vec![],
},
EventRecord {
phase: Phase::Initialization,
event: TestEvent::Parachains(Event::RejectedLargeParachainHead {
parachain: ParaId(4),
parachain_head_hash: big_head_data(1, 5).hash(),
parachain_head_size: big_stored_head_data(1, 5).encoded_size() as u32,
}),
topics: vec![],
},
],
);
});
}
#[test]
fn prunes_old_heads() {
run_test(|| {
let heads_to_keep = crate::mock::HeadsToKeep::get();
// import exactly `HeadsToKeep` headers
for i in 0..heads_to_keep {
let (state_root, proof, parachains) = prepare_parachain_heads_proof::<
RegularParachainHeader,
>(vec![(1, head_data(1, i))]);
if i == 0 {
initialize(state_root);
} else {
proceed(i, state_root);
}
let expected_weight = weight_of_import_parachain_1_head(&proof, false);
let result = import_parachain_1_head(i, state_root, parachains, proof);
assert_ok!(result);
assert_eq!(result.expect("checked above").actual_weight, Some(expected_weight));
}
// nothing is pruned yet
for i in 0..heads_to_keep {
assert!(ImportedParaHeads::<TestRuntime>::get(ParaId(1), head_data(1, i).hash())
.is_some());
}
// import next relay chain header and next parachain head
let (state_root, proof, parachains) = prepare_parachain_heads_proof::<
RegularParachainHeader,
>(vec![(1, head_data(1, heads_to_keep))]);
proceed(heads_to_keep, state_root);
let expected_weight = weight_of_import_parachain_1_head(&proof, true);
let result = import_parachain_1_head(heads_to_keep, state_root, parachains, proof);
assert_ok!(result);
assert_eq!(result.expect("checked above").actual_weight, Some(expected_weight));
// and the head#0 is pruned
assert!(
ImportedParaHeads::<TestRuntime>::get(ParaId(1), head_data(1, 0).hash()).is_none()
);
for i in 1..=heads_to_keep {
assert!(ImportedParaHeads::<TestRuntime>::get(ParaId(1), head_data(1, i).hash())
.is_some());
}
});
}
#[test]
fn fails_on_unknown_relay_chain_block() {
let (state_root, proof, parachains) =
prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(1, head_data(1, 5))]);
run_test(|| {
// start with relay block #0
initialize(state_root);
// try to import head#5 of parachain#1 at unknown relay chain block #1
assert_noop!(
import_parachain_1_head(1, state_root, parachains, proof),
Error::<TestRuntime>::UnknownRelayChainBlock
);
});
}
#[test]
fn fails_on_invalid_storage_proof() {
let (_state_root, proof, parachains) =
prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(1, head_data(1, 5))]);
run_test(|| {
// start with relay block #0
initialize(Default::default());
// try to import head#5 of parachain#1 at relay chain block #0
assert_noop!(
import_parachain_1_head(0, Default::default(), parachains, proof),
Error::<TestRuntime>::HeaderChainStorageProof(HeaderChainError::StorageProof(
StorageProofError::StorageRootMismatch
))
);
});
}
#[test]
fn is_not_rewriting_existing_head_if_failed_to_read_updated_head() {
let (state_root_5, proof_5, parachains_5) =
prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(1, head_data(1, 5))]);
let (state_root_10_at_20, proof_10_at_20, parachains_10_at_20) =
prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(2, head_data(2, 10))]);
let (state_root_10_at_30, proof_10_at_30, parachains_10_at_30) =
prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(1, head_data(1, 10))]);
run_test(|| {
// we've already imported head#5 of parachain#1 at relay block#10
initialize(state_root_5);
import_parachain_1_head(0, state_root_5, parachains_5, proof_5).expect("ok");
assert_eq!(
Pallet::<TestRuntime>::best_parachain_head(ParaId(1)),
Some(stored_head_data(1, 5))
);
// then if someone pretends to provide the updated head#10 of parachain#1 at relay
// block#20, but fails to actually do so (the proof contains parachain#2 instead)
//
// => we keep the previous value
proceed(20, state_root_10_at_20);
assert_ok!(Pallet::<TestRuntime>::submit_parachain_heads(
RuntimeOrigin::signed(1),
(20, test_relay_header(20, state_root_10_at_20).hash()),
parachains_10_at_20,
proof_10_at_20,
),);
assert_eq!(
Pallet::<TestRuntime>::best_parachain_head(ParaId(1)),
Some(stored_head_data(1, 5))
);
// then if someone provides the updated head#10 of parachain#1 at relay block#30,
// and the proof actually contains it
//
// => we update the value
proceed(30, state_root_10_at_30);
assert_ok!(Pallet::<TestRuntime>::submit_parachain_heads(
RuntimeOrigin::signed(1),
(30, test_relay_header(30, state_root_10_at_30).hash()),
parachains_10_at_30,
proof_10_at_30,
),);
assert_eq!(
Pallet::<TestRuntime>::best_parachain_head(ParaId(1)),
Some(stored_head_data(1, 10))
);
});
}
#[test]
fn storage_keys_computed_properly() {
assert_eq!(
ParasInfo::<TestRuntime>::storage_map_final_key(ParaId(42)).to_vec(),
ParasInfoKeyProvider::final_key("Parachains", &ParaId(42)).0
);
assert_eq!(
ImportedParaHeads::<TestRuntime>::storage_double_map_final_key(
ParaId(42),
ParaHash::from([21u8; 32])
)
.to_vec(),
ImportedParaHeadsKeyProvider::final_key(
"Parachains",
&ParaId(42),
&ParaHash::from([21u8; 32])
)
.0,
);
}
#[test]
fn ignores_parachain_head_if_it_is_missing_from_storage_proof() {
let (state_root, proof, _) =
prepare_parachain_heads_proof::<RegularParachainHeader>(vec![]);
let parachains = vec![(ParaId(2), Default::default())];
run_test(|| {
initialize(state_root);
assert_ok!(Pallet::<TestRuntime>::submit_parachain_heads(
RuntimeOrigin::signed(1),
(0, test_relay_header(0, state_root).hash()),
parachains,
proof,
));
assert_eq!(
System::<TestRuntime>::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: TestEvent::Parachains(Event::MissingParachainHead {
parachain: ParaId(2),
}),
topics: vec![],
}],
);
});
}
#[test]
fn ignores_parachain_head_if_parachain_head_hash_is_wrong() {
let (state_root, proof, _) =
prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(1, head_data(1, 0))]);
let parachains = vec![(ParaId(1), head_data(1, 10).hash())];
run_test(|| {
initialize(state_root);
assert_ok!(Pallet::<TestRuntime>::submit_parachain_heads(
RuntimeOrigin::signed(1),
(0, test_relay_header(0, state_root).hash()),
parachains,
proof,
));
assert_eq!(
System::<TestRuntime>::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: TestEvent::Parachains(Event::IncorrectParachainHeadHash {
parachain: ParaId(1),
parachain_head_hash: head_data(1, 10).hash(),
actual_parachain_head_hash: head_data(1, 0).hash(),
}),
topics: vec![],
}],
);
});
}
#[test]
fn test_bridge_parachain_call_is_correctly_defined() {
let (state_root, proof, _) =
prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(1, head_data(1, 0))]);
let parachains = vec![(ParaId(2), Default::default())];
let relay_header_id = (0, test_relay_header(0, state_root).hash());
let direct_submit_parachain_heads_call = Call::<TestRuntime>::submit_parachain_heads {
at_relay_block: relay_header_id,
parachains: parachains.clone(),
parachain_heads_proof: proof.clone(),
};
let indirect_submit_parachain_heads_call = BridgeParachainCall::submit_parachain_heads {
at_relay_block: relay_header_id,
parachains,
parachain_heads_proof: proof,
};
assert_eq!(
direct_submit_parachain_heads_call.encode(),
indirect_submit_parachain_heads_call.encode()
);
}
generate_owned_bridge_module_tests!(BasicOperatingMode::Normal, BasicOperatingMode::Halted);
#[test]
fn maybe_max_parachains_returns_correct_value() {
assert_eq!(MaybeMaxParachains::<TestRuntime, ()>::get(), Some(mock::TOTAL_PARACHAINS));
}
#[test]
fn maybe_max_total_parachain_hashes_returns_correct_value() {
assert_eq!(
MaybeMaxTotalParachainHashes::<TestRuntime, ()>::get(),
Some(mock::TOTAL_PARACHAINS * mock::HeadsToKeep::get()),
);
}
#[test]
fn submit_parachain_heads_requires_signed_origin() {
run_test(|| {
let (state_root, proof, parachains) =
prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(1, head_data(1, 0))]);
initialize(state_root);
// `submit_parachain_heads()` should fail when called with the root origin, because a signed origin is required.
assert_noop!(
Pallet::<TestRuntime>::submit_parachain_heads(
RuntimeOrigin::root(),
(0, test_relay_header(0, state_root).hash()),
parachains,
proof,
),
DispatchError::BadOrigin
);
})
}
#[test]
fn may_be_free_for_submitting_filtered_heads() {
run_test(|| {
let (state_root, proof, parachains) =
prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(2, head_data(2, 5))]);
// start with relay block #0 and import head#5 of parachain#2
initialize(state_root);
// first submission is free
let result = Pallet::<TestRuntime>::submit_parachain_heads(
RuntimeOrigin::signed(1),
(0, test_relay_header(0, state_root).hash()),
parachains.clone(),
proof.clone(),
);
assert_eq!(result.unwrap().pays_fee, Pays::No);
// next submission is NOT free, because we haven't updated anything
let result = Pallet::<TestRuntime>::submit_parachain_heads(
RuntimeOrigin::signed(1),
(0, test_relay_header(0, state_root).hash()),
parachains,
proof,
);
assert_eq!(result.unwrap().pays_fee, Pays::Yes);
// then we submit new head, proved at relay block `FreeHeadersInterval - 1` => Pays::Yes
let (state_root, proof, parachains) = prepare_parachain_heads_proof::<
RegularParachainHeader,
>(vec![(2, head_data(2, 50))]);
let relay_block_number = FreeHeadersInterval::get() - 1;
proceed(relay_block_number, state_root);
let result = Pallet::<TestRuntime>::submit_parachain_heads(
RuntimeOrigin::signed(1),
(relay_block_number, test_relay_header(relay_block_number, state_root).hash()),
parachains,
proof,
);
assert_eq!(result.unwrap().pays_fee, Pays::Yes);
// then we submit new head, proved after `FreeHeadersInterval` => Pays::No
let (state_root, proof, parachains) = prepare_parachain_heads_proof::<
RegularParachainHeader,
>(vec![(2, head_data(2, 100))]);
let relay_block_number = relay_block_number + FreeHeadersInterval::get();
proceed(relay_block_number, state_root);
let result = Pallet::<TestRuntime>::submit_parachain_heads(
RuntimeOrigin::signed(1),
(relay_block_number, test_relay_header(relay_block_number, state_root).hash()),
parachains,
proof,
);
assert_eq!(result.unwrap().pays_fee, Pays::No);
// then we submit new BIG head, proved after `FreeHeadersInterval` => Pays::Yes
let mut large_head = head_data(2, 100);
large_head.0.extend(&[42u8; BigParachain::MAX_HEADER_SIZE as _]);
let (state_root, proof, parachains) =
prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(2, large_head)]);
let relay_block_number = relay_block_number + FreeHeadersInterval::get();
proceed(relay_block_number, state_root);
let result = Pallet::<TestRuntime>::submit_parachain_heads(
RuntimeOrigin::signed(1),
(relay_block_number, test_relay_header(relay_block_number, state_root).hash()),
parachains,
proof,
);
assert_eq!(result.unwrap().pays_fee, Pays::Yes);
})
}
#[test]
fn grandpa_and_parachain_pallets_share_free_headers_counter() {
run_test(|| {
initialize(Default::default());
// set free headers limit to `4`
let mut free_headers_remaining = 4;
pallet_bridge_grandpa::FreeHeadersRemaining::<TestRuntime, BridgesGrandpaPalletInstance>::set(
Some(free_headers_remaining),
);
// import free GRANDPA and parachain headers
let mut relay_block_number = 0;
for i in 0..2 {
// import free GRANDPA header
let (state_root, proof, parachains) = prepare_parachain_heads_proof::<
RegularParachainHeader,
>(vec![(2, head_data(2, 5 + i))]);
relay_block_number += FreeHeadersInterval::get();
proceed(relay_block_number, state_root);
assert_eq!(
pallet_bridge_grandpa::FreeHeadersRemaining::<
TestRuntime,
BridgesGrandpaPalletInstance,
>::get(),
Some(free_headers_remaining - 1),
);
free_headers_remaining -= 1;
// import free parachain header
assert_ok!(Pallet::<TestRuntime>::submit_parachain_heads(
RuntimeOrigin::signed(1),
(relay_block_number, test_relay_header(relay_block_number, state_root).hash()),
parachains,
proof,
),);
assert_eq!(
pallet_bridge_grandpa::FreeHeadersRemaining::<
TestRuntime,
BridgesGrandpaPalletInstance,
>::get(),
Some(free_headers_remaining - 1),
);
free_headers_remaining -= 1;
}
// try to import free GRANDPA header => non-free execution
let (state_root, proof, parachains) =
prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(2, head_data(2, 7))]);
relay_block_number += FreeHeadersInterval::get();
let result = pallet_bridge_grandpa::Pallet::<TestRuntime, BridgesGrandpaPalletInstance>::submit_finality_proof_ex(
RuntimeOrigin::signed(1),
Box::new(test_relay_header(relay_block_number, state_root)),
make_default_justification(&test_relay_header(relay_block_number, state_root)),
TEST_GRANDPA_SET_ID,
false,
);
assert_eq!(result.unwrap().pays_fee, Pays::Yes);
// try to import free parachain header => non-free execution
let result = Pallet::<TestRuntime>::submit_parachain_heads(
RuntimeOrigin::signed(1),
(relay_block_number, test_relay_header(relay_block_number, state_root).hash()),
parachains,
proof,
);
assert_eq!(result.unwrap().pays_fee, Pays::Yes);
assert_eq!(
pallet_bridge_grandpa::FreeHeadersRemaining::<
TestRuntime,
BridgesGrandpaPalletInstance,
>::get(),
Some(0),
);
});
}
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
use bp_header_chain::ChainWithGrandpa;
use bp_polkadot_core::parachains::ParaId;
use bp_runtime::{Chain, ChainId, Parachain};
use frame_support::{
construct_runtime, derive_impl, parameter_types, traits::ConstU32, weights::Weight,
};
use sp_runtime::{
testing::H256,
traits::{BlakeTwo256, Header as HeaderT},
MultiSignature,
};
use crate as pallet_bridge_parachains;
pub type AccountId = u64;
pub type RelayBlockHeader =
sp_runtime::generic::Header<crate::RelayBlockNumber, crate::RelayBlockHasher>;
type Block = frame_system::mocking::MockBlock<TestRuntime>;
pub const PARAS_PALLET_NAME: &str = "Paras";
pub const UNTRACKED_PARACHAIN_ID: u32 = 10;
// use exact expected encoded size: `vec_len_size + header_number_size + state_root_hash_size`
pub const MAXIMAL_PARACHAIN_HEAD_DATA_SIZE: u32 = 1 + 8 + 32;
// total parachains that we use in tests
pub const TOTAL_PARACHAINS: u32 = 4;
pub type RegularParachainHeader = sp_runtime::testing::Header;
pub type RegularParachainHasher = BlakeTwo256;
pub type BigParachainHeader = sp_runtime::generic::Header<u128, BlakeTwo256>;
pub struct Parachain1;
impl Chain for Parachain1 {
const ID: ChainId = *b"pch1";
type BlockNumber = u64;
type Hash = H256;
type Hasher = RegularParachainHasher;
type Header = RegularParachainHeader;
type AccountId = u64;
type Balance = u64;
type Nonce = u64;
type Signature = MultiSignature;
fn max_extrinsic_size() -> u32 {
0
}
fn max_extrinsic_weight() -> Weight {
Weight::zero()
}
}
impl Parachain for Parachain1 {
const PARACHAIN_ID: u32 = 1;
const MAX_HEADER_SIZE: u32 = 1_024;
}
pub struct Parachain2;
impl Chain for Parachain2 {
const ID: ChainId = *b"pch2";
type BlockNumber = u64;
type Hash = H256;
type Hasher = RegularParachainHasher;
type Header = RegularParachainHeader;
type AccountId = u64;
type Balance = u64;
type Nonce = u64;
type Signature = MultiSignature;
fn max_extrinsic_size() -> u32 {
0
}
fn max_extrinsic_weight() -> Weight {
Weight::zero()
}
}
impl Parachain for Parachain2 {
const PARACHAIN_ID: u32 = 2;
const MAX_HEADER_SIZE: u32 = 1_024;
}
pub struct Parachain3;
impl Chain for Parachain3 {
const ID: ChainId = *b"pch3";
type BlockNumber = u64;
type Hash = H256;
type Hasher = RegularParachainHasher;
type Header = RegularParachainHeader;
type AccountId = u64;
type Balance = u64;
type Nonce = u64;
type Signature = MultiSignature;
fn max_extrinsic_size() -> u32 {
0
}
fn max_extrinsic_weight() -> Weight {
Weight::zero()
}
}
impl Parachain for Parachain3 {
const PARACHAIN_ID: u32 = 3;
const MAX_HEADER_SIZE: u32 = 1_024;
}
// this parachain uses u128 as its block number and its stored head data size exceeds the limit
pub struct BigParachain;
impl Chain for BigParachain {
const ID: ChainId = *b"bpch";
type BlockNumber = u128;
type Hash = H256;
type Hasher = RegularParachainHasher;
type Header = BigParachainHeader;
type AccountId = u64;
type Balance = u64;
type Nonce = u64;
type Signature = MultiSignature;
fn max_extrinsic_size() -> u32 {
0
}
fn max_extrinsic_weight() -> Weight {
Weight::zero()
}
}
impl Parachain for BigParachain {
const PARACHAIN_ID: u32 = 4;
const MAX_HEADER_SIZE: u32 = 2_048;
}
construct_runtime! {
pub enum TestRuntime
{
System: frame_system::{Pallet, Call, Config<T>, Storage, Event<T>},
Grandpa1: pallet_bridge_grandpa::<Instance1>::{Pallet, Event<T>},
Grandpa2: pallet_bridge_grandpa::<Instance2>::{Pallet, Event<T>},
Parachains: pallet_bridge_parachains::{Call, Pallet, Event<T>},
}
}
#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
impl frame_system::Config for TestRuntime {
type Block = Block;
}
parameter_types! {
pub const HeadersToKeep: u32 = 5;
pub const FreeHeadersInterval: u32 = 15;
}
impl pallet_bridge_grandpa::Config<pallet_bridge_grandpa::Instance1> for TestRuntime {
type RuntimeEvent = RuntimeEvent;
type BridgedChain = TestBridgedChain;
type MaxFreeHeadersPerBlock = ConstU32<2>;
type FreeHeadersInterval = FreeHeadersInterval;
type HeadersToKeep = HeadersToKeep;
type WeightInfo = ();
}
impl pallet_bridge_grandpa::Config<pallet_bridge_grandpa::Instance2> for TestRuntime {
type RuntimeEvent = RuntimeEvent;
type BridgedChain = TestBridgedChain;
type MaxFreeHeadersPerBlock = ConstU32<2>;
type FreeHeadersInterval = FreeHeadersInterval;
type HeadersToKeep = HeadersToKeep;
type WeightInfo = ();
}
parameter_types! {
pub const HeadsToKeep: u32 = 4;
pub const ParasPalletName: &'static str = PARAS_PALLET_NAME;
pub GetTenFirstParachains: Vec<ParaId> = (0..10).map(ParaId).collect();
}
impl pallet_bridge_parachains::Config for TestRuntime {
type RuntimeEvent = RuntimeEvent;
type WeightInfo = ();
type BridgesGrandpaPalletInstance = pallet_bridge_grandpa::Instance1;
type ParasPalletName = ParasPalletName;
type ParaStoredHeaderDataBuilder = (Parachain1, Parachain2, Parachain3, BigParachain);
type HeadsToKeep = HeadsToKeep;
type MaxParaHeadDataSize = ConstU32<MAXIMAL_PARACHAIN_HEAD_DATA_SIZE>;
}
#[cfg(feature = "runtime-benchmarks")]
impl pallet_bridge_parachains::benchmarking::Config<()> for TestRuntime {
fn parachains() -> Vec<ParaId> {
vec![
ParaId(Parachain1::PARACHAIN_ID),
ParaId(Parachain2::PARACHAIN_ID),
ParaId(Parachain3::PARACHAIN_ID),
]
}
fn prepare_parachain_heads_proof(
parachains: &[ParaId],
_parachain_head_size: u32,
_proof_size: bp_runtime::StorageProofSize,
) -> (
crate::RelayBlockNumber,
crate::RelayBlockHash,
bp_polkadot_core::parachains::ParaHeadsProof,
Vec<(ParaId, bp_polkadot_core::parachains::ParaHash)>,
) {
// in the mock run we only care about benchmark correctness, not about benchmark results
// => ignore the size-related arguments
let (state_root, proof, parachains) =
bp_test_utils::prepare_parachain_heads_proof::<RegularParachainHeader>(
parachains.iter().map(|p| (p.0, crate::tests::head_data(p.0, 1))).collect(),
);
let relay_genesis_hash = crate::tests::initialize(state_root);
(0, relay_genesis_hash, proof, parachains)
}
}
#[derive(Debug)]
pub struct TestBridgedChain;
impl Chain for TestBridgedChain {
const ID: ChainId = *b"tbch";
type BlockNumber = crate::RelayBlockNumber;
type Hash = crate::RelayBlockHash;
type Hasher = crate::RelayBlockHasher;
type Header = RelayBlockHeader;
type AccountId = AccountId;
type Balance = u32;
type Nonce = u32;
type Signature = sp_runtime::testing::TestSignature;
fn max_extrinsic_size() -> u32 {
unreachable!()
}
fn max_extrinsic_weight() -> Weight {
unreachable!()
}
}
impl ChainWithGrandpa for TestBridgedChain {
const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = "";
const MAX_AUTHORITIES_COUNT: u32 = 16;
const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8;
const MAX_MANDATORY_HEADER_SIZE: u32 = 256;
const AVERAGE_HEADER_SIZE: u32 = 64;
}
#[derive(Debug)]
pub struct OtherBridgedChain;
impl Chain for OtherBridgedChain {
const ID: ChainId = *b"obch";
type BlockNumber = u64;
type Hash = crate::RelayBlockHash;
type Hasher = crate::RelayBlockHasher;
type Header = sp_runtime::generic::Header<u64, crate::RelayBlockHasher>;
type AccountId = AccountId;
type Balance = u32;
type Nonce = u32;
type Signature = sp_runtime::testing::TestSignature;
fn max_extrinsic_size() -> u32 {
unreachable!()
}
fn max_extrinsic_weight() -> Weight {
unreachable!()
}
}
impl ChainWithGrandpa for OtherBridgedChain {
const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = "";
const MAX_AUTHORITIES_COUNT: u32 = 16;
const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8;
const MAX_MANDATORY_HEADER_SIZE: u32 = 256;
const AVERAGE_HEADER_SIZE: u32 = 64;
}
/// Return test externalities to use in tests.
pub fn new_test_ext() -> sp_io::TestExternalities {
sp_io::TestExternalities::new(Default::default())
}
/// Run pallet test.
pub fn run_test<T>(test: impl FnOnce() -> T) -> T {
new_test_ext().execute_with(|| {
System::set_block_number(1);
System::reset_events();
test()
})
}
/// Return test relay chain header with given number.
pub fn test_relay_header(
num: crate::RelayBlockNumber,
state_root: crate::RelayBlockHash,
) -> RelayBlockHeader {
RelayBlockHeader::new(
num,
Default::default(),
state_root,
Default::default(),
Default::default(),
)
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Autogenerated weights for pallet_bridge_parachains
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
//! DATE: 2023-03-02, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
//! HOSTNAME: `covid`, CPU: `11th Gen Intel(R) Core(TM) i7-11800H @ 2.30GHz`
//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024
// Executed Command:
// target/release/unknown-bridge-node
// benchmark
// pallet
// --chain=dev
// --steps=50
// --repeat=20
// --pallet=pallet_bridge_parachains
// --extrinsic=*
// --execution=wasm
// --wasm-execution=Compiled
// --heap-pages=4096
// --output=./modules/parachains/src/weights.rs
// --template=./.maintain/bridge-weight-template.hbs
#![allow(clippy::all)]
#![allow(unused_parens)]
#![allow(unused_imports)]
#![allow(missing_docs)]
use frame_support::{
traits::Get,
weights::{constants::RocksDbWeight, Weight},
};
use sp_std::marker::PhantomData;
/// Weight functions needed for pallet_bridge_parachains.
pub trait WeightInfo {
fn submit_parachain_heads_with_n_parachains(p: u32) -> Weight;
fn submit_parachain_heads_with_1kb_proof() -> Weight;
fn submit_parachain_heads_with_16kb_proof() -> Weight;
}
/// Weights for `pallet_bridge_parachains` that are generated using one of the Bridge testnets.
///
/// Those weights are test only and must never be used in production.
pub struct BridgeWeight<T>(PhantomData<T>);
impl<T: frame_system::Config> WeightInfo for BridgeWeight<T> {
/// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1),
/// added: 496, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1)
///
/// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added:
/// 555, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1)
///
/// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size:
/// Some(64), added: 1549, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1)
///
/// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size:
/// Some(196), added: 1681, mode: MaxEncodedLen)
///
/// The range of component `p` is `[1, 2]`.
fn submit_parachain_heads_with_n_parachains(p: u32) -> Weight {
// Proof Size summary in bytes:
// Measured: `366`
// Estimated: `4648`
// Minimum execution time: 36_701 nanoseconds.
Weight::from_parts(38_597_828, 4648)
// Standard Error: 190_859
.saturating_add(Weight::from_parts(60_685, 0).saturating_mul(p.into()))
.saturating_add(T::DbWeight::get().reads(4_u64))
.saturating_add(T::DbWeight::get().writes(3_u64))
}
/// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1),
/// added: 496, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1)
///
/// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added:
/// 555, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1)
///
/// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size:
/// Some(64), added: 1549, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1)
///
/// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size:
/// Some(196), added: 1681, mode: MaxEncodedLen)
fn submit_parachain_heads_with_1kb_proof() -> Weight {
// Proof Size summary in bytes:
// Measured: `366`
// Estimated: `4648`
// Minimum execution time: 38_189 nanoseconds.
Weight::from_parts(39_252_000, 4648)
.saturating_add(T::DbWeight::get().reads(4_u64))
.saturating_add(T::DbWeight::get().writes(3_u64))
}
/// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1),
/// added: 496, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1)
///
/// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added:
/// 555, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1)
///
/// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size:
/// Some(64), added: 1549, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1)
///
/// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size:
/// Some(196), added: 1681, mode: MaxEncodedLen)
fn submit_parachain_heads_with_16kb_proof() -> Weight {
// Proof Size summary in bytes:
// Measured: `366`
// Estimated: `4648`
// Minimum execution time: 62_868 nanoseconds.
Weight::from_parts(63_581_000, 4648)
.saturating_add(T::DbWeight::get().reads(4_u64))
.saturating_add(T::DbWeight::get().writes(3_u64))
}
}
// For backwards compatibility and tests
impl WeightInfo for () {
/// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1),
/// added: 496, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1)
///
/// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added:
/// 555, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1)
///
/// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size:
/// Some(64), added: 1549, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1)
///
/// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size:
/// Some(196), added: 1681, mode: MaxEncodedLen)
///
/// The range of component `p` is `[1, 2]`.
fn submit_parachain_heads_with_n_parachains(p: u32) -> Weight {
// Proof Size summary in bytes:
// Measured: `366`
// Estimated: `4648`
// Minimum execution time: 36_701 nanoseconds.
Weight::from_parts(38_597_828, 4648)
// Standard Error: 190_859
.saturating_add(Weight::from_parts(60_685, 0).saturating_mul(p.into()))
.saturating_add(RocksDbWeight::get().reads(4_u64))
.saturating_add(RocksDbWeight::get().writes(3_u64))
}
/// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1),
/// added: 496, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1)
///
/// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added:
/// 555, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1)
///
/// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size:
/// Some(64), added: 1549, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1)
///
/// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size:
/// Some(196), added: 1681, mode: MaxEncodedLen)
fn submit_parachain_heads_with_1kb_proof() -> Weight {
// Proof Size summary in bytes:
// Measured: `366`
// Estimated: `4648`
// Minimum execution time: 38_189 nanoseconds.
Weight::from_parts(39_252_000, 4648)
.saturating_add(RocksDbWeight::get().reads(4_u64))
.saturating_add(RocksDbWeight::get().writes(3_u64))
}
/// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0)
///
/// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1),
/// added: 496, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0)
///
/// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1)
///
/// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added:
/// 555, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1)
///
/// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size:
/// Some(64), added: 1549, mode: MaxEncodedLen)
///
/// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1)
///
/// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size:
/// Some(196), added: 1681, mode: MaxEncodedLen)
fn submit_parachain_heads_with_16kb_proof() -> Weight {
// Proof Size summary in bytes:
// Measured: `366`
// Estimated: `4648`
// Minimum execution time: 62_868 nanoseconds.
Weight::from_parts(63_581_000, 4648)
.saturating_add(RocksDbWeight::get().reads(4_u64))
.saturating_add(RocksDbWeight::get().writes(3_u64))
}
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Weight-related utilities.
use crate::weights::{BridgeWeight, WeightInfo};
use bp_runtime::Size;
use frame_support::weights::{RuntimeDbWeight, Weight};
/// Size of the regular parachain head.
///
/// This does not mean that we expect all parachain heads to share the same size, or that heads
/// of larger/smaller size would be rejected. It is simply the head size that we use in
/// benchmarks. A relayer would need to pay an additional fee for any extra bytes.
///
/// 384 is a bit larger (~1.3 times) than the size of a randomly chosen Polkadot block.
pub const DEFAULT_PARACHAIN_HEAD_SIZE: u32 = 384;
/// Number of extra bytes (excluding size of storage value itself) of storage proof, built at
/// some generic chain.
pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024;
/// Extended weight info.
pub trait WeightInfoExt: WeightInfo {
// Our configuration assumes that the runtime has special signed extensions used to:
//
// 1) boost priority of `submit_parachain_heads` transactions;
//
// 2) slash the relayer if they submit an invalid transaction.
//
// Those extensions read and update storage values of other pallets (`pallet-bridge-relayers`
// and the balances/assets pallet), so we need to add that weight to the weight of our call.
// Hence the two following methods.
/// Extra weight that is added to the `submit_parachain_heads` call weight by signed extensions
/// that are declared at runtime level.
fn submit_parachain_heads_overhead_from_runtime() -> Weight;
/// Storage proof overhead that is included in every storage proof.
///
/// The relayer would pay some extra fee for additional proof bytes, since they mean
/// more hashing operations.
fn expected_extra_storage_proof_size() -> u32;
/// Weight of the parachain heads delivery extrinsic.
fn submit_parachain_heads_weight(
db_weight: RuntimeDbWeight,
proof: &impl Size,
parachains_count: u32,
) -> Weight {
// weight of the `submit_parachain_heads` with exactly `parachains_count` parachain
// heads of the default size (`DEFAULT_PARACHAIN_HEAD_SIZE`)
let base_weight = Self::submit_parachain_heads_with_n_parachains(parachains_count);
// overhead because of extra storage proof bytes
let expected_proof_size = parachains_count
.saturating_mul(DEFAULT_PARACHAIN_HEAD_SIZE)
.saturating_add(Self::expected_extra_storage_proof_size());
let actual_proof_size = proof.size();
let proof_size_overhead = Self::storage_proof_size_overhead(
actual_proof_size.saturating_sub(expected_proof_size),
);
// potential pruning weight (refunded if hasn't happened)
let pruning_weight =
Self::parachain_head_pruning_weight(db_weight).saturating_mul(parachains_count as u64);
base_weight
.saturating_add(proof_size_overhead)
.saturating_add(pruning_weight)
.saturating_add(Self::submit_parachain_heads_overhead_from_runtime())
}
/// Returns weight of single parachain head storage update.
///
/// This weight only includes db write operations that happens if parachain head is actually
/// updated. All extra weights (weight of storage proof validation, additional checks, ...) is
/// not included.
fn parachain_head_storage_write_weight(db_weight: RuntimeDbWeight) -> Weight {
// it's just a couple of operations - we need to write the hash (`ImportedParaHashes`) and
// the head itself (`ImportedParaHeads`). Pruning is not included here.
db_weight.writes(2)
}
/// Returns weight of single parachain head pruning.
fn parachain_head_pruning_weight(db_weight: RuntimeDbWeight) -> Weight {
// it's just one write operation, we don't want any benchmarks for that
db_weight.writes(1)
}
/// Returns weight that needs to be accounted when storage proof of given size is received.
fn storage_proof_size_overhead(extra_proof_bytes: u32) -> Weight {
let extra_byte_weight = (Self::submit_parachain_heads_with_16kb_proof() -
Self::submit_parachain_heads_with_1kb_proof()) /
(15 * 1024);
extra_byte_weight.saturating_mul(extra_proof_bytes as u64)
}
}
impl WeightInfoExt for () {
fn submit_parachain_heads_overhead_from_runtime() -> Weight {
Weight::zero()
}
fn expected_extra_storage_proof_size() -> u32 {
EXTRA_STORAGE_PROOF_SIZE
}
}
impl<T: frame_system::Config> WeightInfoExt for BridgeWeight<T> {
fn submit_parachain_heads_overhead_from_runtime() -> Weight {
Weight::zero()
}
fn expected_extra_storage_proof_size() -> u32 {
EXTRA_STORAGE_PROOF_SIZE
}
}
[package]
name = "pallet-bridge-relayers"
description = "Module used to store relayer rewards and coordinate relayers set."
version = "0.7.0"
authors.workspace = true
edition.workspace = true
license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
[lints]
workspace = true
[dependencies]
codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false }
log = { workspace = true }
scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
# Bridge dependencies
bp-messages = { path = "../../primitives/messages", default-features = false }
bp-relayers = { path = "../../primitives/relayers", default-features = false }
bp-runtime = { path = "../../primitives/runtime", default-features = false }
pallet-bridge-messages = { path = "../messages", default-features = false }
# Substrate Dependencies
frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false, optional = true }
frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-arithmetic = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false }
[dev-dependencies]
bp-runtime = { path = "../../primitives/runtime" }
pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" }
sp-io = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" }
sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" }
[features]
default = ["std"]
std = [
"bp-messages/std",
"bp-relayers/std",
"bp-runtime/std",
"codec/std",
"frame-benchmarking/std",
"frame-support/std",
"frame-system/std",
"log/std",
"pallet-bridge-messages/std",
"scale-info/std",
"sp-arithmetic/std",
"sp-runtime/std",
"sp-std/std",
]
runtime-benchmarks = [
"frame-benchmarking/runtime-benchmarks",
"frame-support/runtime-benchmarks",
"frame-system/runtime-benchmarks",
"pallet-balances/runtime-benchmarks",
"pallet-bridge-messages/runtime-benchmarks",
"sp-runtime/runtime-benchmarks",
]
try-runtime = [
"frame-support/try-runtime",
"frame-system/try-runtime",
"pallet-balances/try-runtime",
"pallet-bridge-messages/try-runtime",
"sp-runtime/try-runtime",
]
# Bridge Relayers Pallet
The pallet serves as storage for pending bridge relayer rewards. Any runtime component may register a reward
to some relayer for doing some useful job at some messages lane. Later, the relayer may claim its rewards
using the `claim_rewards` call.
The reward payment procedure is abstracted from the pallet code. One possible implementation is
[`PayLaneRewardFromAccount`](../../primitives/relayers/src/lib.rs), which simply does a `Currency::transfer`
call to the relayer account from the relayer-rewards account, determined by the message lane id.
We have two examples of how this pallet is used in production. Rewards are registered at the target chain to
compensate the fees of message delivery transactions (and linked finality delivery calls). At the source chain,
rewards are registered during delivery confirmation transactions. You may find more information about that in
the [Kusama <> Polkadot bridge](../../docs/polkadot-kusama-bridge-overview.md) documentation.
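Below is a minimal sketch of that flow, assuming a runtime with this pallet installed. The lane id, the
`*b"test"` peer-chain id, the reward value and the `reward_and_claim` helper are illustrative only; the calls
mirror how the pallet's own benchmarks drive `register_relayer_reward` and `claim_rewards`, and the
registration entry point actually available to other runtime components may differ.

```rust
use bp_messages::LaneId;
use bp_relayers::{RewardsAccountOwner, RewardsAccountParams};
use frame_system::RawOrigin;

/// Hypothetical helper: register a reward for `relayer` and let the relayer claim it.
fn reward_and_claim<T: pallet_bridge_relayers::Config>(relayer: T::AccountId, reward: T::Reward) {
    // Rewards are tracked per rewards account; the lane and peer-chain id here are placeholders.
    let lane = LaneId([0, 0, 0, 0]);
    let account_params =
        RewardsAccountParams::new(lane, *b"test", RewardsAccountOwner::ThisChain);

    // Some runtime component registers the reward for a useful job done at this lane.
    pallet_bridge_relayers::Pallet::<T>::register_relayer_reward(account_params, &relayer, reward);

    // Later, the relayer claims the pending reward with a signed `claim_rewards` call.
    pallet_bridge_relayers::Pallet::<T>::claim_rewards(
        RawOrigin::Signed(relayer).into(),
        account_params,
    )
    .expect("the configured `PaymentProcedure` is expected to pay out the registered reward");
}
```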
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Benchmarks for the relayers Pallet.
#![cfg(feature = "runtime-benchmarks")]
use crate::*;
use bp_messages::LaneId;
use bp_relayers::RewardsAccountOwner;
use frame_benchmarking::{benchmarks, whitelisted_caller};
use frame_system::RawOrigin;
use sp_runtime::traits::One;
/// Reward amount that is (hopefully) larger than the existential deposit across all chains.
const REWARD_AMOUNT: u32 = u32::MAX;
/// Pallet we're benchmarking here.
pub struct Pallet<T: Config>(crate::Pallet<T>);
/// Trait that must be implemented by runtime.
pub trait Config: crate::Config {
/// Prepare environment for paying given reward for serving given lane.
fn prepare_rewards_account(account_params: RewardsAccountParams, reward: Self::Reward);
/// Give enough balance to given account.
fn deposit_account(account: Self::AccountId, balance: Self::Reward);
}
benchmarks! {
// Benchmark `claim_rewards` call.
claim_rewards {
let lane = LaneId([0, 0, 0, 0]);
let account_params =
RewardsAccountParams::new(lane, *b"test", RewardsAccountOwner::ThisChain);
let relayer: T::AccountId = whitelisted_caller();
let reward = T::Reward::from(REWARD_AMOUNT);
T::prepare_rewards_account(account_params, reward);
RelayerRewards::<T>::insert(&relayer, account_params, reward);
}: _(RawOrigin::Signed(relayer), account_params)
verify {
// we can't check anything here, because `PaymentProcedure` is responsible for the
// payment logic, so we assume that if the call has succeeded, the procedure has
// also completed successfully
}
// Benchmark `register` call.
register {
let relayer: T::AccountId = whitelisted_caller();
let valid_till = frame_system::Pallet::<T>::block_number()
.saturating_add(crate::Pallet::<T>::required_registration_lease())
.saturating_add(One::one())
.saturating_add(One::one());
T::deposit_account(relayer.clone(), crate::Pallet::<T>::required_stake());
}: _(RawOrigin::Signed(relayer.clone()), valid_till)
verify {
assert!(crate::Pallet::<T>::is_registration_active(&relayer));
}
// Benchmark `deregister` call.
deregister {
let relayer: T::AccountId = whitelisted_caller();
let valid_till = frame_system::Pallet::<T>::block_number()
.saturating_add(crate::Pallet::<T>::required_registration_lease())
.saturating_add(One::one())
.saturating_add(One::one());
T::deposit_account(relayer.clone(), crate::Pallet::<T>::required_stake());
crate::Pallet::<T>::register(RawOrigin::Signed(relayer.clone()).into(), valid_till).unwrap();
frame_system::Pallet::<T>::set_block_number(valid_till.saturating_add(One::one()));
}: _(RawOrigin::Signed(relayer.clone()))
verify {
assert!(!crate::Pallet::<T>::is_registration_active(&relayer));
}
// Benchmark `slash_and_deregister` method of the pallet. We are adding this weight to
// the weight of message delivery call if `RefundBridgedParachainMessages` signed extension
// is deployed at runtime level.
slash_and_deregister {
// prepare and register relayer account
let relayer: T::AccountId = whitelisted_caller();
let valid_till = frame_system::Pallet::<T>::block_number()
.saturating_add(crate::Pallet::<T>::required_registration_lease())
.saturating_add(One::one())
.saturating_add(One::one());
T::deposit_account(relayer.clone(), crate::Pallet::<T>::required_stake());
crate::Pallet::<T>::register(RawOrigin::Signed(relayer.clone()).into(), valid_till).unwrap();
// create slash destination account
let lane = LaneId([0, 0, 0, 0]);
let slash_destination = RewardsAccountParams::new(lane, *b"test", RewardsAccountOwner::ThisChain);
T::prepare_rewards_account(slash_destination, Zero::zero());
}: {
crate::Pallet::<T>::slash_and_deregister(&relayer, slash_destination.into())
}
verify {
assert!(!crate::Pallet::<T>::is_registration_active(&relayer));
}
// Benchmark `register_relayer_reward` method of the pallet. We are adding this weight to
// the weight of message delivery call if `RefundBridgedParachainMessages` signed extension
// is deployed at runtime level.
register_relayer_reward {
let lane = LaneId([0, 0, 0, 0]);
let relayer: T::AccountId = whitelisted_caller();
let account_params =
RewardsAccountParams::new(lane, *b"test", RewardsAccountOwner::ThisChain);
}: {
crate::Pallet::<T>::register_relayer_reward(account_params, &relayer, One::one());
}
verify {
assert_eq!(RelayerRewards::<T>::get(relayer, &account_params), Some(One::one()));
}
impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::TestRuntime)
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Runtime module that is used to store relayer rewards and (in the future) to
//! coordinate relations between relayers.
#![cfg_attr(not(feature = "std"), no_std)]
#![warn(missing_docs)]
use bp_relayers::{
ExplicitOrAccountParams, PaymentProcedure, Registration, RelayerRewardsKeyProvider,
RewardsAccountParams, StakeAndSlash,
};
use bp_runtime::StorageDoubleMapKeyProvider;
use frame_support::fail;
use sp_arithmetic::traits::{AtLeast32BitUnsigned, Zero};
use sp_runtime::{traits::CheckedSub, Saturating};
use sp_std::marker::PhantomData;
pub use pallet::*;
pub use payment_adapter::DeliveryConfirmationPaymentsAdapter;
pub use stake_adapter::StakeAndSlashNamed;
pub use weights::WeightInfo;
pub use weights_ext::WeightInfoExt;
pub mod benchmarking;
mod mock;
mod payment_adapter;
mod stake_adapter;
mod weights_ext;
pub mod weights;
/// The target that will be used when publishing logs related to this pallet.
pub const LOG_TARGET: &str = "runtime::bridge-relayers";
#[frame_support::pallet]
pub mod pallet {
use super::*;
use frame_support::pallet_prelude::*;
use frame_system::pallet_prelude::*;
/// `RelayerRewardsKeyProvider` for given configuration.
type RelayerRewardsKeyProviderOf<T> =
RelayerRewardsKeyProvider<<T as frame_system::Config>::AccountId, <T as Config>::Reward>;
#[pallet::config]
pub trait Config: frame_system::Config {
/// The overarching event type.
type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
/// Type of relayer reward.
type Reward: AtLeast32BitUnsigned + Copy + Parameter + MaxEncodedLen;
/// Pay rewards scheme.
type PaymentProcedure: PaymentProcedure<Self::AccountId, Self::Reward>;
/// Stake and slash scheme.
type StakeAndSlash: StakeAndSlash<Self::AccountId, BlockNumberFor<Self>, Self::Reward>;
/// Pallet call weights.
type WeightInfo: WeightInfoExt;
}
#[pallet::pallet]
pub struct Pallet<T>(PhantomData<T>);
#[pallet::call]
impl<T: Config> Pallet<T> {
/// Claim accumulated rewards.
#[pallet::call_index(0)]
#[pallet::weight(T::WeightInfo::claim_rewards())]
pub fn claim_rewards(
origin: OriginFor<T>,
rewards_account_params: RewardsAccountParams,
) -> DispatchResult {
let relayer = ensure_signed(origin)?;
RelayerRewards::<T>::try_mutate_exists(
&relayer,
rewards_account_params,
|maybe_reward| -> DispatchResult {
let reward = maybe_reward.take().ok_or(Error::<T>::NoRewardForRelayer)?;
T::PaymentProcedure::pay_reward(&relayer, rewards_account_params, reward)
.map_err(|e| {
log::trace!(
target: LOG_TARGET,
"Failed to pay {:?} rewards to {:?}: {:?}",
rewards_account_params,
relayer,
e,
);
Error::<T>::FailedToPayReward
})?;
Self::deposit_event(Event::<T>::RewardPaid {
relayer: relayer.clone(),
rewards_account_params,
reward,
});
Ok(())
},
)
}
/// Register relayer or update its registration.
///
/// Registration allows the relayer to get a priority boost for its message delivery transactions.
#[pallet::call_index(1)]
#[pallet::weight(T::WeightInfo::register())]
pub fn register(origin: OriginFor<T>, valid_till: BlockNumberFor<T>) -> DispatchResult {
let relayer = ensure_signed(origin)?;
// valid till must be larger than the current block number and the lease must be larger
// than the `RequiredRegistrationLease`
let lease = valid_till.saturating_sub(frame_system::Pallet::<T>::block_number());
ensure!(
lease > Pallet::<T>::required_registration_lease(),
Error::<T>::InvalidRegistrationLease
);
RegisteredRelayers::<T>::try_mutate(&relayer, |maybe_registration| -> DispatchResult {
let mut registration = maybe_registration
.unwrap_or_else(|| Registration { valid_till, stake: Zero::zero() });
// new `valid_till` must be greater than or equal to the old one
ensure!(
valid_till >= registration.valid_till,
Error::<T>::CannotReduceRegistrationLease,
);
registration.valid_till = valid_till;
// regarding the stake, there are three options:
// - if the relayer stake is larger than the required stake, we unreserve the difference
// - if the relayer stake equals the required stake, we do nothing
// - if the relayer stake is smaller than the required stake, we reserve the missing amount
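// e.g. (illustrative numbers only): with 1_000 currently reserved and a required stake
// of 800 we unreserve 200; with 800 reserved and 1_000 required we reserve the missing 200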
let required_stake = Pallet::<T>::required_stake();
if let Some(to_unreserve) = registration.stake.checked_sub(&required_stake) {
Self::do_unreserve(&relayer, to_unreserve)?;
} else if let Some(to_reserve) = required_stake.checked_sub(&registration.stake) {
T::StakeAndSlash::reserve(&relayer, to_reserve).map_err(|e| {
log::trace!(
target: LOG_TARGET,
"Failed to reserve {:?} on relayer {:?} account: {:?}",
to_reserve,
relayer,
e,
);
Error::<T>::FailedToReserve
})?;
}
registration.stake = required_stake;
log::trace!(target: LOG_TARGET, "Successfully registered relayer: {:?}", relayer);
Self::deposit_event(Event::<T>::RegistrationUpdated {
relayer: relayer.clone(),
registration,
});
*maybe_registration = Some(registration);
Ok(())
})
}
/// `Deregister` relayer.
///
/// After this call, message delivery transactions of the relayer won't get any priority
/// boost.
#[pallet::call_index(2)]
#[pallet::weight(T::WeightInfo::deregister())]
pub fn deregister(origin: OriginFor<T>) -> DispatchResult {
let relayer = ensure_signed(origin)?;
RegisteredRelayers::<T>::try_mutate(&relayer, |maybe_registration| -> DispatchResult {
let registration = match maybe_registration.take() {
Some(registration) => registration,
None => fail!(Error::<T>::NotRegistered),
};
// we can't deregister until `valid_till + 1`
ensure!(
registration.valid_till < frame_system::Pallet::<T>::block_number(),
Error::<T>::RegistrationIsStillActive,
);
// if stake is non-zero, we should do unreserve
if !registration.stake.is_zero() {
Self::do_unreserve(&relayer, registration.stake)?;
}
log::trace!(target: LOG_TARGET, "Successfully deregistered relayer: {:?}", relayer);
Self::deposit_event(Event::<T>::Deregistered { relayer: relayer.clone() });
*maybe_registration = None;
Ok(())
})
}
}
impl<T: Config> Pallet<T> {
/// Returns true if given relayer registration is active at current block.
///
/// This call respects both `RequiredStake` and `RequiredRegistrationLease`, meaning that
/// it'll return false if registered stake is lower than required or if remaining lease
/// is less than `RequiredRegistrationLease`.
pub fn is_registration_active(relayer: &T::AccountId) -> bool {
let registration = match Self::registered_relayer(relayer) {
Some(registration) => registration,
None => return false,
};
// registration is inactive if relayer stake is less than required
if registration.stake < Self::required_stake() {
return false
}
// registration is inactive if it ends soon
let remaining_lease = registration
.valid_till
.saturating_sub(frame_system::Pallet::<T>::block_number());
if remaining_lease <= Self::required_registration_lease() {
return false
}
true
}
/// Slash and `deregister` relayer. This function slashes all staked balance.
///
/// Slashing may fail internally, but the error is swallowed and only logged; the relayer is
/// deregistered in any case.
pub fn slash_and_deregister(
relayer: &T::AccountId,
slash_destination: ExplicitOrAccountParams<T::AccountId>,
) {
let registration = match RegisteredRelayers::<T>::take(relayer) {
Some(registration) => registration,
None => {
log::trace!(
target: crate::LOG_TARGET,
"Cannot slash unregistered relayer {:?}",
relayer,
);
return
},
};
match T::StakeAndSlash::repatriate_reserved(
relayer,
slash_destination.clone(),
registration.stake,
) {
Ok(failed_to_slash) if failed_to_slash.is_zero() => {
log::trace!(
target: crate::LOG_TARGET,
"Relayer account {:?} has been slashed for {:?}. Funds were deposited to {:?}",
relayer,
registration.stake,
slash_destination,
);
},
Ok(failed_to_slash) => {
log::trace!(
target: crate::LOG_TARGET,
"Relayer account {:?} has been partially slashed for {:?}. Funds were deposited to {:?}. \
Failed to slash: {:?}",
relayer,
registration.stake,
slash_destination,
failed_to_slash,
);
},
Err(e) => {
// it may fail if there's no beneficiary account. For us it means that this
// account must exist before we deploy the bridge
log::debug!(
target: crate::LOG_TARGET,
"Failed to slash relayer account {:?}: {:?}. Maybe beneficiary account doesn't exist? \
Beneficiary: {:?}, amount: {:?}",
relayer,
e,
slash_destination,
registration.stake,
);
},
}
}
/// Register reward for given relayer.
pub fn register_relayer_reward(
rewards_account_params: RewardsAccountParams,
relayer: &T::AccountId,
reward: T::Reward,
) {
if reward.is_zero() {
return
}
RelayerRewards::<T>::mutate(
relayer,
rewards_account_params,
|old_reward: &mut Option<T::Reward>| {
let new_reward = old_reward.unwrap_or_else(Zero::zero).saturating_add(reward);
*old_reward = Some(new_reward);
log::trace!(
target: crate::LOG_TARGET,
"Relayer {:?} can now claim reward for serving payer {:?}: {:?}",
relayer,
rewards_account_params,
new_reward,
);
Self::deposit_event(Event::<T>::RewardRegistered {
relayer: relayer.clone(),
rewards_account_params,
reward,
});
},
);
}
/// Return required registration lease.
pub(crate) fn required_registration_lease() -> BlockNumberFor<T> {
<T::StakeAndSlash as StakeAndSlash<
T::AccountId,
BlockNumberFor<T>,
T::Reward,
>>::RequiredRegistrationLease::get()
}
/// Return required stake.
pub(crate) fn required_stake() -> T::Reward {
<T::StakeAndSlash as StakeAndSlash<
T::AccountId,
BlockNumberFor<T>,
T::Reward,
>>::RequiredStake::get()
}
/// `Unreserve` given amount on relayer account.
fn do_unreserve(relayer: &T::AccountId, amount: T::Reward) -> DispatchResult {
let failed_to_unreserve = T::StakeAndSlash::unreserve(relayer, amount);
if !failed_to_unreserve.is_zero() {
log::trace!(
target: LOG_TARGET,
"Failed to unreserve {:?}/{:?} on relayer {:?} account",
failed_to_unreserve,
amount,
relayer,
);
fail!(Error::<T>::FailedToUnreserve)
}
Ok(())
}
}
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event<T: Config> {
/// Relayer reward has been registered and may be claimed later.
RewardRegistered {
/// Relayer account that can claim reward.
relayer: T::AccountId,
/// Relayer can claim reward from this account.
rewards_account_params: RewardsAccountParams,
/// Reward amount.
reward: T::Reward,
},
/// Reward has been paid to the relayer.
RewardPaid {
/// Relayer account that has been rewarded.
relayer: T::AccountId,
/// Relayer has received reward from this account.
rewards_account_params: RewardsAccountParams,
/// Reward amount.
reward: T::Reward,
},
/// Relayer registration has been added or updated.
RegistrationUpdated {
/// Relayer account that has been registered.
relayer: T::AccountId,
/// Relayer registration.
registration: Registration<BlockNumberFor<T>, T::Reward>,
},
/// Relayer has been `deregistered`.
Deregistered {
/// Relayer account that has been `deregistered`.
relayer: T::AccountId,
},
/// Relayer has been slashed and `deregistered`.
SlashedAndDeregistered {
/// Relayer account that has been `deregistered`.
relayer: T::AccountId,
/// Registration that was removed.
registration: Registration<BlockNumberFor<T>, T::Reward>,
},
}
#[pallet::error]
pub enum Error<T> {
/// No reward can be claimed by given relayer.
NoRewardForRelayer,
/// Reward payment procedure has failed.
FailedToPayReward,
/// The relayer has tried to register for a past block, or the registration lease
/// is too short.
InvalidRegistrationLease,
/// New registration lease is less than the previous one.
CannotReduceRegistrationLease,
/// Failed to reserve enough funds on relayer account.
FailedToReserve,
/// Failed to `unreserve` enough funds on relayer account.
FailedToUnreserve,
/// Cannot `deregister` if not registered.
NotRegistered,
/// Failed to `deregister` relayer, because lease is still active.
RegistrationIsStillActive,
}
/// Map of the relayer => accumulated reward.
#[pallet::storage]
#[pallet::getter(fn relayer_reward)]
pub type RelayerRewards<T: Config> = StorageDoubleMap<
_,
<RelayerRewardsKeyProviderOf<T> as StorageDoubleMapKeyProvider>::Hasher1,
<RelayerRewardsKeyProviderOf<T> as StorageDoubleMapKeyProvider>::Key1,
<RelayerRewardsKeyProviderOf<T> as StorageDoubleMapKeyProvider>::Hasher2,
<RelayerRewardsKeyProviderOf<T> as StorageDoubleMapKeyProvider>::Key2,
<RelayerRewardsKeyProviderOf<T> as StorageDoubleMapKeyProvider>::Value,
OptionQuery,
>;
/// Relayers that have reserved some of their balance to get a free priority boost
/// for their message delivery transactions.
///
/// Other relayers may submit transactions as well, but they will have default
/// priority and will be rejected (unless they include a significant tip) if a
/// registered relayer is present.
#[pallet::storage]
#[pallet::getter(fn registered_relayer)]
pub type RegisteredRelayers<T: Config> = StorageMap<
_,
Blake2_128Concat,
T::AccountId,
Registration<BlockNumberFor<T>, T::Reward>,
OptionQuery,
>;
}
#[cfg(test)]
mod tests {
use super::*;
use mock::{RuntimeEvent as TestEvent, *};
use crate::Event::{RewardPaid, RewardRegistered};
use bp_messages::LaneId;
use bp_relayers::RewardsAccountOwner;
use frame_support::{
assert_noop, assert_ok,
traits::fungible::{Inspect, Mutate},
};
use frame_system::{EventRecord, Pallet as System, Phase};
use sp_runtime::DispatchError;
fn get_ready_for_events() {
System::<TestRuntime>::set_block_number(1);
System::<TestRuntime>::reset_events();
}
#[test]
fn register_relayer_reward_emit_event() {
run_test(|| {
get_ready_for_events();
Pallet::<TestRuntime>::register_relayer_reward(
TEST_REWARDS_ACCOUNT_PARAMS,
&REGULAR_RELAYER,
100,
);
// Check if the `RewardRegistered` event was emitted.
assert_eq!(
System::<TestRuntime>::events().last(),
Some(&EventRecord {
phase: Phase::Initialization,
event: TestEvent::Relayers(RewardRegistered {
relayer: REGULAR_RELAYER,
rewards_account_params: TEST_REWARDS_ACCOUNT_PARAMS,
reward: 100
}),
topics: vec![],
}),
);
});
}
#[test]
fn root_cant_claim_anything() {
run_test(|| {
assert_noop!(
Pallet::<TestRuntime>::claim_rewards(
RuntimeOrigin::root(),
TEST_REWARDS_ACCOUNT_PARAMS
),
DispatchError::BadOrigin,
);
});
}
#[test]
fn relayer_cant_claim_if_no_reward_exists() {
run_test(|| {
assert_noop!(
Pallet::<TestRuntime>::claim_rewards(
RuntimeOrigin::signed(REGULAR_RELAYER),
TEST_REWARDS_ACCOUNT_PARAMS
),
Error::<TestRuntime>::NoRewardForRelayer,
);
});
}
#[test]
fn relayer_cant_claim_if_payment_procedure_fails() {
run_test(|| {
RelayerRewards::<TestRuntime>::insert(
FAILING_RELAYER,
TEST_REWARDS_ACCOUNT_PARAMS,
100,
);
assert_noop!(
Pallet::<TestRuntime>::claim_rewards(
RuntimeOrigin::signed(FAILING_RELAYER),
TEST_REWARDS_ACCOUNT_PARAMS
),
Error::<TestRuntime>::FailedToPayReward,
);
});
}
#[test]
fn relayer_can_claim_reward() {
run_test(|| {
get_ready_for_events();
RelayerRewards::<TestRuntime>::insert(
REGULAR_RELAYER,
TEST_REWARDS_ACCOUNT_PARAMS,
100,
);
assert_ok!(Pallet::<TestRuntime>::claim_rewards(
RuntimeOrigin::signed(REGULAR_RELAYER),
TEST_REWARDS_ACCOUNT_PARAMS
));
assert_eq!(
RelayerRewards::<TestRuntime>::get(REGULAR_RELAYER, TEST_REWARDS_ACCOUNT_PARAMS),
None
);
// Check if the `RewardPaid` event was emitted.
assert_eq!(
System::<TestRuntime>::events().last(),
Some(&EventRecord {
phase: Phase::Initialization,
event: TestEvent::Relayers(RewardPaid {
relayer: REGULAR_RELAYER,
rewards_account_params: TEST_REWARDS_ACCOUNT_PARAMS,
reward: 100
}),
topics: vec![],
}),
);
});
}
#[test]
fn pay_reward_from_account_actually_pays_reward() {
type Balances = pallet_balances::Pallet<TestRuntime>;
type PayLaneRewardFromAccount = bp_relayers::PayRewardFromAccount<Balances, AccountId>;
run_test(|| {
let in_lane_0 = RewardsAccountParams::new(
LaneId([0, 0, 0, 0]),
*b"test",
RewardsAccountOwner::ThisChain,
);
let out_lane_1 = RewardsAccountParams::new(
LaneId([0, 0, 0, 1]),
*b"test",
RewardsAccountOwner::BridgedChain,
);
let in_lane0_rewards_account = PayLaneRewardFromAccount::rewards_account(in_lane_0);
let out_lane1_rewards_account = PayLaneRewardFromAccount::rewards_account(out_lane_1);
Balances::mint_into(&in_lane0_rewards_account, 100).unwrap();
Balances::mint_into(&out_lane1_rewards_account, 100).unwrap();
assert_eq!(Balances::balance(&in_lane0_rewards_account), 100);
assert_eq!(Balances::balance(&out_lane1_rewards_account), 100);
assert_eq!(Balances::balance(&1), 0);
PayLaneRewardFromAccount::pay_reward(&1, in_lane_0, 100).unwrap();
assert_eq!(Balances::balance(&in_lane0_rewards_account), 0);
assert_eq!(Balances::balance(&out_lane1_rewards_account), 100);
assert_eq!(Balances::balance(&1), 100);
PayLaneRewardFromAccount::pay_reward(&1, out_lane_1, 100).unwrap();
assert_eq!(Balances::balance(&in_lane0_rewards_account), 0);
assert_eq!(Balances::balance(&out_lane1_rewards_account), 0);
assert_eq!(Balances::balance(&1), 200);
});
}
#[test]
fn register_fails_if_valid_till_is_a_past_block() {
run_test(|| {
System::<TestRuntime>::set_block_number(100);
assert_noop!(
Pallet::<TestRuntime>::register(RuntimeOrigin::signed(REGISTER_RELAYER), 50),
Error::<TestRuntime>::InvalidRegistrationLease,
);
});
}
#[test]
fn register_fails_if_valid_till_lease_is_less_than_required() {
run_test(|| {
System::<TestRuntime>::set_block_number(100);
assert_noop!(
Pallet::<TestRuntime>::register(
RuntimeOrigin::signed(REGISTER_RELAYER),
99 + Lease::get()
),
Error::<TestRuntime>::InvalidRegistrationLease,
);
});
}
#[test]
fn register_works() {
run_test(|| {
get_ready_for_events();
assert_ok!(Pallet::<TestRuntime>::register(
RuntimeOrigin::signed(REGISTER_RELAYER),
150
));
assert_eq!(Balances::reserved_balance(REGISTER_RELAYER), Stake::get());
assert_eq!(
Pallet::<TestRuntime>::registered_relayer(REGISTER_RELAYER),
Some(Registration { valid_till: 150, stake: Stake::get() }),
);
assert_eq!(
System::<TestRuntime>::events().last(),
Some(&EventRecord {
phase: Phase::Initialization,
event: TestEvent::Relayers(Event::RegistrationUpdated {
relayer: REGISTER_RELAYER,
registration: Registration { valid_till: 150, stake: Stake::get() },
}),
topics: vec![],
}),
);
});
}
#[test]
fn register_fails_if_new_valid_till_is_lesser_than_previous() {
run_test(|| {
assert_ok!(Pallet::<TestRuntime>::register(
RuntimeOrigin::signed(REGISTER_RELAYER),
150
));
assert_noop!(
Pallet::<TestRuntime>::register(RuntimeOrigin::signed(REGISTER_RELAYER), 125),
Error::<TestRuntime>::CannotReduceRegistrationLease,
);
});
}
#[test]
fn register_fails_if_it_cant_unreserve_some_balance_if_required_stake_decreases() {
run_test(|| {
RegisteredRelayers::<TestRuntime>::insert(
REGISTER_RELAYER,
Registration { valid_till: 150, stake: Stake::get() + 1 },
);
assert_noop!(
Pallet::<TestRuntime>::register(RuntimeOrigin::signed(REGISTER_RELAYER), 150),
Error::<TestRuntime>::FailedToUnreserve,
);
});
}
#[test]
fn register_unreserves_some_balance_if_required_stake_decreases() {
run_test(|| {
get_ready_for_events();
RegisteredRelayers::<TestRuntime>::insert(
REGISTER_RELAYER,
Registration { valid_till: 150, stake: Stake::get() + 1 },
);
TestStakeAndSlash::reserve(&REGISTER_RELAYER, Stake::get() + 1).unwrap();
assert_eq!(Balances::reserved_balance(REGISTER_RELAYER), Stake::get() + 1);
let free_balance = Balances::free_balance(REGISTER_RELAYER);
assert_ok!(Pallet::<TestRuntime>::register(
RuntimeOrigin::signed(REGISTER_RELAYER),
150
));
assert_eq!(Balances::reserved_balance(REGISTER_RELAYER), Stake::get());
assert_eq!(Balances::free_balance(REGISTER_RELAYER), free_balance + 1);
assert_eq!(
Pallet::<TestRuntime>::registered_relayer(REGISTER_RELAYER),
Some(Registration { valid_till: 150, stake: Stake::get() }),
);
assert_eq!(
System::<TestRuntime>::events().last(),
Some(&EventRecord {
phase: Phase::Initialization,
event: TestEvent::Relayers(Event::RegistrationUpdated {
relayer: REGISTER_RELAYER,
registration: Registration { valid_till: 150, stake: Stake::get() }
}),
topics: vec![],
}),
);
});
}
#[test]
fn register_fails_if_it_cant_reserve_some_balance() {
run_test(|| {
Balances::set_balance(&REGISTER_RELAYER, 0);
assert_noop!(
Pallet::<TestRuntime>::register(RuntimeOrigin::signed(REGISTER_RELAYER), 150),
Error::<TestRuntime>::FailedToReserve,
);
});
}
#[test]
fn register_fails_if_it_cant_reserve_some_balance_if_required_stake_increases() {
run_test(|| {
RegisteredRelayers::<TestRuntime>::insert(
REGISTER_RELAYER,
Registration { valid_till: 150, stake: Stake::get() - 1 },
);
Balances::set_balance(&REGISTER_RELAYER, 0);
assert_noop!(
Pallet::<TestRuntime>::register(RuntimeOrigin::signed(REGISTER_RELAYER), 150),
Error::<TestRuntime>::FailedToReserve,
);
});
}
#[test]
fn register_reserves_some_balance_if_required_stake_increases() {
run_test(|| {
get_ready_for_events();
RegisteredRelayers::<TestRuntime>::insert(
REGISTER_RELAYER,
Registration { valid_till: 150, stake: Stake::get() - 1 },
);
TestStakeAndSlash::reserve(&REGISTER_RELAYER, Stake::get() - 1).unwrap();
let free_balance = Balances::free_balance(REGISTER_RELAYER);
assert_ok!(Pallet::<TestRuntime>::register(
RuntimeOrigin::signed(REGISTER_RELAYER),
150
));
assert_eq!(Balances::reserved_balance(REGISTER_RELAYER), Stake::get());
assert_eq!(Balances::free_balance(REGISTER_RELAYER), free_balance - 1);
assert_eq!(
Pallet::<TestRuntime>::registered_relayer(REGISTER_RELAYER),
Some(Registration { valid_till: 150, stake: Stake::get() }),
);
assert_eq!(
System::<TestRuntime>::events().last(),
Some(&EventRecord {
phase: Phase::Initialization,
event: TestEvent::Relayers(Event::RegistrationUpdated {
relayer: REGISTER_RELAYER,
registration: Registration { valid_till: 150, stake: Stake::get() }
}),
topics: vec![],
}),
);
});
}
#[test]
fn deregister_fails_if_not_registered() {
run_test(|| {
assert_noop!(
Pallet::<TestRuntime>::deregister(RuntimeOrigin::signed(REGISTER_RELAYER)),
Error::<TestRuntime>::NotRegistered,
);
});
}
#[test]
fn deregister_fails_if_registration_is_still_active() {
run_test(|| {
assert_ok!(Pallet::<TestRuntime>::register(
RuntimeOrigin::signed(REGISTER_RELAYER),
150
));
System::<TestRuntime>::set_block_number(100);
assert_noop!(
Pallet::<TestRuntime>::deregister(RuntimeOrigin::signed(REGISTER_RELAYER)),
Error::<TestRuntime>::RegistrationIsStillActive,
);
});
}
#[test]
fn deregister_works() {
run_test(|| {
get_ready_for_events();
assert_ok!(Pallet::<TestRuntime>::register(
RuntimeOrigin::signed(REGISTER_RELAYER),
150
));
System::<TestRuntime>::set_block_number(151);
let reserved_balance = Balances::reserved_balance(REGISTER_RELAYER);
let free_balance = Balances::free_balance(REGISTER_RELAYER);
assert_ok!(Pallet::<TestRuntime>::deregister(RuntimeOrigin::signed(REGISTER_RELAYER)));
assert_eq!(
Balances::reserved_balance(REGISTER_RELAYER),
reserved_balance - Stake::get()
);
assert_eq!(Balances::free_balance(REGISTER_RELAYER), free_balance + Stake::get());
assert_eq!(
System::<TestRuntime>::events().last(),
Some(&EventRecord {
phase: Phase::Initialization,
event: TestEvent::Relayers(Event::Deregistered { relayer: REGISTER_RELAYER }),
topics: vec![],
}),
);
});
}
#[test]
fn is_registration_active_is_false_for_unregistered_relayer() {
run_test(|| {
assert!(!Pallet::<TestRuntime>::is_registration_active(&REGISTER_RELAYER));
});
}
#[test]
fn is_registration_active_is_false_when_stake_is_too_low() {
run_test(|| {
RegisteredRelayers::<TestRuntime>::insert(
REGISTER_RELAYER,
Registration { valid_till: 150, stake: Stake::get() - 1 },
);
assert!(!Pallet::<TestRuntime>::is_registration_active(&REGISTER_RELAYER));
});
}
#[test]
fn is_registration_active_is_false_when_remaining_lease_is_too_low() {
run_test(|| {
System::<TestRuntime>::set_block_number(150 - Lease::get());
RegisteredRelayers::<TestRuntime>::insert(
REGISTER_RELAYER,
Registration { valid_till: 150, stake: Stake::get() },
);
assert!(!Pallet::<TestRuntime>::is_registration_active(&REGISTER_RELAYER));
});
}
#[test]
fn is_registration_active_is_true_when_relayer_is_properly_registered() {
run_test(|| {
System::<TestRuntime>::set_block_number(150 - Lease::get());
RegisteredRelayers::<TestRuntime>::insert(
REGISTER_RELAYER,
Registration { valid_till: 151, stake: Stake::get() },
);
assert!(Pallet::<TestRuntime>::is_registration_active(&REGISTER_RELAYER));
});
}
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
#![cfg(test)]
use crate as pallet_bridge_relayers;
use bp_messages::LaneId;
use bp_relayers::{
PayRewardFromAccount, PaymentProcedure, RewardsAccountOwner, RewardsAccountParams,
};
use frame_support::{
derive_impl, parameter_types, traits::fungible::Mutate, weights::RuntimeDbWeight,
};
use sp_runtime::BuildStorage;
pub type AccountId = u64;
pub type Balance = u64;
pub type BlockNumber = u64;
pub type TestStakeAndSlash = pallet_bridge_relayers::StakeAndSlashNamed<
AccountId,
BlockNumber,
Balances,
ReserveId,
Stake,
Lease,
>;
type Block = frame_system::mocking::MockBlock<TestRuntime>;
frame_support::construct_runtime! {
pub enum TestRuntime
{
System: frame_system::{Pallet, Call, Config<T>, Storage, Event<T>},
Balances: pallet_balances::{Pallet, Event<T>},
Relayers: pallet_bridge_relayers::{Pallet, Call, Event<T>},
}
}
parameter_types! {
pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 1, write: 2 };
pub const ExistentialDeposit: Balance = 1;
pub const ReserveId: [u8; 8] = *b"brdgrlrs";
pub const Stake: Balance = 1_000;
pub const Lease: BlockNumber = 8;
}
#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
impl frame_system::Config for TestRuntime {
type Block = Block;
type AccountData = pallet_balances::AccountData<Balance>;
type DbWeight = DbWeight;
}
#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)]
impl pallet_balances::Config for TestRuntime {
type ReserveIdentifier = [u8; 8];
type AccountStore = System;
}
impl pallet_bridge_relayers::Config for TestRuntime {
type RuntimeEvent = RuntimeEvent;
type Reward = Balance;
type PaymentProcedure = TestPaymentProcedure;
type StakeAndSlash = TestStakeAndSlash;
type WeightInfo = ();
}
#[cfg(feature = "runtime-benchmarks")]
impl pallet_bridge_relayers::benchmarking::Config for TestRuntime {
fn prepare_rewards_account(account_params: RewardsAccountParams, reward: Balance) {
let rewards_account =
bp_relayers::PayRewardFromAccount::<Balances, AccountId>::rewards_account(
account_params,
);
Self::deposit_account(rewards_account, reward);
}
fn deposit_account(account: Self::AccountId, balance: Self::Reward) {
Balances::mint_into(&account, balance.saturating_add(ExistentialDeposit::get())).unwrap();
}
}
/// Rewards account params that we're using in tests.
pub const TEST_REWARDS_ACCOUNT_PARAMS: RewardsAccountParams =
RewardsAccountParams::new(LaneId([0, 0, 0, 0]), *b"test", RewardsAccountOwner::ThisChain);
/// Regular relayer that may receive rewards.
pub const REGULAR_RELAYER: AccountId = 1;
/// Relayer that can't receive rewards.
pub const FAILING_RELAYER: AccountId = 2;
/// Relayer that is able to register.
pub const REGISTER_RELAYER: AccountId = 42;
/// Payment procedure that rejects payments to the `FAILING_RELAYER`.
pub struct TestPaymentProcedure;
impl TestPaymentProcedure {
pub fn rewards_account(params: RewardsAccountParams) -> AccountId {
PayRewardFromAccount::<(), AccountId>::rewards_account(params)
}
}
impl PaymentProcedure<AccountId, Balance> for TestPaymentProcedure {
type Error = ();
fn pay_reward(
relayer: &AccountId,
_lane_id: RewardsAccountParams,
_reward: Balance,
) -> Result<(), Self::Error> {
match *relayer {
FAILING_RELAYER => Err(()),
_ => Ok(()),
}
}
}
/// Return test externalities to use in tests.
pub fn new_test_ext() -> sp_io::TestExternalities {
let t = frame_system::GenesisConfig::<TestRuntime>::default().build_storage().unwrap();
sp_io::TestExternalities::new(t)
}
/// Run pallet test.
pub fn run_test<T>(test: impl FnOnce() -> T) -> T {
new_test_ext().execute_with(|| {
Balances::mint_into(&REGISTER_RELAYER, ExistentialDeposit::get() + 10 * Stake::get())
.unwrap();
test()
})
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Code that allows relayers pallet to be used as a payment mechanism for the messages pallet.
use crate::{Config, Pallet};
use bp_messages::{
source_chain::{DeliveryConfirmationPayments, RelayersRewards},
LaneId, MessageNonce,
};
use bp_relayers::{RewardsAccountOwner, RewardsAccountParams};
use frame_support::{sp_runtime::SaturatedConversion, traits::Get};
use sp_arithmetic::traits::{Saturating, Zero};
use sp_std::{collections::vec_deque::VecDeque, marker::PhantomData, ops::RangeInclusive};
/// Adapter that allows relayers pallet to be used as a delivery+dispatch payment mechanism
/// for the messages pallet.
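///
/// Wiring sketch (hedged: the instance and constant names below are placeholders for
/// whatever the runtime actually defines):
///
/// ```ignore
/// impl pallet_bridge_messages::Config<WithBridgedChainMessagesInstance> for Runtime {
///     // ...
///     type DeliveryConfirmationPayments = DeliveryConfirmationPaymentsAdapter<
///         Runtime,
///         WithBridgedChainMessagesInstance,
///         frame_support::traits::ConstU64<100_000>, // reward per delivered message
///     >;
/// }
/// ```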
pub struct DeliveryConfirmationPaymentsAdapter<T, MI, DeliveryReward>(
PhantomData<(T, MI, DeliveryReward)>,
);
impl<T, MI, DeliveryReward> DeliveryConfirmationPayments<T::AccountId>
for DeliveryConfirmationPaymentsAdapter<T, MI, DeliveryReward>
where
T: Config + pallet_bridge_messages::Config<MI>,
MI: 'static,
DeliveryReward: Get<T::Reward>,
{
type Error = &'static str;
fn pay_reward(
lane_id: LaneId,
messages_relayers: VecDeque<bp_messages::UnrewardedRelayer<T::AccountId>>,
confirmation_relayer: &T::AccountId,
received_range: &RangeInclusive<bp_messages::MessageNonce>,
) -> MessageNonce {
let relayers_rewards =
bp_messages::calc_relayers_rewards::<T::AccountId>(messages_relayers, received_range);
let rewarded_relayers = relayers_rewards.len();
register_relayers_rewards::<T>(
confirmation_relayer,
relayers_rewards,
RewardsAccountParams::new(
lane_id,
T::BridgedChainId::get(),
RewardsAccountOwner::BridgedChain,
),
DeliveryReward::get(),
);
rewarded_relayers as _
}
}
// Update rewards to given relayers, optionally rewarding confirmation relayer.
fn register_relayers_rewards<T: Config>(
confirmation_relayer: &T::AccountId,
relayers_rewards: RelayersRewards<T::AccountId>,
lane_id: RewardsAccountParams,
delivery_fee: T::Reward,
) {
// reward every relayer except `confirmation_relayer`
let mut confirmation_relayer_reward = T::Reward::zero();
for (relayer, messages) in relayers_rewards {
// sane runtime configurations guarantee that the number of messages will be below
// `u32::MAX`
let relayer_reward = T::Reward::saturated_from(messages).saturating_mul(delivery_fee);
if relayer != *confirmation_relayer {
Pallet::<T>::register_relayer_reward(lane_id, &relayer, relayer_reward);
} else {
confirmation_relayer_reward =
confirmation_relayer_reward.saturating_add(relayer_reward);
}
}
// finally - pay reward to confirmation relayer
Pallet::<T>::register_relayer_reward(
lane_id,
confirmation_relayer,
confirmation_relayer_reward,
);
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{mock::*, RelayerRewards};
const RELAYER_1: AccountId = 1;
const RELAYER_2: AccountId = 2;
const RELAYER_3: AccountId = 3;
fn relayers_rewards() -> RelayersRewards<AccountId> {
vec![(RELAYER_1, 2), (RELAYER_2, 3)].into_iter().collect()
}
#[test]
fn confirmation_relayer_is_rewarded_if_it_has_also_delivered_messages() {
run_test(|| {
register_relayers_rewards::<TestRuntime>(
&RELAYER_2,
relayers_rewards(),
TEST_REWARDS_ACCOUNT_PARAMS,
50,
);
assert_eq!(
RelayerRewards::<TestRuntime>::get(RELAYER_1, TEST_REWARDS_ACCOUNT_PARAMS),
Some(100)
);
assert_eq!(
RelayerRewards::<TestRuntime>::get(RELAYER_2, TEST_REWARDS_ACCOUNT_PARAMS),
Some(150)
);
});
}
#[test]
fn confirmation_relayer_is_not_rewarded_if_it_has_not_delivered_any_messages() {
run_test(|| {
register_relayers_rewards::<TestRuntime>(
&RELAYER_3,
relayers_rewards(),
TEST_REWARDS_ACCOUNT_PARAMS,
50,
);
assert_eq!(
RelayerRewards::<TestRuntime>::get(RELAYER_1, TEST_REWARDS_ACCOUNT_PARAMS),
Some(100)
);
assert_eq!(
RelayerRewards::<TestRuntime>::get(RELAYER_2, TEST_REWARDS_ACCOUNT_PARAMS),
Some(150)
);
assert_eq!(
RelayerRewards::<TestRuntime>::get(RELAYER_3, TEST_REWARDS_ACCOUNT_PARAMS),
None
);
});
}
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Code that allows `NamedReservableCurrency` to be used as a `StakeAndSlash`
//! mechanism of the relayers pallet.
use bp_relayers::{ExplicitOrAccountParams, PayRewardFromAccount, StakeAndSlash};
use codec::Codec;
use frame_support::traits::{tokens::BalanceStatus, NamedReservableCurrency};
use sp_runtime::{traits::Get, DispatchError, DispatchResult};
use sp_std::{fmt::Debug, marker::PhantomData};
/// `StakeAndSlash` that works with `NamedReservableCurrency` and uses named
/// reservations.
///
/// **WARNING**: this implementation assumes that the relayers pallet is configured to
/// use the [`bp_relayers::PayRewardFromAccount`] as its relayers payment scheme.
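///
/// Wiring sketch, mirroring this pallet's test mock (all parameter types are placeholders
/// that the runtime must provide):
///
/// ```ignore
/// pub type RuntimeStakeAndSlash = StakeAndSlashNamed<
///     AccountId,
///     BlockNumber,
///     Balances,  // any `NamedReservableCurrency`
///     ReserveId, // `Get<ReserveIdentifier>`, e.g. `*b"brdgrlrs"`
///     Stake,     // `Get<Balance>`: required stake
///     Lease,     // `Get<BlockNumber>`: required registration lease
/// >;
/// ```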
pub struct StakeAndSlashNamed<AccountId, BlockNumber, Currency, ReserveId, Stake, Lease>(
PhantomData<(AccountId, BlockNumber, Currency, ReserveId, Stake, Lease)>,
);
impl<AccountId, BlockNumber, Currency, ReserveId, Stake, Lease>
StakeAndSlash<AccountId, BlockNumber, Currency::Balance>
for StakeAndSlashNamed<AccountId, BlockNumber, Currency, ReserveId, Stake, Lease>
where
AccountId: Codec + Debug,
Currency: NamedReservableCurrency<AccountId>,
ReserveId: Get<Currency::ReserveIdentifier>,
Stake: Get<Currency::Balance>,
Lease: Get<BlockNumber>,
{
type RequiredStake = Stake;
type RequiredRegistrationLease = Lease;
fn reserve(relayer: &AccountId, amount: Currency::Balance) -> DispatchResult {
Currency::reserve_named(&ReserveId::get(), relayer, amount)
}
fn unreserve(relayer: &AccountId, amount: Currency::Balance) -> Currency::Balance {
Currency::unreserve_named(&ReserveId::get(), relayer, amount)
}
fn repatriate_reserved(
relayer: &AccountId,
beneficiary: ExplicitOrAccountParams<AccountId>,
amount: Currency::Balance,
) -> Result<Currency::Balance, DispatchError> {
let beneficiary_account = match beneficiary {
ExplicitOrAccountParams::Explicit(account) => account,
ExplicitOrAccountParams::Params(params) =>
PayRewardFromAccount::<(), AccountId>::rewards_account(params),
};
Currency::repatriate_reserved_named(
&ReserveId::get(),
relayer,
&beneficiary_account,
amount,
BalanceStatus::Free,
)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::mock::*;
use frame_support::traits::fungible::Mutate;
fn test_stake() -> Balance {
Stake::get()
}
#[test]
fn reserve_works() {
run_test(|| {
assert!(TestStakeAndSlash::reserve(&1, test_stake()).is_err());
assert_eq!(Balances::free_balance(1), 0);
assert_eq!(Balances::reserved_balance(1), 0);
Balances::mint_into(&2, test_stake() - 1).unwrap();
assert!(TestStakeAndSlash::reserve(&2, test_stake()).is_err());
assert_eq!(Balances::free_balance(2), test_stake() - 1);
assert_eq!(Balances::reserved_balance(2), 0);
Balances::mint_into(&3, test_stake() * 2).unwrap();
assert_eq!(TestStakeAndSlash::reserve(&3, test_stake()), Ok(()));
assert_eq!(Balances::free_balance(3), test_stake());
assert_eq!(Balances::reserved_balance(3), test_stake());
})
}
#[test]
fn unreserve_works() {
run_test(|| {
assert_eq!(TestStakeAndSlash::unreserve(&1, test_stake()), test_stake());
assert_eq!(Balances::free_balance(1), 0);
assert_eq!(Balances::reserved_balance(1), 0);
Balances::mint_into(&2, test_stake() * 2).unwrap();
TestStakeAndSlash::reserve(&2, test_stake() / 3).unwrap();
assert_eq!(
TestStakeAndSlash::unreserve(&2, test_stake()),
test_stake() - test_stake() / 3
);
assert_eq!(Balances::free_balance(2), test_stake() * 2);
assert_eq!(Balances::reserved_balance(2), 0);
Balances::mint_into(&3, test_stake() * 2).unwrap();
TestStakeAndSlash::reserve(&3, test_stake()).unwrap();
assert_eq!(TestStakeAndSlash::unreserve(&3, test_stake()), 0);
assert_eq!(Balances::free_balance(3), test_stake() * 2);
assert_eq!(Balances::reserved_balance(3), 0);
})
}
#[test]
fn repatriate_reserved_works() {
run_test(|| {
let beneficiary = TEST_REWARDS_ACCOUNT_PARAMS;
let beneficiary_account = TestPaymentProcedure::rewards_account(beneficiary);
let mut expected_balance = ExistentialDeposit::get();
Balances::mint_into(&beneficiary_account, expected_balance).unwrap();
assert_eq!(
TestStakeAndSlash::repatriate_reserved(
&1,
ExplicitOrAccountParams::Params(beneficiary),
test_stake()
),
Ok(test_stake())
);
assert_eq!(Balances::free_balance(1), 0);
assert_eq!(Balances::reserved_balance(1), 0);
assert_eq!(Balances::free_balance(beneficiary_account), expected_balance);
assert_eq!(Balances::reserved_balance(beneficiary_account), 0);
expected_balance += test_stake() / 3;
Balances::mint_into(&2, test_stake() * 2).unwrap();
TestStakeAndSlash::reserve(&2, test_stake() / 3).unwrap();
assert_eq!(
TestStakeAndSlash::repatriate_reserved(
&2,
ExplicitOrAccountParams::Params(beneficiary),
test_stake()
),
Ok(test_stake() - test_stake() / 3)
);
assert_eq!(Balances::free_balance(2), test_stake() * 2 - test_stake() / 3);
assert_eq!(Balances::reserved_balance(2), 0);
assert_eq!(Balances::free_balance(beneficiary_account), expected_balance);
assert_eq!(Balances::reserved_balance(beneficiary_account), 0);
expected_balance += test_stake();
Balances::mint_into(&3, test_stake() * 2).unwrap();
TestStakeAndSlash::reserve(&3, test_stake()).unwrap();
assert_eq!(
TestStakeAndSlash::repatriate_reserved(
&3,
ExplicitOrAccountParams::Params(beneficiary),
test_stake()
),
Ok(0)
);
assert_eq!(Balances::free_balance(3), test_stake());
assert_eq!(Balances::reserved_balance(3), 0);
assert_eq!(Balances::free_balance(beneficiary_account), expected_balance);
assert_eq!(Balances::reserved_balance(beneficiary_account), 0);
})
}
#[test]
fn repatriate_reserved_doesnt_work_when_beneficiary_account_is_missing() {
run_test(|| {
let beneficiary = TEST_REWARDS_ACCOUNT_PARAMS;
let beneficiary_account = TestPaymentProcedure::rewards_account(beneficiary);
Balances::mint_into(&3, test_stake() * 2).unwrap();
TestStakeAndSlash::reserve(&3, test_stake()).unwrap();
assert!(TestStakeAndSlash::repatriate_reserved(
&3,
ExplicitOrAccountParams::Params(beneficiary),
test_stake()
)
.is_err());
assert_eq!(Balances::free_balance(3), test_stake());
assert_eq!(Balances::reserved_balance(3), test_stake());
assert_eq!(Balances::free_balance(beneficiary_account), 0);
assert_eq!(Balances::reserved_balance(beneficiary_account), 0);
});
}
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Autogenerated weights for pallet_bridge_relayers
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
//! DATE: 2023-04-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
//! HOSTNAME: `covid`, CPU: `11th Gen Intel(R) Core(TM) i7-11800H @ 2.30GHz`
//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024
// Executed Command:
// target/release/rip-bridge-node
// benchmark
// pallet
// --chain=dev
// --steps=50
// --repeat=20
// --pallet=pallet_bridge_relayers
// --extrinsic=*
// --execution=wasm
// --wasm-execution=Compiled
// --heap-pages=4096
// --output=./modules/relayers/src/weights.rs
// --template=./.maintain/bridge-weight-template.hbs
#![allow(clippy::all)]
#![allow(unused_parens)]
#![allow(unused_imports)]
#![allow(missing_docs)]
use frame_support::{
traits::Get,
weights::{constants::RocksDbWeight, Weight},
};
use sp_std::marker::PhantomData;
/// Weight functions needed for pallet_bridge_relayers.
pub trait WeightInfo {
fn claim_rewards() -> Weight;
fn register() -> Weight;
fn deregister() -> Weight;
fn slash_and_deregister() -> Weight;
fn register_relayer_reward() -> Weight;
}
/// Weights for `pallet_bridge_relayers` that are generated using one of the Bridge testnets.
///
/// Those weights are test only and must never be used in production.
pub struct BridgeWeight<T>(PhantomData<T>);
impl<T: frame_system::Config> WeightInfo for BridgeWeight<T> {
/// Storage: BridgeRelayers RelayerRewards (r:1 w:1)
///
/// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540,
/// mode: MaxEncodedLen)
///
/// Storage: Balances TotalIssuance (r:1 w:0)
///
/// Proof: Balances TotalIssuance (max_values: Some(1), max_size: Some(8), added: 503, mode:
/// MaxEncodedLen)
///
/// Storage: System Account (r:1 w:1)
///
/// Proof: System Account (max_values: None, max_size: Some(104), added: 2579, mode:
/// MaxEncodedLen)
fn claim_rewards() -> Weight {
// Proof Size summary in bytes:
// Measured: `294`
// Estimated: `8592`
// Minimum execution time: 77_614 nanoseconds.
Weight::from_parts(79_987_000, 8592)
.saturating_add(T::DbWeight::get().reads(3_u64))
.saturating_add(T::DbWeight::get().writes(2_u64))
}
/// Storage: BridgeRelayers RegisteredRelayers (r:1 w:1)
///
/// Proof: BridgeRelayers RegisteredRelayers (max_values: None, max_size: Some(64), added: 2539,
/// mode: MaxEncodedLen)
///
/// Storage: Balances Reserves (r:1 w:1)
///
/// Proof: Balances Reserves (max_values: None, max_size: Some(849), added: 3324, mode:
/// MaxEncodedLen)
fn register() -> Weight {
// Proof Size summary in bytes:
// Measured: `87`
// Estimated: `7843`
// Minimum execution time: 39_590 nanoseconds.
Weight::from_parts(40_546_000, 7843)
.saturating_add(T::DbWeight::get().reads(2_u64))
.saturating_add(T::DbWeight::get().writes(2_u64))
}
/// Storage: BridgeRelayers RegisteredRelayers (r:1 w:1)
///
/// Proof: BridgeRelayers RegisteredRelayers (max_values: None, max_size: Some(64), added: 2539,
/// mode: MaxEncodedLen)
///
/// Storage: Balances Reserves (r:1 w:1)
///
/// Proof: Balances Reserves (max_values: None, max_size: Some(849), added: 3324, mode:
/// MaxEncodedLen)
fn deregister() -> Weight {
// Proof Size summary in bytes:
// Measured: `264`
// Estimated: `7843`
// Minimum execution time: 43_332 nanoseconds.
Weight::from_parts(45_087_000, 7843)
.saturating_add(T::DbWeight::get().reads(2_u64))
.saturating_add(T::DbWeight::get().writes(2_u64))
}
/// Storage: BridgeRelayers RegisteredRelayers (r:1 w:1)
///
/// Proof: BridgeRelayers RegisteredRelayers (max_values: None, max_size: Some(64), added: 2539,
/// mode: MaxEncodedLen)
///
/// Storage: Balances Reserves (r:1 w:1)
///
/// Proof: Balances Reserves (max_values: None, max_size: Some(849), added: 3324, mode:
/// MaxEncodedLen)
///
/// Storage: System Account (r:1 w:1)
///
/// Proof: System Account (max_values: None, max_size: Some(104), added: 2579, mode:
/// MaxEncodedLen)
fn slash_and_deregister() -> Weight {
// Proof Size summary in bytes:
// Measured: `380`
// Estimated: `11412`
// Minimum execution time: 42_358 nanoseconds.
Weight::from_parts(43_539_000, 11412)
.saturating_add(T::DbWeight::get().reads(3_u64))
.saturating_add(T::DbWeight::get().writes(3_u64))
}
/// Storage: BridgeRelayers RelayerRewards (r:1 w:1)
///
/// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540,
/// mode: MaxEncodedLen)
fn register_relayer_reward() -> Weight {
// Proof Size summary in bytes:
// Measured: `12`
// Estimated: `3530`
// Minimum execution time: 6_338 nanoseconds.
Weight::from_parts(6_526_000, 3530)
.saturating_add(T::DbWeight::get().reads(1_u64))
.saturating_add(T::DbWeight::get().writes(1_u64))
}
}
// For backwards compatibility and tests
impl WeightInfo for () {
/// Storage: BridgeRelayers RelayerRewards (r:1 w:1)
///
/// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540,
/// mode: MaxEncodedLen)
///
/// Storage: Balances TotalIssuance (r:1 w:0)
///
/// Proof: Balances TotalIssuance (max_values: Some(1), max_size: Some(8), added: 503, mode:
/// MaxEncodedLen)
///
/// Storage: System Account (r:1 w:1)
///
/// Proof: System Account (max_values: None, max_size: Some(104), added: 2579, mode:
/// MaxEncodedLen)
fn claim_rewards() -> Weight {
// Proof Size summary in bytes:
// Measured: `294`
// Estimated: `8592`
// Minimum execution time: 77_614 nanoseconds.
Weight::from_parts(79_987_000, 8592)
.saturating_add(RocksDbWeight::get().reads(3_u64))
.saturating_add(RocksDbWeight::get().writes(2_u64))
}
/// Storage: BridgeRelayers RegisteredRelayers (r:1 w:1)
///
/// Proof: BridgeRelayers RegisteredRelayers (max_values: None, max_size: Some(64), added: 2539,
/// mode: MaxEncodedLen)
///
/// Storage: Balances Reserves (r:1 w:1)
///
/// Proof: Balances Reserves (max_values: None, max_size: Some(849), added: 3324, mode:
/// MaxEncodedLen)
fn register() -> Weight {
// Proof Size summary in bytes:
// Measured: `87`
// Estimated: `7843`
// Minimum execution time: 39_590 nanoseconds.
Weight::from_parts(40_546_000, 7843)
.saturating_add(RocksDbWeight::get().reads(2_u64))
.saturating_add(RocksDbWeight::get().writes(2_u64))
}
/// Storage: BridgeRelayers RegisteredRelayers (r:1 w:1)
///
/// Proof: BridgeRelayers RegisteredRelayers (max_values: None, max_size: Some(64), added: 2539,
/// mode: MaxEncodedLen)
///
/// Storage: Balances Reserves (r:1 w:1)
///
/// Proof: Balances Reserves (max_values: None, max_size: Some(849), added: 3324, mode:
/// MaxEncodedLen)
fn deregister() -> Weight {
// Proof Size summary in bytes:
// Measured: `264`
// Estimated: `7843`
// Minimum execution time: 43_332 nanoseconds.
Weight::from_parts(45_087_000, 7843)
.saturating_add(RocksDbWeight::get().reads(2_u64))
.saturating_add(RocksDbWeight::get().writes(2_u64))
}
/// Storage: BridgeRelayers RegisteredRelayers (r:1 w:1)
///
/// Proof: BridgeRelayers RegisteredRelayers (max_values: None, max_size: Some(64), added: 2539,
/// mode: MaxEncodedLen)
///
/// Storage: Balances Reserves (r:1 w:1)
///
/// Proof: Balances Reserves (max_values: None, max_size: Some(849), added: 3324, mode:
/// MaxEncodedLen)
///
/// Storage: System Account (r:1 w:1)
///
/// Proof: System Account (max_values: None, max_size: Some(104), added: 2579, mode:
/// MaxEncodedLen)
fn slash_and_deregister() -> Weight {
// Proof Size summary in bytes:
// Measured: `380`
// Estimated: `11412`
// Minimum execution time: 42_358 nanoseconds.
Weight::from_parts(43_539_000, 11412)
.saturating_add(RocksDbWeight::get().reads(3_u64))
.saturating_add(RocksDbWeight::get().writes(3_u64))
}
/// Storage: BridgeRelayers RelayerRewards (r:1 w:1)
///
/// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540,
/// mode: MaxEncodedLen)
fn register_relayer_reward() -> Weight {
// Proof Size summary in bytes:
// Measured: `12`
// Estimated: `3530`
// Minimum execution time: 6_338 nanoseconds.
Weight::from_parts(6_526_000, 3530)
.saturating_add(RocksDbWeight::get().reads(1_u64))
.saturating_add(RocksDbWeight::get().writes(1_u64))
}
}
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Weight-related utilities.
use crate::weights::WeightInfo;
use frame_support::pallet_prelude::Weight;
/// Extended weight info.
pub trait WeightInfoExt: WeightInfo {
/// Returns the weight that needs to be added to the pre-dispatch weight of the message delivery
/// call if the `RefundBridgedParachainMessages` signed extension is deployed at runtime level.
fn receive_messages_proof_overhead_from_runtime() -> Weight {
Self::slash_and_deregister().max(Self::register_relayer_reward())
}
/// Returns the weight that needs to be added to the pre-dispatch weight of the message delivery
/// confirmation call if the `RefundBridgedParachainMessages` signed extension is deployed at
/// runtime level.
fn receive_messages_delivery_proof_overhead_from_runtime() -> Weight {
Self::register_relayer_reward()
}
/// Returns the weight that we need to deduct from the weight of a message delivery call that has
/// completed successfully.
///
/// Usually, the weight of `slash_and_deregister` is larger than the weight of
/// `register_relayer_reward`. So if the relayer has been rewarded rather than slashed, we want to
/// deduct the difference to get the actual post-dispatch weight.
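///
/// Illustrative arithmetic (made-up numbers): if `slash_and_deregister` weighs 43_000_000 and
/// `register_relayer_reward` weighs 6_500_000, then a successful call gets
/// 43_000_000 - 6_500_000 = 36_500_000 deducted from its pre-dispatch weight.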
fn extra_weight_of_successful_receive_messages_proof_call() -> Weight {
Self::slash_and_deregister().saturating_sub(Self::register_relayer_reward())
}
}
impl<T: WeightInfo> WeightInfoExt for T {}