diff --git a/bridges/bin/millau/runtime/src/lib.rs b/bridges/bin/millau/runtime/src/lib.rs
index 538417127e3623d68cc10deec23709ed14ad5b43..cb0862bc047f39f1792b9ac81cab714724e50d6e 100644
--- a/bridges/bin/millau/runtime/src/lib.rs
+++ b/bridges/bin/millau/runtime/src/lib.rs
@@ -550,6 +550,8 @@ impl pallet_bridge_messages::Config<WithRialtoParachainMessagesInstance> for Run
 parameter_types! {
 	pub const RialtoParasPalletName: &'static str = bp_rialto::PARAS_PALLET_NAME;
 	pub const WestendParasPalletName: &'static str = bp_westend::PARAS_PALLET_NAME;
+	pub const MaxRialtoParaHeadSize: u32 = bp_rialto::MAX_NESTED_PARACHAIN_HEAD_SIZE;
+	pub const MaxWestendParaHeadSize: u32 = bp_westend::MAX_NESTED_PARACHAIN_HEAD_SIZE;
 }
 
 /// Instance of the with-Rialto parachains pallet.
@@ -562,6 +564,7 @@ impl pallet_bridge_parachains::Config<WithRialtoParachainsInstance> for Runtime
 	type ParasPalletName = RialtoParasPalletName;
 	type TrackedParachains = frame_support::traits::Everything;
 	type HeadsToKeep = HeadersToKeep;
+	type MaxParaHeadSize = MaxRialtoParaHeadSize;
 }
 
 /// Instance of the with-Westend parachains pallet.
@@ -574,6 +577,7 @@ impl pallet_bridge_parachains::Config<WithWestendParachainsInstance> for Runtime
 	type ParasPalletName = WestendParasPalletName;
 	type TrackedParachains = frame_support::traits::Everything;
 	type HeadsToKeep = HeadersToKeep;
+	type MaxParaHeadSize = MaxWestendParaHeadSize;
 }
 
 construct_runtime!(
diff --git a/bridges/modules/grandpa/src/lib.rs b/bridges/modules/grandpa/src/lib.rs
index c4cbadcb388f6f8366a4b1d91e8bfbd351a1defd..37afbf2739ec76aa1b2f8492ad13c1eb2889399f 100644
--- a/bridges/modules/grandpa/src/lib.rs
+++ b/bridges/modules/grandpa/src/lib.rs
@@ -36,10 +36,12 @@
 // Runtime-generated enums
 #![allow(clippy::large_enum_variant)]
 
-use storage_types::{StoredAuthoritySet, StoredBridgedHeader};
+use storage_types::StoredAuthoritySet;
 
 use bp_header_chain::{justification::GrandpaJustification, InitializationData};
-use bp_runtime::{BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf, OwnedBridgeModule};
+use bp_runtime::{
+	BlockNumberOf, BoundedStorageValue, Chain, HashOf, HasherOf, HeaderOf, OwnedBridgeModule,
+};
 use finality_grandpa::voter_set::VoterSet;
 use frame_support::{ensure, fail};
 use frame_system::ensure_signed;
@@ -73,6 +75,9 @@ pub type BridgedBlockHash<T, I> = HashOf<<T as Config<I>>::BridgedChain>;
 pub type BridgedBlockHasher<T, I> = HasherOf<<T as Config<I>>::BridgedChain>;
 /// Header of the bridged chain.
 pub type BridgedHeader<T, I> = HeaderOf<<T as Config<I>>::BridgedChain>;
+/// Stored header of the bridged chain.
+pub type StoredBridgedHeader<T, I> =
+	BoundedStorageValue<<T as Config<I>>::MaxBridgedHeaderSize, BridgedHeader<T, I>>;
 
 #[frame_support::pallet]
 pub mod pallet {
@@ -199,8 +204,18 @@ pub mod pallet {
 
 			let is_authorities_change_enacted =
 				try_enact_authority_change::<T, I>(&finality_target, set_id)?;
-			let finality_target =
-				StoredBridgedHeader::<T, I>::try_from_bridged_header(*finality_target)?;
+			let finality_target = StoredBridgedHeader::<T, I>::try_from_inner(*finality_target)
+				.map_err(|e| {
+					log::error!(
+						target: LOG_TARGET,
+						"Size of header {:?} ({}) is larger than the configured value {}",
+						hash,
+						e.value_size,
+						e.maximal_size,
+					);
+
+					Error::<T, I>::TooLargeHeader
+				})?;
 			<RequestCount<T, I>>::mutate(|count| *count += 1);
 			insert_header::<T, I>(finality_target, hash);
 			log::info!(
@@ -504,7 +519,8 @@ pub mod pallet {
 			init_params;
 		let authority_set = StoredAuthoritySet::<T, I>::try_new(authority_list, set_id)
 			.map_err(|_| Error::TooManyAuthoritiesInSet)?;
-		let header = StoredBridgedHeader::<T, I>::try_from_bridged_header(*header)?;
+		let header = StoredBridgedHeader::<T, I>::try_from_inner(*header)
+			.map_err(|_| Error::<T, I>::TooLargeHeader)?;
 
 		let initial_hash = header.hash();
 		<InitialHash<T, I>>::put(initial_hash);
@@ -538,7 +554,7 @@ pub mod pallet {
 			);
 			let hash = header.hash();
 			insert_header::<T, I>(
-				StoredBridgedHeader::try_from_bridged_header(header)
+				StoredBridgedHeader::<T, I>::try_from_inner(header)
 					.expect("only used from benchmarks; benchmarks are correct; qed"),
 				hash,
 			);
@@ -553,7 +569,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 	/// if the pallet has not been initialized yet.
 	pub fn best_finalized() -> Option<BridgedHeader<T, I>> {
 		let (_, hash) = <BestFinalized<T, I>>::get()?;
-		<ImportedHeaders<T, I>>::get(hash).map(|h| h.0)
+		<ImportedHeaders<T, I>>::get(hash).map(|h| h.into_inner())
 	}
 
 	/// Check if a particular header is known to the bridge pallet.
@@ -1103,7 +1119,7 @@ mod tests {
 			<BestFinalized<TestRuntime>>::put((2, hash));
 			<ImportedHeaders<TestRuntime>>::insert(
 				hash,
-				StoredBridgedHeader::try_from_bridged_header(header).unwrap(),
+				StoredBridgedHeader::<TestRuntime, ()>::try_from_inner(header).unwrap(),
 			);
 
 			assert_ok!(
diff --git a/bridges/modules/grandpa/src/storage_types.rs b/bridges/modules/grandpa/src/storage_types.rs
index 5d2cb661d813ab2099de28dc8045775fbe11bba2..ac4835e4f0d4133cc7a562756d3a07d5f847cfa3 100644
--- a/bridges/modules/grandpa/src/storage_types.rs
+++ b/bridges/modules/grandpa/src/storage_types.rs
@@ -16,12 +16,12 @@
 
 //! Wrappers for public types that are implementing `MaxEncodedLen`
 
-use crate::{BridgedHeader, Config, Error};
+use crate::Config;
 
 use bp_header_chain::AuthoritySet;
 use codec::{Decode, Encode, MaxEncodedLen};
-use frame_support::{traits::Get, BoundedVec, RuntimeDebugNoBound};
-use scale_info::{Type, TypeInfo};
+use frame_support::{BoundedVec, RuntimeDebugNoBound};
+use scale_info::TypeInfo;
 use sp_finality_grandpa::{AuthorityId, AuthorityList, AuthorityWeight, SetId};
 
 /// A bounded list of Grandpa authorities with associated weights.
@@ -64,43 +64,3 @@ impl<T: Config<I>, I: 'static> From<StoredAuthoritySet<T, I>> for AuthoritySet {
 		AuthoritySet { authorities: t.authorities.into(), set_id: t.set_id }
 	}
 }
-
-/// A bounded chain header.
-#[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebugNoBound)]
-pub struct StoredBridgedHeader<T: Config<I>, I: 'static>(pub BridgedHeader<T, I>);
-
-impl<T: Config<I>, I: 'static> StoredBridgedHeader<T, I> {
-	/// Construct `StoredBridgedHeader` from the `BridgedHeader` with all required checks.
-	pub fn try_from_bridged_header(header: BridgedHeader<T, I>) -> Result<Self, Error<T, I>> {
-		// this conversion is heavy (since we do encoding here), so we may want to optimize it later
-		// (e.g. by introducing custom Encode implementation, and turning `StoredBridgedHeader` into
-		// `enum StoredBridgedHeader { Decoded(BridgedHeader), Encoded(Vec<u8>) }`)
-		if header.encoded_size() > T::MaxBridgedHeaderSize::get() as usize {
-			Err(Error::TooLargeHeader)
-		} else {
-			Ok(StoredBridgedHeader(header))
-		}
-	}
-}
-
-impl<T: Config<I>, I: 'static> sp_std::ops::Deref for StoredBridgedHeader<T, I> {
-	type Target = BridgedHeader<T, I>;
-
-	fn deref(&self) -> &Self::Target {
-		&self.0
-	}
-}
-
-impl<T: Config<I>, I: 'static> TypeInfo for StoredBridgedHeader<T, I> {
-	type Identity = Self;
-
-	fn type_info() -> Type {
-		BridgedHeader::<T, I>::type_info()
-	}
-}
-
-impl<T: Config<I>, I: 'static> MaxEncodedLen for StoredBridgedHeader<T, I> {
-	fn max_encoded_len() -> usize {
-		T::MaxBridgedHeaderSize::get() as usize
-	}
-}
diff --git a/bridges/modules/parachains/src/lib.rs b/bridges/modules/parachains/src/lib.rs
index 290bdadf04a731b1e6c9c63d7cc48022b01a84a2..ac21e997f3146d54ade402edefe354ae34b499af 100644
--- a/bridges/modules/parachains/src/lib.rs
+++ b/bridges/modules/parachains/src/lib.rs
@@ -69,11 +69,15 @@ pub mod pallet {
 	use super::*;
 	use bp_parachains::{BestParaHeadHash, ImportedParaHeadsKeyProvider, ParasInfoKeyProvider};
 	use bp_runtime::{
-		BasicOperatingMode, OwnedBridgeModule, StorageDoubleMapKeyProvider, StorageMapKeyProvider,
+		BasicOperatingMode, BoundedStorageValue, OwnedBridgeModule, StorageDoubleMapKeyProvider,
+		StorageMapKeyProvider,
 	};
 	use frame_support::pallet_prelude::*;
 	use frame_system::pallet_prelude::*;
 
+	/// Stored parachain head of the given parachains pallet.
+	pub type StoredParaHeadOf<T, I> =
+		BoundedStorageValue<<T as Config<I>>::MaxParaHeadSize, ParaHead>;
 	/// Weight info of the given parachains pallet.
 	pub type WeightInfoOf<T, I> = <T as Config<I>>::WeightInfo;
 
@@ -94,6 +98,12 @@ pub mod pallet {
 		},
 		/// The caller has provided obsolete parachain head, which is already known to the pallet.
 		RejectedObsoleteParachainHead { parachain: ParaId, parachain_head_hash: ParaHash },
+		/// The caller has provided a parachain head that exceeds the maximal configured head size.
+		RejectedLargeParachainHead {
+			parachain: ParaId,
+			parachain_head_hash: ParaHash,
+			parachain_head_size: u32,
+		},
 		/// Parachain head has been updated.
 		UpdatedParachainHead { parachain: ParaId, parachain_head_hash: ParaHash },
 	}
@@ -156,6 +166,17 @@ pub mod pallet {
 		/// Incautious change of this constant may lead to orphan entries in the runtime storage.
 		#[pallet::constant]
 		type HeadsToKeep: Get<u32>;
+
+		/// Maximal size (in bytes) of the SCALE-encoded parachain head.
+		///
+		/// Keep in mind that the size of any tracked parachain header must not exceed this value.
+		/// So if you're going to track multiple parachains, one of which stores large digests in
+		/// its headers, this value must be large enough to fit the largest of those headers.
+		///
+		/// There are no mandatory headers in this pallet, so it can't stall if some header
+		/// exceeds this bound.
+		#[pallet::constant]
+		type MaxParaHeadSize: Get<u32>;
 	}
 
 	/// Optional pallet owner.
@@ -196,7 +217,7 @@ pub mod pallet {
 		<ImportedParaHeadsKeyProvider as StorageDoubleMapKeyProvider>::Key1,
 		<ImportedParaHeadsKeyProvider as StorageDoubleMapKeyProvider>::Hasher2,
 		<ImportedParaHeadsKeyProvider as StorageDoubleMapKeyProvider>::Key2,
-		<ImportedParaHeadsKeyProvider as StorageDoubleMapKeyProvider>::Value,
+		StoredParaHeadOf<T, I>,
 	>;
 
 	/// A ring buffer of imported parachain head hashes. Ordered by the insertion time.
@@ -206,7 +227,6 @@ pub mod pallet {
 
 	#[pallet::pallet]
 	#[pallet::generate_store(pub(super) trait Store)]
-	#[pallet::without_storage_info]
 	pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);
 
 	impl<T: Config<I>, I: 'static> OwnedBridgeModule<T> for Pallet<T, I> {
@@ -381,12 +401,12 @@ pub mod pallet {
 		/// Get best finalized header of the given parachain.
 		pub fn best_parachain_head(parachain: ParaId) -> Option<ParaHead> {
 			let best_para_head_hash = ParasInfo::<T, I>::get(parachain)?.best_head_hash.head_hash;
-			ImportedParaHeads::<T, I>::get(parachain, best_para_head_hash)
+			ImportedParaHeads::<T, I>::get(parachain, best_para_head_hash).map(|h| h.into_inner())
 		}
 
 		/// Get parachain head with given hash.
 		pub fn parachain_head(parachain: ParaId, hash: ParaHash) -> Option<ParaHead> {
-			ImportedParaHeads::<T, I>::get(parachain, hash)
+			ImportedParaHeads::<T, I>::get(parachain, hash).map(|h| h.into_inner())
 		}
 
 		/// Verify that the passed storage proof is valid, given it is crafted using
@@ -478,12 +498,13 @@ pub mod pallet {
 		) -> Result<UpdateParachainHeadArtifacts, ()> {
 			// check if head has been already updated at better relay chain block. Without this
 			// check, we may import heads in random order
+			let err_log_prefix = "The parachain head can't be updated";
 			let is_valid = Self::validate_updated_parachain_head(
 				parachain,
 				&stored_best_head,
 				updated_at_relay_block_number,
 				updated_head_hash,
-				"The parachain head can't be updated",
+				err_log_prefix,
 			);
 			if !is_valid {
 				Self::deposit_event(Event::RejectedObsoleteParachainHead {
@@ -492,6 +513,30 @@ pub mod pallet {
 				});
 				return Err(())
 			}
+
+			// verify that the parachain head size is <= `MaxParaHeadSize`
+			let updated_head = match StoredParaHeadOf::<T, I>::try_from_inner(updated_head) {
+				Ok(updated_head) => updated_head,
+				Err(e) => {
+					log::trace!(
+						target: LOG_TARGET,
+						"{}. The parachain head size for {:?} is {}. It exceeds the maximal configured size {}.",
+						err_log_prefix,
+						parachain,
+						e.value_size,
+						e.maximal_size,
+					);
+
+					Self::deposit_event(Event::RejectedLargeParachainHead {
+						parachain,
+						parachain_head_hash: updated_head_hash,
+						parachain_head_size: e.value_size as _,
+					});
+
+					return Err(())
+				},
+			};
+
 			let next_imported_hash_position = stored_best_head
 				.map_or(0, |stored_best_head| stored_best_head.next_imported_hash_position);
 
@@ -575,8 +620,8 @@ pub mod pallet {
 mod tests {
 	use super::*;
 	use crate::mock::{
-		run_test, test_relay_header, Event as TestEvent, Origin, TestRuntime, PARAS_PALLET_NAME,
-		UNTRACKED_PARACHAIN_ID,
+		run_test, test_relay_header, Event as TestEvent, Origin, TestRuntime,
+		MAXIMAL_PARACHAIN_HEAD_SIZE, PARAS_PALLET_NAME, UNTRACKED_PARACHAIN_ID,
 	};
 	use codec::Encode;
 
@@ -674,6 +719,12 @@ mod tests {
 		ParaHead((parachain, head_number).encode())
 	}
 
+	fn large_head_data(parachain: u32, head_number: u32) -> ParaHead {
+		ParaHead(
+			(parachain, head_number, vec![42u8; MAXIMAL_PARACHAIN_HEAD_SIZE as usize]).encode(),
+		)
+	}
+
 	fn head_hash(parachain: u32, head_number: u32) -> ParaHash {
 		head_data(parachain, head_number).hash()
 	}
@@ -770,18 +821,21 @@ mod tests {
 				ImportedParaHeads::<TestRuntime>::get(
 					ParaId(1),
 					initial_best_head(1).best_head_hash.head_hash
-				),
+				)
+				.map(|h| h.into_inner()),
 				Some(head_data(1, 0))
 			);
 			assert_eq!(
 				ImportedParaHeads::<TestRuntime>::get(
 					ParaId(2),
 					initial_best_head(2).best_head_hash.head_hash
-				),
+				)
+				.map(|h| h.into_inner()),
 				None
 			);
 			assert_eq!(
-				ImportedParaHeads::<TestRuntime>::get(ParaId(3), head_hash(3, 10)),
+				ImportedParaHeads::<TestRuntime>::get(ParaId(3), head_hash(3, 10))
+					.map(|h| h.into_inner()),
 				Some(head_data(3, 10))
 			);
 
@@ -830,11 +884,13 @@ mod tests {
 				})
 			);
 			assert_eq!(
-				ImportedParaHeads::<TestRuntime>::get(ParaId(1), head_data(1, 5).hash()),
+				ImportedParaHeads::<TestRuntime>::get(ParaId(1), head_data(1, 5).hash())
+					.map(|h| h.into_inner()),
 				Some(head_data(1, 5))
 			);
 			assert_eq!(
-				ImportedParaHeads::<TestRuntime>::get(ParaId(1), head_data(1, 10).hash()),
+				ImportedParaHeads::<TestRuntime>::get(ParaId(1), head_data(1, 10).hash())
+					.map(|h| h.into_inner()),
 				None
 			);
 			assert_eq!(
@@ -863,11 +919,13 @@ mod tests {
 				})
 			);
 			assert_eq!(
-				ImportedParaHeads::<TestRuntime>::get(ParaId(1), head_data(1, 5).hash()),
+				ImportedParaHeads::<TestRuntime>::get(ParaId(1), head_data(1, 5).hash())
+					.map(|h| h.into_inner()),
 				Some(head_data(1, 5))
 			);
 			assert_eq!(
-				ImportedParaHeads::<TestRuntime>::get(ParaId(1), head_data(1, 10).hash()),
+				ImportedParaHeads::<TestRuntime>::get(ParaId(1), head_data(1, 10).hash())
+					.map(|h| h.into_inner()),
 				Some(head_data(1, 10))
 			);
 			assert_eq!(
@@ -1092,6 +1150,57 @@ mod tests {
 		});
 	}
 
+	#[test]
+	fn does_nothing_when_parachain_head_is_too_large() {
+		let (state_root, proof, parachains) =
+			prepare_parachain_heads_proof(vec![(1, head_data(1, 5)), (2, large_head_data(1, 5))]);
+		run_test(|| {
+			// start with relay block #0 and try to import head#5 of parachain#1 and a too-large
+			// head of parachain#2
+			initialize(state_root);
+			let result = Pallet::<TestRuntime>::submit_parachain_heads(
+				Origin::signed(1),
+				(0, test_relay_header(0, state_root).hash()),
+				parachains,
+				proof,
+			);
+			assert_ok!(result);
+			assert_eq!(
+				ParasInfo::<TestRuntime>::get(ParaId(1)),
+				Some(ParaInfo {
+					best_head_hash: BestParaHeadHash {
+						at_relay_block_number: 0,
+						head_hash: head_data(1, 5).hash()
+					},
+					next_imported_hash_position: 1,
+				})
+			);
+			assert_eq!(ParasInfo::<TestRuntime>::get(ParaId(2)), None);
+			assert_eq!(
+				System::<TestRuntime>::events(),
+				vec![
+					EventRecord {
+						phase: Phase::Initialization,
+						event: TestEvent::Parachains(Event::UpdatedParachainHead {
+							parachain: ParaId(1),
+							parachain_head_hash: head_data(1, 5).hash(),
+						}),
+						topics: vec![],
+					},
+					EventRecord {
+						phase: Phase::Initialization,
+						event: TestEvent::Parachains(Event::RejectedLargeParachainHead {
+							parachain: ParaId(2),
+							parachain_head_hash: large_head_data(1, 5).hash(),
+							parachain_head_size: large_head_data(1, 5).encoded_size() as u32,
+						}),
+						topics: vec![],
+					},
+				],
+			);
+		});
+	}
+
 	#[test]
 	fn prunes_old_heads() {
 		run_test(|| {
diff --git a/bridges/modules/parachains/src/mock.rs b/bridges/modules/parachains/src/mock.rs
index a31fb4c47f90e49a82d8e3dcbe9a411a73fd436f..f1d39592b20a4e89aff50b019e4bdafa494fe0bb 100644
--- a/bridges/modules/parachains/src/mock.rs
+++ b/bridges/modules/parachains/src/mock.rs
@@ -36,6 +36,7 @@ type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<TestRunt
 
 pub const PARAS_PALLET_NAME: &str = "Paras";
 pub const UNTRACKED_PARACHAIN_ID: u32 = 10;
+pub const MAXIMAL_PARACHAIN_HEAD_SIZE: u32 = 512;
 
 construct_runtime! {
 	pub enum TestRuntime where
@@ -122,6 +123,7 @@ impl pallet_bridge_parachains::Config for TestRuntime {
 	type ParasPalletName = ParasPalletName;
 	type TrackedParachains = IsInVec<GetTenFirstParachains>;
 	type HeadsToKeep = HeadsToKeep;
+	type MaxParaHeadSize = frame_support::traits::ConstU32<MAXIMAL_PARACHAIN_HEAD_SIZE>;
 }
 
 #[derive(Debug)]
diff --git a/bridges/primitives/chain-rialto/src/lib.rs b/bridges/primitives/chain-rialto/src/lib.rs
index 9698da887794632544a938d2401d312c98d6f490..257e94d0273c28ad16fbb2364a89f1cb8a973a62 100644
--- a/bridges/primitives/chain-rialto/src/lib.rs
+++ b/bridges/primitives/chain-rialto/src/lib.rs
@@ -106,6 +106,9 @@ pub const MAX_AUTHORITIES_COUNT: u32 = 5;
 /// Maximal SCALE-encoded header size (in bytes) at Rialto.
 pub const MAX_HEADER_SIZE: u32 = 512;
 
+/// Maximal SCALE-encoded size of parachain headers that are stored in the Rialto `Paras` pallet.
+pub const MAX_NESTED_PARACHAIN_HEAD_SIZE: u32 = MAX_HEADER_SIZE;
+
 /// Re-export `time_units` to make usage easier.
 pub use time_units::*;
 
diff --git a/bridges/primitives/chain-westend/src/lib.rs b/bridges/primitives/chain-westend/src/lib.rs
index eeb30709accd75fe37f4f0599ba5821a0bf9cb36..2fd27094cb50f6fc82ba1c387887970b1ec3a42f 100644
--- a/bridges/primitives/chain-westend/src/lib.rs
+++ b/bridges/primitives/chain-westend/src/lib.rs
@@ -70,6 +70,9 @@ pub const MAX_AUTHORITIES_COUNT: u32 = 100_000;
 /// some fixed reserve for other things (digest, block hash and number, ...) as well.
 pub const MAX_HEADER_SIZE: u32 = 4096 + MAX_AUTHORITIES_COUNT * 40;
 
+/// Maximal SCALE-encoded size of parachain headers that are stored in the Westend `Paras` pallet.
+pub const MAX_NESTED_PARACHAIN_HEAD_SIZE: u32 = MAX_HEADER_SIZE;
+
 /// Identifier of Westmint parachain at the Westend relay chain.
 pub const WESTMINT_PARACHAIN_ID: u32 = 2000;
 
diff --git a/bridges/primitives/parachains/src/lib.rs b/bridges/primitives/parachains/src/lib.rs
index 52b4954897308a74a260e08354109d4e4b8c96fa..f2edebf8a22a797fcf82e485acc66ee80c29a814 100644
--- a/bridges/primitives/parachains/src/lib.rs
+++ b/bridges/primitives/parachains/src/lib.rs
@@ -23,13 +23,13 @@ use bp_polkadot_core::{
 	BlockNumber as RelayBlockNumber,
 };
 use bp_runtime::{StorageDoubleMapKeyProvider, StorageMapKeyProvider};
-use codec::{Decode, Encode};
+use codec::{Decode, Encode, MaxEncodedLen};
 use frame_support::{Blake2_128Concat, RuntimeDebug, Twox64Concat};
 use scale_info::TypeInfo;
 use sp_core::storage::StorageKey;
 
 /// Best known parachain head hash.
-#[derive(Clone, Decode, Encode, PartialEq, RuntimeDebug, TypeInfo)]
+#[derive(Clone, Decode, Encode, MaxEncodedLen, PartialEq, RuntimeDebug, TypeInfo)]
 pub struct BestParaHeadHash {
 	/// Number of relay block where this head has been read.
 	///
@@ -45,7 +45,7 @@ pub struct BestParaHeadHash {
 }
 
 /// Best known parachain head as it is stored in the runtime storage.
-#[derive(Decode, Encode, PartialEq, RuntimeDebug, TypeInfo)]
+#[derive(Decode, Encode, MaxEncodedLen, PartialEq, RuntimeDebug, TypeInfo)]
 pub struct ParaInfo {
 	/// Best known parachain head hash.
 	pub best_head_hash: BestParaHeadHash,
diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs
index d775e09c47a005811a00e46cfa91c0a1cb5b3e69..94a231333ded7f8cd1924d122529bd56c8f13629 100644
--- a/bridges/primitives/runtime/src/lib.rs
+++ b/bridges/primitives/runtime/src/lib.rs
@@ -40,6 +40,7 @@ pub use storage_proof::{
 	record_all_keys as record_all_trie_keys, Error as StorageProofError,
 	ProofSize as StorageProofSize, StorageProofChecker,
 };
+pub use storage_types::BoundedStorageValue;
 
 #[cfg(feature = "std")]
 pub use storage_proof::craft_valid_storage_proof;
@@ -48,6 +49,7 @@ pub mod messages;
 
 mod chain;
 mod storage_proof;
+mod storage_types;
 
 // Re-export macro to aviod include paste dependency everywhere
 pub use sp_runtime::paste;
diff --git a/bridges/primitives/runtime/src/storage_types.rs b/bridges/primitives/runtime/src/storage_types.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b37f779d00b32c43bc7d280674446c01748d5b9b
--- /dev/null
+++ b/bridges/primitives/runtime/src/storage_types.rs
@@ -0,0 +1,90 @@
+// Copyright 2022 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Wrapper for a runtime storage value that checks, during conversion, whether the value
+//! exceeds a given maximum.
+
+use codec::{Decode, Encode, MaxEncodedLen};
+use frame_support::{traits::Get, RuntimeDebug};
+use scale_info::{Type, TypeInfo};
+use sp_std::{marker::PhantomData, ops::Deref};
+
+/// Error that is returned when the value size exceeds maximal configured size.
+#[derive(RuntimeDebug)]
+pub struct MaximalSizeExceededError {
+	/// Size of the value.
+	pub value_size: usize,
+	/// Maximal configured size.
+	pub maximal_size: usize,
+}
+
+/// A bounded runtime storage value.
+#[derive(Clone, Decode, Encode, Eq, PartialEq)]
+pub struct BoundedStorageValue<B, V> {
+	value: V,
+	_phantom: PhantomData<B>,
+}
+
+impl<B, V: sp_std::fmt::Debug> sp_std::fmt::Debug for BoundedStorageValue<B, V> {
+	fn fmt(&self, fmt: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result {
+		self.value.fmt(fmt)
+	}
+}
+
+impl<B: Get<u32>, V: Encode> BoundedStorageValue<B, V> {
+	/// Construct `BoundedStorageValue` from the underlying `value` with all required checks.
+	///
+	/// Returns an error if the value size exceeds the given bound.
+	pub fn try_from_inner(value: V) -> Result<Self, MaximalSizeExceededError> {
+		// this conversion is heavy (since we do encoding here), so we may want to optimize it later
+		// (e.g. by introducing custom Encode implementation, and turning `BoundedStorageValue` into
+		// `enum BoundedStorageValue { Decoded(V), Encoded(Vec<u8>) }`)
+		let value_size = value.encoded_size();
+		let maximal_size = B::get() as usize;
+		if value_size > maximal_size {
+			Err(MaximalSizeExceededError { value_size, maximal_size })
+		} else {
+			Ok(BoundedStorageValue { value, _phantom: Default::default() })
+		}
+	}
+
+	/// Convert into the inner type.
+	pub fn into_inner(self) -> V {
+		self.value
+	}
+}
+
+impl<B, V> Deref for BoundedStorageValue<B, V> {
+	type Target = V;
+
+	fn deref(&self) -> &Self::Target {
+		&self.value
+	}
+}
+
+impl<B: 'static, V: TypeInfo + 'static> TypeInfo for BoundedStorageValue<B, V> {
+	type Identity = Self;
+
+	fn type_info() -> Type {
+		V::type_info()
+	}
+}
+
+impl<B: Get<u32>, V: Encode> MaxEncodedLen for BoundedStorageValue<B, V> {
+	fn max_encoded_len() -> usize {
+		B::get() as usize
+	}
+}
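For reference, a minimal standalone sketch of how the new `BoundedStorageValue` wrapper behaves. The `SmallValue` alias and `demo` function are illustrative only (not part of the patch); they simply exercise `try_from_inner`/`into_inner` under an assumed `ConstU32<8>` bound:

```rust
use bp_runtime::BoundedStorageValue;
use frame_support::traits::ConstU32;

// Hypothetical bounded value: at most 8 SCALE-encoded bytes.
type SmallValue = BoundedStorageValue<ConstU32<8>, Vec<u8>>;

fn demo() {
	// 4 payload bytes encode to 5 bytes (compact length prefix + data), so the bound holds.
	let ok = SmallValue::try_from_inner(vec![0u8; 4]).expect("fits the 8-byte bound");
	assert_eq!(ok.into_inner(), vec![0u8; 4]);

	// 16 payload bytes encode to 17 bytes and are rejected; the error reports both sizes,
	// which is what the pallets above log before mapping it to their own error/event.
	let err = SmallValue::try_from_inner(vec![0u8; 16]).unwrap_err();
	assert_eq!(err.value_size, 17);
	assert_eq!(err.maximal_size, 8);
}
```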