diff --git a/bridges/bin/runtime-common/src/messages.rs b/bridges/bin/runtime-common/src/messages.rs
index 8c97198393deffa97cf26253a4999f374aa231ec..ceedee12afabfa5be73eaa9fde5ac827fba46e7e 100644
--- a/bridges/bin/runtime-common/src/messages.rs
+++ b/bridges/bin/runtime-common/src/messages.rs
@@ -20,6 +20,8 @@
 //! pallet is used to dispatch incoming messages. Message identified by a tuple
 //! of to elements - message lane id and message nonce.
 
+pub use bp_runtime::{UnderlyingChainOf, UnderlyingChainProvider};
+
 use bp_header_chain::{HeaderChain, HeaderChainError};
 use bp_messages::{
 	source_chain::{LaneMessageVerifier, TargetHeaderChain},
@@ -28,14 +30,15 @@ use bp_messages::{
 	},
 	InboundLaneData, LaneId, Message, MessageKey, MessageNonce, MessagePayload, OutboundLaneData,
 };
-use bp_runtime::{messages::MessageDispatchResult, Chain, ChainId, Size, StorageProofChecker};
-pub use bp_runtime::{UnderlyingChainOf, UnderlyingChainProvider};
+use bp_runtime::{
+	messages::MessageDispatchResult, Chain, ChainId, RawStorageProof, Size, StorageProofChecker,
+	StorageProofError,
+};
 use codec::{Decode, DecodeLimit, Encode};
 use frame_support::{traits::Get, weights::Weight, RuntimeDebug};
 use hash_db::Hasher;
 use scale_info::TypeInfo;
 use sp_std::{convert::TryFrom, fmt::Debug, marker::PhantomData, vec::Vec};
-use sp_trie::StorageProof;
 use xcm::latest::prelude::*;
 
 /// Bidirectional message bridge.
@@ -97,9 +100,6 @@ pub type OriginOf<C> = <C as ThisChainWithMessages>::RuntimeOrigin;
 /// Type of call that is used on this chain.
 pub type CallOf<C> = <C as ThisChainWithMessages>::RuntimeCall;
 
-/// Raw storage proof type (just raw trie nodes).
-pub type RawStorageProof = Vec<Vec<u8>>;
-
 /// Sub-module that is declaring types required for processing This -> Bridged chain messages.
 pub mod source {
 	use super::*;
@@ -274,8 +274,8 @@ pub mod source {
 			proof;
 		B::BridgedHeaderChain::parse_finalized_storage_proof(
 			bridged_header_hash,
-			StorageProof::new(storage_proof),
-			|storage| {
+			storage_proof,
+			|mut storage| {
 				// Messages delivery proof is just proof of single storage key read => any error
 				// is fatal.
 				let storage_inbound_lane_data_key =
@@ -290,6 +290,11 @@ pub mod source {
 				let inbound_lane_data = InboundLaneData::decode(&mut &raw_inbound_lane_data[..])
 					.map_err(|_| "Failed to decode inbound lane state from the proof")?;
 
+				// check that the storage proof doesn't have any untouched trie nodes
+				storage
+					.ensure_no_unused_nodes()
+					.map_err(|_| "Messages delivery proof has unused trie nodes")?;
+
 				Ok((lane, inbound_lane_data))
 			},
 		)
@@ -608,9 +613,9 @@ pub mod target {
 
 		B::BridgedHeaderChain::parse_finalized_storage_proof(
 			bridged_header_hash,
-			StorageProof::new(storage_proof),
+			storage_proof,
 			|storage| {
-				let parser =
+				let mut parser =
 					StorageProofCheckerAdapter::<_, B> { storage, _dummy: Default::default() };
 
 				// receiving proofs where end < begin is ok (if proof includes outbound lane state)
@@ -661,6 +666,12 @@ pub mod target {
 					return Err(MessageProofError::Empty)
 				}
 
+				// check that the storage proof doesn't have any untouched trie nodes
+				parser
+					.storage
+					.ensure_no_unused_nodes()
+					.map_err(MessageProofError::StorageProof)?;
+
 				// We only support single lane messages in this generated_schema
 				let mut proved_messages = ProvedMessages::new();
 				proved_messages.insert(lane, proved_lane_messages);
@@ -686,6 +697,8 @@ pub mod target {
 		FailedToDecodeMessage,
 		/// Failed to decode outbound lane data from the proof.
 		FailedToDecodeOutboundLaneState,
+		/// Storage proof related error.
+		StorageProof(StorageProofError),
 	}
 
 	impl From<MessageProofError> for &'static str {
@@ -700,6 +713,7 @@ pub mod target {
 					"Failed to decode message from the proof",
 				MessageProofError::FailedToDecodeOutboundLaneState =>
 					"Failed to decode outbound lane data from the proof",
+				MessageProofError::StorageProof(_) => "Invalid storage proof",
 			}
 		}
 	}
@@ -710,7 +724,7 @@ pub mod target {
 	}
 
 	impl<H: Hasher, B: MessageBridge> StorageProofCheckerAdapter<H, B> {
-		fn read_raw_outbound_lane_data(&self, lane_id: &LaneId) -> Option<Vec<u8>> {
+		fn read_raw_outbound_lane_data(&mut self, lane_id: &LaneId) -> Option<Vec<u8>> {
 			let storage_outbound_lane_data_key = bp_messages::storage_keys::outbound_lane_data_key(
 				B::BRIDGED_MESSAGES_PALLET_NAME,
 				lane_id,
@@ -718,7 +732,7 @@ pub mod target {
 			self.storage.read_value(storage_outbound_lane_data_key.0.as_ref()).ok()?
 		}
 
-		fn read_raw_message(&self, message_key: &MessageKey) -> Option<Vec<u8>> {
+		fn read_raw_message(&mut self, message_key: &MessageKey) -> Option<Vec<u8>> {
 			let storage_message_key = bp_messages::storage_keys::message_key(
 				B::BRIDGED_MESSAGES_PALLET_NAME,
 				&message_key.lane_id,
@@ -928,7 +942,35 @@ mod tests {
 				);
 				target::verify_messages_proof::<OnThisChainBridge>(proof, 10)
 			}),
-			Err(target::MessageProofError::HeaderChain(HeaderChainError::StorageRootMismatch)),
+			Err(target::MessageProofError::HeaderChain(HeaderChainError::StorageProof(
+				StorageProofError::StorageRootMismatch
+			))),
+		);
+	}
+
+	#[test]
+	fn message_proof_is_rejected_if_it_has_duplicate_trie_nodes() {
+		assert_eq!(
+			using_messages_proof(10, None, encode_all_messages, encode_lane_data, |mut proof| {
+				let node = proof.storage_proof.pop().unwrap();
+				proof.storage_proof.push(node.clone());
+				proof.storage_proof.push(node);
+				target::verify_messages_proof::<OnThisChainBridge>(proof, 10)
+			},),
+			Err(target::MessageProofError::HeaderChain(HeaderChainError::StorageProof(
+				StorageProofError::DuplicateNodesInProof
+			))),
+		);
+	}
+
+	#[test]
+	fn message_proof_is_rejected_if_it_has_unused_trie_nodes() {
+		assert_eq!(
+			using_messages_proof(10, None, encode_all_messages, encode_lane_data, |mut proof| {
+				proof.storage_proof.push(vec![42]);
+				target::verify_messages_proof::<OnThisChainBridge>(proof, 10)
+			},),
+			Err(target::MessageProofError::StorageProof(StorageProofError::UnusedNodesInTheProof)),
 		);
 	}
 
diff --git a/bridges/bin/runtime-common/src/messages_benchmarking.rs b/bridges/bin/runtime-common/src/messages_benchmarking.rs
index e7ca26d952ea88532da352a189b46ce23ee9c94e..1588c77633e230c796eac7dab471703e6b18aa39 100644
--- a/bridges/bin/runtime-common/src/messages_benchmarking.rs
+++ b/bridges/bin/runtime-common/src/messages_benchmarking.rs
@@ -22,7 +22,7 @@
 use crate::{
 	messages::{
 		source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof,
-		AccountIdOf, BridgedChain, HashOf, HasherOf, MessageBridge, RawStorageProof, ThisChain,
+		AccountIdOf, BridgedChain, HashOf, HasherOf, MessageBridge, ThisChain,
 	},
 	messages_generation::{
 		encode_all_messages, encode_lane_data, grow_trie, prepare_messages_storage_proof,
@@ -31,13 +31,15 @@ use crate::{
 
 use bp_messages::storage_keys;
 use bp_polkadot_core::parachains::ParaHash;
-use bp_runtime::{record_all_trie_keys, Chain, Parachain, StorageProofSize, UnderlyingChainOf};
+use bp_runtime::{
+	record_all_trie_keys, Chain, Parachain, RawStorageProof, StorageProofSize, UnderlyingChainOf,
+};
 use codec::Encode;
 use frame_support::weights::Weight;
 use pallet_bridge_messages::benchmarking::{MessageDeliveryProofParams, MessageProofParams};
 use sp_runtime::traits::{Header, Zero};
 use sp_std::prelude::*;
-use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, Recorder, TrieMut};
+use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, TrieMut};
 
 /// Prepare proof of messages for the `receive_messages_proof` call.
 ///
@@ -209,15 +211,9 @@ where
 	root = grow_trie(root, &mut mdb, params.size);
 
 	// generate storage proof to be delivered to This chain
-	let mut proof_recorder = Recorder::<LayoutV1<HasherOf<BridgedChain<B>>>>::new();
-	record_all_trie_keys::<LayoutV1<HasherOf<BridgedChain<B>>>, _>(
-		&mdb,
-		&root,
-		&mut proof_recorder,
-	)
-	.map_err(|_| "record_all_trie_keys has failed")
-	.expect("record_all_trie_keys should not fail in benchmarks");
-	let storage_proof = proof_recorder.drain().into_iter().map(|n| n.data.to_vec()).collect();
+	let storage_proof = record_all_trie_keys::<LayoutV1<HasherOf<BridgedChain<B>>>, _>(&mdb, &root)
+		.map_err(|_| "record_all_trie_keys has failed")
+		.expect("record_all_trie_keys should not fail in benchmarks");
 
 	(root, storage_proof)
 }
diff --git a/bridges/bin/runtime-common/src/messages_generation.rs b/bridges/bin/runtime-common/src/messages_generation.rs
index 560033d122a84a86c693599571f1ff930dc81a6a..aec97c22800470decf47ba25872e1e0286323a40 100644
--- a/bridges/bin/runtime-common/src/messages_generation.rs
+++ b/bridges/bin/runtime-common/src/messages_generation.rs
@@ -18,16 +18,16 @@
 
 #![cfg(any(feature = "runtime-benchmarks", test))]
 
-use crate::messages::{BridgedChain, HashOf, HasherOf, MessageBridge, RawStorageProof};
+use crate::messages::{BridgedChain, HashOf, HasherOf, MessageBridge};
 
 use bp_messages::{
 	storage_keys, LaneId, MessageKey, MessageNonce, MessagePayload, OutboundLaneData,
 };
-use bp_runtime::{record_all_trie_keys, StorageProofSize};
+use bp_runtime::{record_all_trie_keys, RawStorageProof, StorageProofSize};
 use codec::Encode;
 use sp_core::Hasher;
 use sp_std::{ops::RangeInclusive, prelude::*};
-use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, Recorder, TrieMut};
+use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, TrieMut};
 
 /// Simple and correct message data encode function.
 pub(crate) fn encode_all_messages(_: MessageNonce, m: &MessagePayload) -> Option<Vec<u8>> {
@@ -97,15 +97,9 @@ where
 	root = grow_trie(root, &mut mdb, size);
 
 	// generate storage proof to be delivered to This chain
-	let mut proof_recorder = Recorder::<LayoutV1<HasherOf<BridgedChain<B>>>>::new();
-	record_all_trie_keys::<LayoutV1<HasherOf<BridgedChain<B>>>, _>(
-		&mdb,
-		&root,
-		&mut proof_recorder,
-	)
-	.map_err(|_| "record_all_trie_keys has failed")
-	.expect("record_all_trie_keys should not fail in benchmarks");
-	let storage_proof = proof_recorder.drain().into_iter().map(|n| n.data.to_vec()).collect();
+	let storage_proof = record_all_trie_keys::<LayoutV1<HasherOf<BridgedChain<B>>>, _>(&mdb, &root)
+		.map_err(|_| "record_all_trie_keys has failed")
+		.expect("record_all_trie_keys should not fail in benchmarks");
 
 	(root, storage_proof)
 }
@@ -125,11 +119,10 @@ pub fn grow_trie<H: Hasher>(
 	let mut key_index = 0;
 	loop {
 		// generate storage proof to be delivered to This chain
-		let mut proof_recorder = Recorder::<LayoutV1<H>>::new();
-		record_all_trie_keys::<LayoutV1<H>, _>(mdb, &root, &mut proof_recorder)
+		let storage_proof = record_all_trie_keys::<LayoutV1<H>, _>(mdb, &root)
 			.map_err(|_| "record_all_trie_keys has failed")
 			.expect("record_all_trie_keys should not fail in benchmarks");
-		let size: usize = proof_recorder.drain().into_iter().map(|n| n.data.len()).sum();
+		let size: usize = storage_proof.iter().map(|n| n.len()).sum();
 		if size > minimal_trie_size as _ {
 			return root
 		}
diff --git a/bridges/bin/runtime-common/src/parachains_benchmarking.rs b/bridges/bin/runtime-common/src/parachains_benchmarking.rs
index fcd32ea28b86e378126259495086f13bc0a7f274..e549e4f79b9f36ac4d81c4dbb2220ccc99d4efb9 100644
--- a/bridges/bin/runtime-common/src/parachains_benchmarking.rs
+++ b/bridges/bin/runtime-common/src/parachains_benchmarking.rs
@@ -29,7 +29,7 @@ use codec::Encode;
 use frame_support::traits::Get;
 use pallet_bridge_parachains::{RelayBlockHash, RelayBlockHasher, RelayBlockNumber};
 use sp_std::prelude::*;
-use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, Recorder, TrieMut};
+use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, TrieMut};
 
 /// Prepare proof of messages for the `receive_messages_proof` call.
 ///
@@ -72,11 +72,9 @@ where
 	state_root = grow_trie(state_root, &mut mdb, size);
 
 	// generate heads storage proof
-	let mut proof_recorder = Recorder::<LayoutV1<RelayBlockHasher>>::new();
-	record_all_trie_keys::<LayoutV1<RelayBlockHasher>, _>(&mdb, &state_root, &mut proof_recorder)
+	let proof = record_all_trie_keys::<LayoutV1<RelayBlockHasher>, _>(&mdb, &state_root)
 		.map_err(|_| "record_all_trie_keys has failed")
 		.expect("record_all_trie_keys should not fail in benchmarks");
-	let proof = proof_recorder.drain().into_iter().map(|n| n.data.to_vec()).collect();
 
 	let (relay_block_number, relay_block_hash) =
 		insert_header_to_grandpa_pallet::<R, R::BridgesGrandpaPalletInstance>(state_root);
diff --git a/bridges/modules/grandpa/src/lib.rs b/bridges/modules/grandpa/src/lib.rs
index c2b794347958832e4d95deb31eb5e2c7c39be860..fca0cb1b758c754816bc95080dd3bd7ccd02aff6 100644
--- a/bridges/modules/grandpa/src/lib.rs
+++ b/bridges/modules/grandpa/src/lib.rs
@@ -1050,7 +1050,7 @@ mod tests {
 			assert_noop!(
 				Pallet::<TestRuntime>::parse_finalized_storage_proof(
 					Default::default(),
-					sp_trie::StorageProof::new(vec![]),
+					vec![],
 					|_| (),
 				),
 				bp_header_chain::HeaderChainError::UnknownHeader,
diff --git a/bridges/modules/parachains/src/lib.rs b/bridges/modules/parachains/src/lib.rs
index 6875a3690f6af8ef2565e8b61eaf46e619b00c30..b9bbaac7ca3d401e990d51fbb33ae30778e8d3ac 100644
--- a/bridges/modules/parachains/src/lib.rs
+++ b/bridges/modules/parachains/src/lib.rs
@@ -330,10 +330,10 @@ pub mod pallet {
 
 			pallet_bridge_grandpa::Pallet::<T, T::BridgesGrandpaPalletInstance>::parse_finalized_storage_proof(
 				relay_block_hash,
-				sp_trie::StorageProof::new(parachain_heads_proof.0),
-				move |storage| {
+				parachain_heads_proof.0,
+				move |mut storage| {
 					for (parachain, parachain_head_hash) in parachains {
-						let parachain_head = match Pallet::<T, I>::read_parachain_head(&storage, parachain) {
+						let parachain_head = match Pallet::<T, I>::read_parachain_head(&mut storage, parachain) {
 							Ok(Some(parachain_head)) => parachain_head,
 							Ok(None) => {
 								log::trace!(
@@ -381,7 +381,10 @@ pub mod pallet {
 						}
 
 						// convert from parachain head into stored parachain head data
-						let parachain_head_data = match T::ParaStoredHeaderDataBuilder::try_build(parachain, &parachain_head) {
+						let parachain_head_data = match T::ParaStoredHeaderDataBuilder::try_build(
+							parachain,
+							&parachain_head,
+						) {
 							Some(parachain_head_data) => parachain_head_data,
 							None => {
 								log::trace!(
@@ -418,9 +421,20 @@ pub mod pallet {
 								.saturating_sub(WeightInfoOf::<T, I>::parachain_head_pruning_weight(T::DbWeight::get()));
 						}
 					}
+
+					// even though we may have accepted some parachain heads, we can't allow relayers to submit
+					// proof with unused trie nodes
+					// => treat this as an error
+					//
+					// (we can throw error here, because now all our calls are transactional)
+					storage.ensure_no_unused_nodes()
 				},
 			)
-			.map_err(|_| Error::<T, I>::InvalidStorageProof)?;
+			.and_then(|r| r.map_err(bp_header_chain::HeaderChainError::StorageProof))
+			.map_err(|e| {
+				log::trace!(target: LOG_TARGET, "Parachain heads storage proof is invalid: {:?}", e);
+				Error::<T, I>::InvalidStorageProof
+			})?;
 
 			Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes })
 		}
@@ -488,7 +502,7 @@ pub mod pallet {
 
 		/// Read parachain head from storage proof.
 		fn read_parachain_head(
-			storage: &bp_runtime::StorageProofChecker<RelayBlockHasher>,
+			storage: &mut bp_runtime::StorageProofChecker<RelayBlockHasher>,
 			parachain: ParaId,
 		) -> Result<Option<ParaHead>, StorageProofError> {
 			let parachain_head_key =
@@ -705,7 +719,7 @@ mod tests {
 	use frame_system::{EventRecord, Pallet as System, Phase};
 	use sp_core::Hasher;
 	use sp_runtime::{traits::Header as HeaderT, DispatchError};
-	use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, Recorder, TrieMut};
+	use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, TrieMut};
 
 	type BridgesGrandpaPalletInstance = pallet_bridge_grandpa::Instance1;
 	type WeightInfo = <TestRuntime as Config>::WeightInfo;
@@ -759,11 +773,9 @@ mod tests {
 		}
 
 		// generate storage proof to be delivered to This chain
-		let mut proof_recorder = Recorder::<LayoutV1<RelayBlockHasher>>::new();
-		record_all_trie_keys::<LayoutV1<RelayBlockHasher>, _>(&mdb, &root, &mut proof_recorder)
+		let storage_proof = record_all_trie_keys::<LayoutV1<RelayBlockHasher>, _>(&mdb, &root)
 			.map_err(|_| "record_all_trie_keys has failed")
 			.expect("record_all_trie_keys should not fail in benchmarks");
-		let storage_proof = proof_recorder.drain().into_iter().map(|n| n.data.to_vec()).collect();
 
 		(root, ParaHeadsProof(storage_proof), parachains)
 	}
@@ -1447,7 +1459,7 @@ mod tests {
 
 	#[test]
 	fn ignores_parachain_head_if_it_is_missing_from_storage_proof() {
-		let (state_root, proof, _) = prepare_parachain_heads_proof(vec![(1, head_data(1, 0))]);
+		let (state_root, proof, _) = prepare_parachain_heads_proof(vec![]);
 		let parachains = vec![(ParaId(2), Default::default())];
 		run_test(|| {
 			initialize(state_root);
diff --git a/bridges/primitives/header-chain/Cargo.toml b/bridges/primitives/header-chain/Cargo.toml
index 3b7ea58cb96a7e2f3aa2bdf8802bceccc6d32082..be87cce460e381611d65d3021ef3c224cc587a3e 100644
--- a/bridges/primitives/header-chain/Cargo.toml
+++ b/bridges/primitives/header-chain/Cargo.toml
@@ -23,7 +23,6 @@ sp-core = { git = "https://github.com/paritytech/substrate", branch = "master",
 sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
-sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 
 [dev-dependencies]
 bp-test-utils = { path = "../test-utils" }
@@ -43,5 +42,4 @@ std = [
 	"sp-finality-grandpa/std",
 	"sp-runtime/std",
 	"sp-std/std",
-	"sp-trie/std",
 ]
diff --git a/bridges/primitives/header-chain/src/lib.rs b/bridges/primitives/header-chain/src/lib.rs
index 14e9395c03c09b7c8365a2f436db19793638a611..49d1ae894068f84bada51ae516a918576e75eb8f 100644
--- a/bridges/primitives/header-chain/src/lib.rs
+++ b/bridges/primitives/header-chain/src/lib.rs
@@ -19,35 +19,36 @@
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
-use bp_runtime::{BasicOperatingMode, Chain, HashOf, HasherOf, HeaderOf, StorageProofChecker};
+use bp_runtime::{
+	BasicOperatingMode, Chain, HashOf, HasherOf, HeaderOf, RawStorageProof, StorageProofChecker,
+	StorageProofError,
+};
 use codec::{Codec, Decode, Encode, EncodeLike, MaxEncodedLen};
 use core::{clone::Clone, cmp::Eq, default::Default, fmt::Debug};
-use frame_support::PalletError;
 use scale_info::TypeInfo;
 #[cfg(feature = "std")]
 use serde::{Deserialize, Serialize};
 use sp_finality_grandpa::{AuthorityList, ConsensusLog, SetId, GRANDPA_ENGINE_ID};
 use sp_runtime::{traits::Header as HeaderT, Digest, RuntimeDebug};
 use sp_std::boxed::Box;
-use sp_trie::StorageProof;
 
 pub mod justification;
 pub mod storage_keys;
 
 /// Header chain error.
-#[derive(Clone, Copy, Decode, Encode, Eq, PalletError, PartialEq, RuntimeDebug, TypeInfo)]
+#[derive(Clone, Eq, PartialEq, RuntimeDebug)]
 pub enum HeaderChainError {
 	/// Header with given hash is missing from the chain.
 	UnknownHeader,
-	/// The storage proof doesn't contains storage root.
-	StorageRootMismatch,
+	/// Storage proof related error.
+	StorageProof(StorageProofError),
 }
 
 impl From<HeaderChainError> for &'static str {
 	fn from(err: HeaderChainError) -> &'static str {
 		match err {
 			HeaderChainError::UnknownHeader => "UnknownHeader",
-			HeaderChainError::StorageRootMismatch => "StorageRootMismatch",
+			HeaderChainError::StorageProof(e) => e.into(),
 		}
 	}
 }
@@ -83,13 +84,13 @@ pub trait HeaderChain<C: Chain> {
 	/// Parse storage proof using finalized header.
 	fn parse_finalized_storage_proof<R>(
 		header_hash: HashOf<C>,
-		storage_proof: StorageProof,
+		storage_proof: RawStorageProof,
 		parse: impl FnOnce(StorageProofChecker<HasherOf<C>>) -> R,
 	) -> Result<R, HeaderChainError> {
 		let state_root = Self::finalized_header_state_root(header_hash)
 			.ok_or(HeaderChainError::UnknownHeader)?;
 		let storage_proof_checker = bp_runtime::StorageProofChecker::new(state_root, storage_proof)
-			.map_err(|_| HeaderChainError::StorageRootMismatch)?;
+			.map_err(HeaderChainError::StorageProof)?;
 
 		Ok(parse(storage_proof_checker))
 	}
diff --git a/bridges/primitives/polkadot-core/src/parachains.rs b/bridges/primitives/polkadot-core/src/parachains.rs
index e8f68dd2a9a165a8fc6a86acd8e51b7e0951d068..0b410dff49f758c88f7e2c52a62fad35bd8bb25b 100644
--- a/bridges/primitives/polkadot-core/src/parachains.rs
+++ b/bridges/primitives/polkadot-core/src/parachains.rs
@@ -22,7 +22,7 @@
 //! chains. Having pallets that are referencing polkadot, would mean that there may
 //! be two versions of polkadot crates included in the runtime. Which is bad.
 
-use bp_runtime::Size;
+use bp_runtime::{RawStorageProof, Size};
 use codec::{CompactAs, Decode, Encode, MaxEncodedLen};
 use frame_support::RuntimeDebug;
 use scale_info::TypeInfo;
@@ -88,7 +88,7 @@ pub type ParaHasher = crate::Hasher;
 
 /// Raw storage proof of parachain heads, stored in polkadot-like chain runtime.
 #[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo)]
-pub struct ParaHeadsProof(pub Vec<Vec<u8>>);
+pub struct ParaHeadsProof(pub RawStorageProof);
 
 impl Size for ParaHeadsProof {
 	fn size(&self) -> u32 {
diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs
index 83ffd238ab50214be4029d2b6be87086a36689ef..75151ccb72372f8f011473dd5fe71524b8f279fb 100644
--- a/bridges/primitives/runtime/src/lib.rs
+++ b/bridges/primitives/runtime/src/lib.rs
@@ -39,7 +39,7 @@ pub use frame_support::storage::storage_prefix as storage_value_final_key;
 use num_traits::{CheckedSub, One};
 pub use storage_proof::{
 	record_all_keys as record_all_trie_keys, Error as StorageProofError,
-	ProofSize as StorageProofSize, StorageProofChecker,
+	ProofSize as StorageProofSize, RawStorageProof, StorageProofChecker,
 };
 pub use storage_types::BoundedStorageValue;
 
diff --git a/bridges/primitives/runtime/src/storage_proof.rs b/bridges/primitives/runtime/src/storage_proof.rs
index e1465d2fa16577e8b5adf9339513df1d8090841b..133a51ce6e7f5f570f7a36e6a8a20eed7f510c9b 100644
--- a/bridges/primitives/runtime/src/storage_proof.rs
+++ b/bridges/primitives/runtime/src/storage_proof.rs
@@ -16,15 +16,18 @@
 
 //! Logic for checking Substrate storage proofs.
 
-use codec::Decode;
+use codec::{Decode, Encode};
 use hash_db::{HashDB, Hasher, EMPTY_PREFIX};
 use sp_runtime::RuntimeDebug;
-use sp_std::{boxed::Box, vec::Vec};
+use sp_std::{boxed::Box, collections::btree_set::BTreeSet, vec::Vec};
 use sp_trie::{
 	read_trie_value, LayoutV1, MemoryDB, Recorder, StorageProof, Trie, TrieConfiguration,
 	TrieDBBuilder, TrieError, TrieHash,
 };
 
+/// Raw storage proof type (just raw trie nodes).
+pub type RawStorageProof = Vec<Vec<u8>>;
+
 /// Storage proof size requirements.
 ///
 /// This is currently used by benchmarks when generating storage proofs.
@@ -48,8 +51,10 @@ pub struct StorageProofChecker<H>
 where
 	H: Hasher,
 {
+	proof_nodes_count: usize,
 	root: H::Out,
 	db: MemoryDB<H>,
+	recorder: Recorder<LayoutV1<H>>,
 }
 
 impl<H> StorageProofChecker<H>
@@ -59,28 +64,59 @@ where
 	/// Constructs a new storage proof checker.
 	///
 	/// This returns an error if the given proof is invalid with respect to the given root.
-	pub fn new(root: H::Out, proof: StorageProof) -> Result<Self, Error> {
+	pub fn new(root: H::Out, proof: RawStorageProof) -> Result<Self, Error> {
+		// 1. we don't want extra items in the storage proof
+		// 2. `StorageProof` is storing all trie nodes in the `BTreeSet`
+		//
+		// => someone could simply add duplicate items to the proof and we won't be
+		// able to detect that by just using `StorageProof`
+		//
+		// => let's check it when we are converting our "raw proof" into `StorageProof`
+		let proof_nodes_count = proof.len();
+		let proof = StorageProof::new(proof);
+		if proof_nodes_count != proof.iter_nodes().count() {
+			return Err(Error::DuplicateNodesInProof)
+		}
+
 		let db = proof.into_memory_db();
 		if !db.contains(&root, EMPTY_PREFIX) {
 			return Err(Error::StorageRootMismatch)
 		}
 
-		let checker = StorageProofChecker { root, db };
+		let recorder = Recorder::default();
+		let checker = StorageProofChecker { proof_nodes_count, root, db, recorder };
 		Ok(checker)
 	}
 
+	/// Returns an error if the proof contains trie nodes that were never touched by
+	/// previous `read_value` calls (i.e. unused nodes).
+	pub fn ensure_no_unused_nodes(mut self) -> Result<(), Error> {
+		let visited_nodes = self
+			.recorder
+			.drain()
+			.into_iter()
+			.map(|record| record.data)
+			.collect::<BTreeSet<_>>();
+		let visited_nodes_count = visited_nodes.len();
+		if self.proof_nodes_count == visited_nodes_count {
+			Ok(())
+		} else {
+			Err(Error::UnusedNodesInTheProof)
+		}
+	}
+
 	/// Reads a value from the available subset of storage. If the value cannot be read due to an
 	/// incomplete or otherwise invalid proof, this function returns an error.
-	pub fn read_value(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
+	pub fn read_value(&mut self, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
 		// LayoutV1 or LayoutV0 is identical for proof that only read values.
-		read_trie_value::<LayoutV1<H>, _>(&self.db, &self.root, key, None, None)
+		read_trie_value::<LayoutV1<H>, _>(&self.db, &self.root, key, Some(&mut self.recorder), None)
 			.map_err(|_| Error::StorageValueUnavailable)
 	}
 
 	/// Reads and decodes a value from the available subset of storage. If the value cannot be read
 	/// due to an incomplete or otherwise invalid proof, this function returns an error. If value is
 	/// read, but decoding fails, this function returns an error.
-	pub fn read_and_decode_value<T: Decode>(&self, key: &[u8]) -> Result<Option<T>, Error> {
+	pub fn read_and_decode_value<T: Decode>(&mut self, key: &[u8]) -> Result<Option<T>, Error> {
 		self.read_value(key).and_then(|v| {
 			v.map(|v| T::decode(&mut &v[..]).map_err(Error::StorageValueDecodeFailed))
 				.transpose()
@@ -88,19 +124,39 @@ where
 	}
 }
 
-#[derive(Eq, RuntimeDebug, PartialEq)]
+/// Storage proof related errors.
+#[derive(Clone, Eq, PartialEq, RuntimeDebug)]
 pub enum Error {
+	/// Duplicate trie nodes are found in the proof.
+	DuplicateNodesInProof,
+	/// Unused trie nodes are found in the proof.
+	UnusedNodesInTheProof,
+	/// Expected storage root is missing from the proof.
 	StorageRootMismatch,
+	/// Unable to reach expected storage value using provided trie nodes.
 	StorageValueUnavailable,
+	/// Failed to decode storage value.
 	StorageValueDecodeFailed(codec::Error),
 }
 
+impl From<Error> for &'static str {
+	fn from(err: Error) -> &'static str {
+		match err {
+			Error::DuplicateNodesInProof => "Storage proof contains duplicate nodes",
+			Error::UnusedNodesInTheProof => "Storage proof contains unused nodes",
+			Error::StorageRootMismatch => "Storage root is missing from the storage proof",
+			Error::StorageValueUnavailable => "Storage value is missing from the storage proof",
+			Error::StorageValueDecodeFailed(_) =>
+				"Failed to decode storage value from the storage proof",
+		}
+	}
+}
+
 /// Return valid storage proof and state root.
 ///
 /// NOTE: This should only be used for **testing**.
 #[cfg(feature = "std")]
-pub fn craft_valid_storage_proof() -> (sp_core::H256, StorageProof) {
-	use codec::Encode;
+pub fn craft_valid_storage_proof() -> (sp_core::H256, RawStorageProof) {
 	use sp_state_machine::{backend::Backend, prove_read, InMemoryBackend};
 
 	let state_version = sp_runtime::StateVersion::default();
@@ -121,25 +177,33 @@ pub fn craft_valid_storage_proof() -> (sp_core::H256, StorageProof) {
 	let proof =
 		prove_read(backend, &[&b"key1"[..], &b"key2"[..], &b"key4"[..], &b"key22"[..]]).unwrap();
 
-	(root, proof)
+	(root, proof.into_nodes().into_iter().collect())
 }
 
 /// Record all keys for a given root.
 pub fn record_all_keys<L: TrieConfiguration, DB>(
 	db: &DB,
 	root: &TrieHash<L>,
-	recorder: &mut Recorder<L>,
-) -> Result<(), Box<TrieError<L>>>
+) -> Result<RawStorageProof, Box<TrieError<L>>>
 where
 	DB: hash_db::HashDBRef<L::Hash, trie_db::DBValue>,
 {
-	let trie = TrieDBBuilder::<L>::new(db, root).with_recorder(recorder).build();
+	let mut recorder = Recorder::<L>::new();
+	let trie = TrieDBBuilder::<L>::new(db, root).with_recorder(&mut recorder).build();
 	for x in trie.iter()? {
 		let (key, _) = x?;
 		trie.get(&key)?;
 	}
 
-	Ok(())
+	// recorder may record the same trie node multiple times and we don't want duplicate nodes
+	// in our proofs => let's deduplicate it by collecting to the BTreeSet first
+	Ok(recorder
+		.drain()
+		.into_iter()
+		.map(|n| n.data.to_vec())
+		.collect::<BTreeSet<_>>()
+		.into_iter()
+		.collect())
 }
 
 #[cfg(test)]
@@ -152,7 +216,7 @@ pub mod tests {
 		let (root, proof) = craft_valid_storage_proof();
 
 		// check proof in runtime
-		let checker =
+		let mut checker =
 			<StorageProofChecker<sp_core::Blake2Hasher>>::new(root, proof.clone()).unwrap();
 		assert_eq!(checker.read_value(b"key1"), Ok(Some(b"value1".to_vec())));
 		assert_eq!(checker.read_value(b"key2"), Ok(Some(b"value2".to_vec())));
@@ -171,4 +235,31 @@ pub mod tests {
 			Some(Error::StorageRootMismatch)
 		);
 	}
+
+	#[test]
+	fn proof_with_duplicate_items_is_rejected() {
+		let (root, mut proof) = craft_valid_storage_proof();
+		proof.push(proof.first().unwrap().clone());
+
+		assert_eq!(
+			StorageProofChecker::<sp_core::Blake2Hasher>::new(root, proof).map(drop),
+			Err(Error::DuplicateNodesInProof),
+		);
+	}
+
+	#[test]
+	fn proof_with_unused_items_is_rejected() {
+		let (root, proof) = craft_valid_storage_proof();
+
+		let mut checker =
+			StorageProofChecker::<sp_core::Blake2Hasher>::new(root, proof.clone()).unwrap();
+		checker.read_value(b"key1").unwrap();
+		checker.read_value(b"key2").unwrap();
+		checker.read_value(b"key4").unwrap();
+		checker.read_value(b"key22").unwrap();
+		assert_eq!(checker.ensure_no_unused_nodes(), Ok(()));
+
+		let checker = StorageProofChecker::<sp_core::Blake2Hasher>::new(root, proof).unwrap();
+		assert_eq!(checker.ensure_no_unused_nodes(), Err(Error::UnusedNodesInTheProof));
+	}
 }