From 53bcbb15f1de62661671ad8099b9dc3f8b623efd Mon Sep 17 00:00:00 2001
From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Date: Mon, 15 Jan 2024 16:03:32 +0200
Subject: [PATCH] archive: Implement archive_unstable_storage (#1846)

This PR implements the `archive_unstable_storage` method that offers
support for:
- fetching values
- fetching hashes
- iterating over keys and values
- iterating over keys and hashes
- fetching merkle values from the trie-db

A common component dedicated to RPC-V2 storage queries is created to
bridge the gap between `chainHead/storage` and `archive/storage`.
Query pagination is supported via `paginationStartKey`, similar to the
legacy APIs.
As with `chainHead/storage`, the `archive/storage` method enforces a
maximum number of queried items per call; any extra items are reported
back as discarded.
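
As a rough illustration (a sketch, not part of this diff), a single
descendants-values query for the new method can be expressed with the
types introduced below; the key is a placeholder hex-encoded storage key:

    // Minimal sketch using the `PaginatedStorageQuery` / `StorageQueryType`
    // types added in `common::events` by this PR; the key is a placeholder.
    let items = vec![PaginatedStorageQuery {
        key: "0x3a6d".to_string(), // hex-encoded ":m"
        query_type: StorageQueryType::DescendantsValues,
        pagination_start_key: None,
    }];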

The design builds upon:
https://github.com/paritytech/json-rpc-interface-spec/pull/94.
Closes https://github.com/paritytech/polkadot-sdk/issues/1512.

cc @paritytech/subxt-team

---------

Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io>
Co-authored-by: Niklas Adolfsson <niklasadolfsson1@gmail.com>
---
 .../client/rpc-spec-v2/src/archive/api.rs     |  18 +-
 .../client/rpc-spec-v2/src/archive/archive.rs |  70 ++-
 .../src/archive/archive_storage.rs            | 125 ++++
 .../client/rpc-spec-v2/src/archive/mod.rs     |   2 +
 .../client/rpc-spec-v2/src/archive/tests.rs   | 547 +++++++++++++++++-
 .../client/rpc-spec-v2/src/chain_head/api.rs  |   5 +-
 .../rpc-spec-v2/src/chain_head/chain_head.rs  |   6 +-
 .../src/chain_head/chain_head_storage.rs      | 210 +------
 .../rpc-spec-v2/src/chain_head/event.rs       | 146 +----
 .../client/rpc-spec-v2/src/chain_head/mod.rs  |   7 -
 .../rpc-spec-v2/src/chain_head/tests.rs       |   7 +-
 .../client/rpc-spec-v2/src/common/events.rs   | 273 +++++++++
 .../client/rpc-spec-v2/src/common/mod.rs      |  17 +
 .../client/rpc-spec-v2/src/common/storage.rs  | 198 +++++++
 substrate/client/rpc-spec-v2/src/lib.rs       |  10 +-
 15 files changed, 1278 insertions(+), 363 deletions(-)
 create mode 100644 substrate/client/rpc-spec-v2/src/archive/archive_storage.rs
 create mode 100644 substrate/client/rpc-spec-v2/src/common/events.rs
 create mode 100644 substrate/client/rpc-spec-v2/src/common/mod.rs
 create mode 100644 substrate/client/rpc-spec-v2/src/common/storage.rs

diff --git a/substrate/client/rpc-spec-v2/src/archive/api.rs b/substrate/client/rpc-spec-v2/src/archive/api.rs
index 0583111cb48..b1973830400 100644
--- a/substrate/client/rpc-spec-v2/src/archive/api.rs
+++ b/substrate/client/rpc-spec-v2/src/archive/api.rs
@@ -18,7 +18,10 @@
 
 //! API trait of the archive methods.
 
-use crate::MethodResult;
+use crate::{
+	common::events::{ArchiveStorageResult, PaginatedStorageQuery},
+	MethodResult,
+};
 use jsonrpsee::{core::RpcResult, proc_macros::rpc};
 
 #[rpc(client, server)]
@@ -88,4 +91,17 @@ pub trait ArchiveApi<Hash> {
 		function: String,
 		call_parameters: String,
 	) -> RpcResult<MethodResult>;
+
+	/// Returns storage entries at a specific block's state.
+	///
+	/// # Unstable
+	///
+	/// This method is unstable and subject to change in the future.
+	#[method(name = "archive_unstable_storage", blocking)]
+	fn archive_unstable_storage(
+		&self,
+		hash: Hash,
+		items: Vec<PaginatedStorageQuery<String>>,
+		child_trie: Option<String>,
+	) -> RpcResult<ArchiveStorageResult>;
 }
diff --git a/substrate/client/rpc-spec-v2/src/archive/archive.rs b/substrate/client/rpc-spec-v2/src/archive/archive.rs
index 269962cfd74..c01afb5d779 100644
--- a/substrate/client/rpc-spec-v2/src/archive/archive.rs
+++ b/substrate/client/rpc-spec-v2/src/archive/archive.rs
@@ -20,14 +20,15 @@
 
 use crate::{
 	archive::{error::Error as ArchiveError, ArchiveApiServer},
-	chain_head::hex_string,
-	MethodResult,
+	common::events::{ArchiveStorageResult, PaginatedStorageQuery},
+	hex_string, MethodResult,
 };
 
 use codec::Encode;
 use jsonrpsee::core::{async_trait, RpcResult};
 use sc_client_api::{
-	Backend, BlockBackend, BlockchainEvents, CallExecutor, ExecutorProvider, StorageProvider,
+	Backend, BlockBackend, BlockchainEvents, CallExecutor, ChildInfo, ExecutorProvider, StorageKey,
+	StorageProvider,
 };
 use sp_api::{CallApiAt, CallContext};
 use sp_blockchain::{
@@ -40,6 +41,8 @@ use sp_runtime::{
 };
 use std::{collections::HashSet, marker::PhantomData, sync::Arc};
 
+use super::archive_storage::ArchiveStorage;
+
 /// An API for archive RPC calls.
 pub struct Archive<BE: Backend<Block>, Block: BlockT, Client> {
 	/// Substrate client.
@@ -48,8 +51,12 @@ pub struct Archive<BE: Backend<Block>, Block: BlockT, Client> {
 	backend: Arc<BE>,
 	/// The hexadecimal encoded hash of the genesis block.
 	genesis_hash: String,
+	/// The maximum number of items the `archive_storage` method can report at a time.
+	storage_max_reported_items: usize,
+	/// The maximum number of queried items the `archive_storage` method accepts at a time.
+	storage_max_queried_items: usize,
 	/// Phantom member to pin the block type.
-	_phantom: PhantomData<(Block, BE)>,
+	_phantom: PhantomData<Block>,
 }
 
 impl<BE: Backend<Block>, Block: BlockT, Client> Archive<BE, Block, Client> {
@@ -58,9 +65,18 @@ impl<BE: Backend<Block>, Block: BlockT, Client> Archive<BE, Block, Client> {
 		client: Arc<Client>,
 		backend: Arc<BE>,
 		genesis_hash: GenesisHash,
+		storage_max_reported_items: usize,
+		storage_max_queried_items: usize,
 	) -> Self {
 		let genesis_hash = hex_string(&genesis_hash.as_ref());
-		Self { client, backend, genesis_hash, _phantom: PhantomData }
+		Self {
+			client,
+			backend,
+			genesis_hash,
+			storage_max_reported_items,
+			storage_max_queried_items,
+			_phantom: PhantomData,
+		}
 	}
 }
 
@@ -185,4 +201,48 @@ where
 			Err(error) => MethodResult::err(error.to_string()),
 		})
 	}
+
+	fn archive_unstable_storage(
+		&self,
+		hash: Block::Hash,
+		items: Vec<PaginatedStorageQuery<String>>,
+		child_trie: Option<String>,
+	) -> RpcResult<ArchiveStorageResult> {
+		let items = items
+			.into_iter()
+			.map(|query| {
+				let key = StorageKey(parse_hex_param(query.key)?);
+				let pagination_start_key = query
+					.pagination_start_key
+					.map(|key| parse_hex_param(key).map(|key| StorageKey(key)))
+					.transpose()?;
+
+				// Pagination start key is only supported for descendants queries.
+				if pagination_start_key.is_some() && !query.query_type.is_descendant_query() {
+					return Err(ArchiveError::InvalidParam(
+						"Pagination start key is only supported for descendants queries"
+							.to_string(),
+					))
+				}
+
+				Ok(PaginatedStorageQuery {
+					key,
+					query_type: query.query_type,
+					pagination_start_key,
+				})
+			})
+			.collect::<Result<Vec<_>, ArchiveError>>()?;
+
+		let child_trie = child_trie
+			.map(|child_trie| parse_hex_param(child_trie))
+			.transpose()?
+			.map(ChildInfo::new_default_from_vec);
+
+		let storage_client = ArchiveStorage::new(
+			self.client.clone(),
+			self.storage_max_reported_items,
+			self.storage_max_queried_items,
+		);
+		Ok(storage_client.handle_query(hash, items, child_trie))
+	}
 }
diff --git a/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs b/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs
new file mode 100644
index 00000000000..09415af1ca1
--- /dev/null
+++ b/substrate/client/rpc-spec-v2/src/archive/archive_storage.rs
@@ -0,0 +1,125 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Implementation of the `archive_storage` method.
+
+use std::sync::Arc;
+
+use sc_client_api::{Backend, ChildInfo, StorageKey, StorageProvider};
+use sp_runtime::traits::Block as BlockT;
+
+use crate::common::{
+	events::{ArchiveStorageResult, PaginatedStorageQuery, StorageQueryType},
+	storage::{IterQueryType, QueryIter, Storage},
+};
+
+/// Generates the response of the `archive_storage` method.
+pub struct ArchiveStorage<Client, Block, BE> {
+	/// Storage client.
+	client: Storage<Client, Block, BE>,
+	/// The maximum number of items the `archive_storage` method can report at a time.
+	storage_max_reported_items: usize,
+	/// The maximum number of queried items the `archive_storage` method accepts at a time.
+	storage_max_queried_items: usize,
+}
+
+impl<Client, Block, BE> ArchiveStorage<Client, Block, BE> {
+	/// Constructs a new [`ArchiveStorage`].
+	pub fn new(
+		client: Arc<Client>,
+		storage_max_reported_items: usize,
+		storage_max_queried_items: usize,
+	) -> Self {
+		Self { client: Storage::new(client), storage_max_reported_items, storage_max_queried_items }
+	}
+}
+
+impl<Client, Block, BE> ArchiveStorage<Client, Block, BE>
+where
+	Block: BlockT + 'static,
+	BE: Backend<Block> + 'static,
+	Client: StorageProvider<Block, BE> + 'static,
+{
+	/// Generate the response of the `archive_storage` method.
+	pub fn handle_query(
+		&self,
+		hash: Block::Hash,
+		mut items: Vec<PaginatedStorageQuery<StorageKey>>,
+		child_key: Option<ChildInfo>,
+	) -> ArchiveStorageResult {
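+		// Items beyond `storage_max_queried_items` are not queried; they are only
+		// counted and reported back as discarded.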
+		let discarded_items = items.len().saturating_sub(self.storage_max_queried_items);
+		items.truncate(self.storage_max_queried_items);
+
+		let mut storage_results = Vec::with_capacity(items.len());
+		for item in items {
+			match item.query_type {
+				StorageQueryType::Value => {
+					match self.client.query_value(hash, &item.key, child_key.as_ref()) {
+						Ok(Some(value)) => storage_results.push(value),
+						Ok(None) => continue,
+						Err(error) => return ArchiveStorageResult::err(error),
+					}
+				},
+				StorageQueryType::Hash =>
+					match self.client.query_hash(hash, &item.key, child_key.as_ref()) {
+						Ok(Some(value)) => storage_results.push(value),
+						Ok(None) => continue,
+						Err(error) => return ArchiveStorageResult::err(error),
+					},
+				StorageQueryType::ClosestDescendantMerkleValue =>
+					match self.client.query_merkle_value(hash, &item.key, child_key.as_ref()) {
+						Ok(Some(value)) => storage_results.push(value),
+						Ok(None) => continue,
+						Err(error) => return ArchiveStorageResult::err(error),
+					},
+				StorageQueryType::DescendantsValues => {
+					match self.client.query_iter_pagination(
+						QueryIter {
+							query_key: item.key,
+							ty: IterQueryType::Value,
+							pagination_start_key: item.pagination_start_key,
+						},
+						hash,
+						child_key.as_ref(),
+						self.storage_max_reported_items,
+					) {
+						Ok((results, _)) => storage_results.extend(results),
+						Err(error) => return ArchiveStorageResult::err(error),
+					}
+				},
+				StorageQueryType::DescendantsHashes => {
+					match self.client.query_iter_pagination(
+						QueryIter {
+							query_key: item.key,
+							ty: IterQueryType::Hash,
+							pagination_start_key: item.pagination_start_key,
+						},
+						hash,
+						child_key.as_ref(),
+						self.storage_max_reported_items,
+					) {
+						Ok((results, _)) => storage_results.extend(results),
+						Err(error) => return ArchiveStorageResult::err(error),
+					}
+				},
+			};
+		}
+
+		ArchiveStorageResult::ok(storage_results, discarded_items)
+	}
+}
diff --git a/substrate/client/rpc-spec-v2/src/archive/mod.rs b/substrate/client/rpc-spec-v2/src/archive/mod.rs
index eb7d71d702f..e1f45e19a62 100644
--- a/substrate/client/rpc-spec-v2/src/archive/mod.rs
+++ b/substrate/client/rpc-spec-v2/src/archive/mod.rs
@@ -25,6 +25,8 @@
 #[cfg(test)]
 mod tests;
 
+mod archive_storage;
+
 pub mod api;
 pub mod archive;
 pub mod error;
diff --git a/substrate/client/rpc-spec-v2/src/archive/tests.rs b/substrate/client/rpc-spec-v2/src/archive/tests.rs
index 6b288c2c954..45da8e588e6 100644
--- a/substrate/client/rpc-spec-v2/src/archive/tests.rs
+++ b/substrate/client/rpc-spec-v2/src/archive/tests.rs
@@ -16,7 +16,13 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-use crate::{chain_head::hex_string, MethodResult};
+use crate::{
+	common::events::{
+		ArchiveStorageMethodOk, ArchiveStorageResult, PaginatedStorageQuery, StorageQueryType,
+		StorageResultType,
+	},
+	hex_string, MethodResult,
+};
 
 use super::{archive::Archive, *};
 
@@ -24,17 +30,20 @@ use assert_matches::assert_matches;
 use codec::{Decode, Encode};
 use jsonrpsee::{
 	core::error::Error,
+	rpc_params,
 	types::{error::CallError, EmptyServerParams as EmptyParams},
 	RpcModule,
 };
 use sc_block_builder::BlockBuilderBuilder;
+use sc_client_api::ChildInfo;
 use sp_blockchain::HeaderBackend;
 use sp_consensus::BlockOrigin;
+use sp_core::{Blake2Hasher, Hasher};
 use sp_runtime::{
 	traits::{Block as BlockT, Header as HeaderT},
 	SaturatedConversion,
 };
-use std::sync::Arc;
+use std::{collections::HashMap, sync::Arc};
 use substrate_test_runtime::Transfer;
 use substrate_test_runtime_client::{
 	prelude::*, runtime, Backend, BlockBuilderExt, Client, ClientBlockImportExt,
@@ -42,23 +51,39 @@ use substrate_test_runtime_client::{
 
 const CHAIN_GENESIS: [u8; 32] = [0; 32];
 const INVALID_HASH: [u8; 32] = [1; 32];
+const MAX_PAGINATION_LIMIT: usize = 5;
+const MAX_QUERIED_LIMIT: usize = 5;
+const KEY: &[u8] = b":mock";
+const VALUE: &[u8] = b"hello world";
+const CHILD_STORAGE_KEY: &[u8] = b"child";
+const CHILD_VALUE: &[u8] = b"child value";
 
 type Header = substrate_test_runtime_client::runtime::Header;
 type Block = substrate_test_runtime_client::runtime::Block;
 
-fn setup_api() -> (Arc<Client<Backend>>, RpcModule<Archive<Backend, Block, Client<Backend>>>) {
-	let builder = TestClientBuilder::new();
+fn setup_api(
+	max_returned_items: usize,
+	max_queried_items: usize,
+) -> (Arc<Client<Backend>>, RpcModule<Archive<Backend, Block, Client<Backend>>>) {
+	let child_info = ChildInfo::new_default(CHILD_STORAGE_KEY);
+	let builder = TestClientBuilder::new().add_extra_child_storage(
+		&child_info,
+		KEY.to_vec(),
+		CHILD_VALUE.to_vec(),
+	);
 	let backend = builder.backend();
 	let client = Arc::new(builder.build());
 
-	let api = Archive::new(client.clone(), backend, CHAIN_GENESIS).into_rpc();
+	let api =
+		Archive::new(client.clone(), backend, CHAIN_GENESIS, max_returned_items, max_queried_items)
+			.into_rpc();
 
 	(client, api)
 }
 
 #[tokio::test]
 async fn archive_genesis() {
-	let (_client, api) = setup_api();
+	let (_client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
 
 	let genesis: String =
 		api.call("archive_unstable_genesisHash", EmptyParams::new()).await.unwrap();
@@ -67,7 +92,7 @@ async fn archive_genesis() {
 
 #[tokio::test]
 async fn archive_body() {
-	let (mut client, api) = setup_api();
+	let (mut client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
 
 	// Invalid block hash.
 	let invalid_hash = hex_string(&INVALID_HASH);
@@ -101,7 +126,7 @@ async fn archive_body() {
 
 #[tokio::test]
 async fn archive_header() {
-	let (mut client, api) = setup_api();
+	let (mut client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
 
 	// Invalid block hash.
 	let invalid_hash = hex_string(&INVALID_HASH);
@@ -135,7 +160,7 @@ async fn archive_header() {
 
 #[tokio::test]
 async fn archive_finalized_height() {
-	let (client, api) = setup_api();
+	let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
 
 	let client_height: u32 = client.info().finalized_number.saturated_into();
 
@@ -147,7 +172,7 @@ async fn archive_finalized_height() {
 
 #[tokio::test]
 async fn archive_hash_by_height() {
-	let (mut client, api) = setup_api();
+	let (mut client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
 
 	// Genesis height.
 	let hashes: Vec<String> = api.call("archive_unstable_hashByHeight", [0]).await.unwrap();
@@ -253,7 +278,7 @@ async fn archive_hash_by_height() {
 
 #[tokio::test]
 async fn archive_call() {
-	let (mut client, api) = setup_api();
+	let (mut client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
 	let invalid_hash = hex_string(&INVALID_HASH);
 
 	// Invalid parameter (non-hex).
@@ -309,3 +334,503 @@ async fn archive_call() {
 	let expected = MethodResult::ok("0x0000000000000000");
 	assert_eq!(result, expected);
 }
+
+#[tokio::test]
+async fn archive_storage_hashes_values() {
+	let (mut client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
+
+	let block = BlockBuilderBuilder::new(&*client)
+		.on_parent_block(client.chain_info().genesis_hash)
+		.with_parent_block_number(0)
+		.build()
+		.unwrap()
+		.build()
+		.unwrap()
+		.block;
+	client.import(BlockOrigin::Own, block.clone()).await.unwrap();
+	let block_hash = format!("{:?}", block.header.hash());
+	let key = hex_string(&KEY);
+
+	let items: Vec<PaginatedStorageQuery<String>> = vec![
+		PaginatedStorageQuery {
+			key: key.clone(),
+			query_type: StorageQueryType::DescendantsHashes,
+			pagination_start_key: None,
+		},
+		PaginatedStorageQuery {
+			key: key.clone(),
+			query_type: StorageQueryType::DescendantsValues,
+			pagination_start_key: None,
+		},
+		PaginatedStorageQuery {
+			key: key.clone(),
+			query_type: StorageQueryType::Hash,
+			pagination_start_key: None,
+		},
+		PaginatedStorageQuery {
+			key: key.clone(),
+			query_type: StorageQueryType::Value,
+			pagination_start_key: None,
+		},
+	];
+
+	let result: ArchiveStorageResult = api
+		.call("archive_unstable_storage", rpc_params![&block_hash, items.clone()])
+		.await
+		.unwrap();
+
+	match result {
+		ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
+			// Key has not been imported yet.
+			assert_eq!(result.len(), 0);
+			assert_eq!(discarded_items, 0);
+		},
+		_ => panic!("Unexpected result"),
+	};
+
+	// Import a block with the given key-value pair.
+	let mut builder = BlockBuilderBuilder::new(&*client)
+		.on_parent_block(block.hash())
+		.with_parent_block_number(1)
+		.build()
+		.unwrap();
+	builder.push_storage_change(KEY.to_vec(), Some(VALUE.to_vec())).unwrap();
+	let block = builder.build().unwrap().block;
+	client.import(BlockOrigin::Own, block.clone()).await.unwrap();
+
+	let block_hash = format!("{:?}", block.header.hash());
+	let expected_hash = format!("{:?}", Blake2Hasher::hash(&VALUE));
+	let expected_value = hex_string(&VALUE);
+
+	let result: ArchiveStorageResult = api
+		.call("archive_unstable_storage", rpc_params![&block_hash, items])
+		.await
+		.unwrap();
+
+	match result {
+		ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
+			assert_eq!(result.len(), 4);
+			assert_eq!(discarded_items, 0);
+
+			assert_eq!(result[0].key, key);
+			assert_eq!(result[0].result, StorageResultType::Hash(expected_hash.clone()));
+			assert_eq!(result[1].key, key);
+			assert_eq!(result[1].result, StorageResultType::Value(expected_value.clone()));
+			assert_eq!(result[2].key, key);
+			assert_eq!(result[2].result, StorageResultType::Hash(expected_hash));
+			assert_eq!(result[3].key, key);
+			assert_eq!(result[3].result, StorageResultType::Value(expected_value));
+		},
+		_ => panic!("Unexpected result"),
+	};
+}
+
+#[tokio::test]
+async fn archive_storage_closest_merkle_value() {
+	let (mut client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
+
+	/// The core of this test.
+	///
+	/// Checks keys that are an exact match, keys with descendants, and keys that should
+	/// not return values.
+	///
+	/// Returns (key, merkle value) pairs.
+	async fn expect_merkle_request(
+		api: &RpcModule<Archive<Backend, Block, Client<Backend>>>,
+		block_hash: String,
+	) -> HashMap<String, String> {
+		let result: ArchiveStorageResult = api
+			.call(
+				"archive_unstable_storage",
+				rpc_params![
+					&block_hash,
+					vec![
+						PaginatedStorageQuery {
+							key: hex_string(b":AAAA"),
+							query_type: StorageQueryType::ClosestDescendantMerkleValue,
+							pagination_start_key: None,
+						},
+						PaginatedStorageQuery {
+							key: hex_string(b":AAAB"),
+							query_type: StorageQueryType::ClosestDescendantMerkleValue,
+							pagination_start_key: None,
+						},
+						// Keys with descendants.
+						PaginatedStorageQuery {
+							key: hex_string(b":A"),
+							query_type: StorageQueryType::ClosestDescendantMerkleValue,
+							pagination_start_key: None,
+						},
+						PaginatedStorageQuery {
+							key: hex_string(b":AA"),
+							query_type: StorageQueryType::ClosestDescendantMerkleValue,
+							pagination_start_key: None,
+						},
+						// Keys below this comment do not produce a result.
+						// Keys that exceed the keyspace of the trie.
+						PaginatedStorageQuery {
+							key: hex_string(b":AAAAX"),
+							query_type: StorageQueryType::ClosestDescendantMerkleValue,
+							pagination_start_key: None,
+						},
+						PaginatedStorageQuery {
+							key: hex_string(b":AAABX"),
+							query_type: StorageQueryType::ClosestDescendantMerkleValue,
+							pagination_start_key: None,
+						},
+						// Keys that are not part of the trie.
+						PaginatedStorageQuery {
+							key: hex_string(b":AAX"),
+							query_type: StorageQueryType::ClosestDescendantMerkleValue,
+							pagination_start_key: None,
+						},
+						PaginatedStorageQuery {
+							key: hex_string(b":AAAX"),
+							query_type: StorageQueryType::ClosestDescendantMerkleValue,
+							pagination_start_key: None,
+						},
+					]
+				],
+			)
+			.await
+			.unwrap();
+
+		let merkle_values: HashMap<_, _> = match result {
+			ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, .. }) => result
+				.into_iter()
+				.map(|res| {
+					let value = match res.result {
+						StorageResultType::ClosestDescendantMerkleValue(value) => value,
+						_ => panic!("Unexpected StorageResultType"),
+					};
+					(res.key, value)
+				})
+				.collect(),
+			_ => panic!("Unexpected result"),
+		};
+
+		// Response for AAAA, AAAB, A and AA.
+		assert_eq!(merkle_values.len(), 4);
+
+		// While checking that the expected merkle values align, the
+		// following assertions also verify that the returned keys are
+		// the expected ones.
+
+		// Values for AAAA and AAAB are different.
+		assert_ne!(
+			merkle_values.get(&hex_string(b":AAAA")).unwrap(),
+			merkle_values.get(&hex_string(b":AAAB")).unwrap()
+		);
+
+		// Values for A and AA should be on the same branch node.
+		assert_eq!(
+			merkle_values.get(&hex_string(b":A")).unwrap(),
+			merkle_values.get(&hex_string(b":AA")).unwrap()
+		);
+		// The branch node value must be different from the leaf values of
+		// both AAAA and AAAB.
+		assert_ne!(
+			merkle_values.get(&hex_string(b":A")).unwrap(),
+			merkle_values.get(&hex_string(b":AAAA")).unwrap()
+		);
+		assert_ne!(
+			merkle_values.get(&hex_string(b":A")).unwrap(),
+			merkle_values.get(&hex_string(b":AAAB")).unwrap()
+		);
+
+		merkle_values
+	}
+
+	// Import a new block with storage changes.
+	let mut builder = BlockBuilderBuilder::new(&*client)
+		.on_parent_block(client.chain_info().genesis_hash)
+		.with_parent_block_number(0)
+		.build()
+		.unwrap();
+	builder.push_storage_change(b":AAAA".to_vec(), Some(vec![1; 64])).unwrap();
+	builder.push_storage_change(b":AAAB".to_vec(), Some(vec![2; 64])).unwrap();
+	let block = builder.build().unwrap().block;
+	let block_hash = format!("{:?}", block.header.hash());
+	client.import(BlockOrigin::Own, block.clone()).await.unwrap();
+
+	let merkle_values_lhs = expect_merkle_request(&api, block_hash).await;
+
+	// Import a new block and change the AAAB value.
+	let mut builder = BlockBuilderBuilder::new(&*client)
+		.on_parent_block(block.hash())
+		.with_parent_block_number(1)
+		.build()
+		.unwrap();
+	builder.push_storage_change(b":AAAA".to_vec(), Some(vec![1; 64])).unwrap();
+	builder.push_storage_change(b":AAAB".to_vec(), Some(vec![3; 64])).unwrap();
+	let block = builder.build().unwrap().block;
+	let block_hash = format!("{:?}", block.header.hash());
+	client.import(BlockOrigin::Own, block.clone()).await.unwrap();
+
+	let merkle_values_rhs = expect_merkle_request(&api, block_hash).await;
+
+	// Change propagated to the root.
+	assert_ne!(
+		merkle_values_lhs.get(&hex_string(b":A")).unwrap(),
+		merkle_values_rhs.get(&hex_string(b":A")).unwrap()
+	);
+	assert_ne!(
+		merkle_values_lhs.get(&hex_string(b":AAAB")).unwrap(),
+		merkle_values_rhs.get(&hex_string(b":AAAB")).unwrap()
+	);
+	// However, the leaf value of AAAA remains unchanged.
+	assert_eq!(
+		merkle_values_lhs.get(&hex_string(b":AAAA")).unwrap(),
+		merkle_values_rhs.get(&hex_string(b":AAAA")).unwrap()
+	);
+}
+
+#[tokio::test]
+async fn archive_storage_paginate_iterations() {
+	// 1 iteration allowed before pagination kicks in.
+	let (mut client, api) = setup_api(1, MAX_QUERIED_LIMIT);
+
+	// Import a new block with storage changes.
+	let mut builder = BlockBuilderBuilder::new(&*client)
+		.on_parent_block(client.chain_info().genesis_hash)
+		.with_parent_block_number(0)
+		.build()
+		.unwrap();
+	builder.push_storage_change(b":m".to_vec(), Some(b"a".to_vec())).unwrap();
+	builder.push_storage_change(b":mo".to_vec(), Some(b"ab".to_vec())).unwrap();
+	builder.push_storage_change(b":moc".to_vec(), Some(b"abc".to_vec())).unwrap();
+	builder.push_storage_change(b":moD".to_vec(), Some(b"abcmoD".to_vec())).unwrap();
+	builder.push_storage_change(b":mock".to_vec(), Some(b"abcd".to_vec())).unwrap();
+	let block = builder.build().unwrap().block;
+	let block_hash = format!("{:?}", block.header.hash());
+	client.import(BlockOrigin::Own, block.clone()).await.unwrap();
+
+	// Calling with an invalid hash.
+	let invalid_hash = hex_string(&INVALID_HASH);
+	let result: ArchiveStorageResult = api
+		.call(
+			"archive_unstable_storage",
+			rpc_params![
+				&invalid_hash,
+				vec![PaginatedStorageQuery {
+					key: hex_string(b":m"),
+					query_type: StorageQueryType::DescendantsValues,
+					pagination_start_key: None,
+				}]
+			],
+		)
+		.await
+		.unwrap();
+	match result {
+		ArchiveStorageResult::Err(_) => (),
+		_ => panic!("Unexpected result"),
+	};
+
+	// Valid call with storage at the key.
+	let result: ArchiveStorageResult = api
+		.call(
+			"archive_unstable_storage",
+			rpc_params![
+				&block_hash,
+				vec![PaginatedStorageQuery {
+					key: hex_string(b":m"),
+					query_type: StorageQueryType::DescendantsValues,
+					pagination_start_key: None,
+				}]
+			],
+		)
+		.await
+		.unwrap();
+	match result {
+		ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
+			assert_eq!(result.len(), 1);
+			assert_eq!(discarded_items, 0);
+
+			assert_eq!(result[0].key, hex_string(b":m"));
+			assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"a")));
+		},
+		_ => panic!("Unexpected result"),
+	};
+
+	// Continue with pagination.
+	let result: ArchiveStorageResult = api
+		.call(
+			"archive_unstable_storage",
+			rpc_params![
+				&block_hash,
+				vec![PaginatedStorageQuery {
+					key: hex_string(b":m"),
+					query_type: StorageQueryType::DescendantsValues,
+					pagination_start_key: Some(hex_string(b":m")),
+				}]
+			],
+		)
+		.await
+		.unwrap();
+	match result {
+		ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
+			assert_eq!(result.len(), 1);
+			assert_eq!(discarded_items, 0);
+
+			assert_eq!(result[0].key, hex_string(b":mo"));
+			assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"ab")));
+		},
+		_ => panic!("Unexpected result"),
+	};
+
+	// Continue with pagination.
+	let result: ArchiveStorageResult = api
+		.call(
+			"archive_unstable_storage",
+			rpc_params![
+				&block_hash,
+				vec![PaginatedStorageQuery {
+					key: hex_string(b":m"),
+					query_type: StorageQueryType::DescendantsValues,
+					pagination_start_key: Some(hex_string(b":mo")),
+				}]
+			],
+		)
+		.await
+		.unwrap();
+	match result {
+		ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
+			assert_eq!(result.len(), 1);
+			assert_eq!(discarded_items, 0);
+
+			assert_eq!(result[0].key, hex_string(b":moD"));
+			assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abcmoD")));
+		},
+		_ => panic!("Unexpected result"),
+	};
+
+	// Continue with pagination.
+	let result: ArchiveStorageResult = api
+		.call(
+			"archive_unstable_storage",
+			rpc_params![
+				&block_hash,
+				vec![PaginatedStorageQuery {
+					key: hex_string(b":m"),
+					query_type: StorageQueryType::DescendantsValues,
+					pagination_start_key: Some(hex_string(b":moD")),
+				}]
+			],
+		)
+		.await
+		.unwrap();
+	match result {
+		ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
+			assert_eq!(result.len(), 1);
+			assert_eq!(discarded_items, 0);
+
+			assert_eq!(result[0].key, hex_string(b":moc"));
+			assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abc")));
+		},
+		_ => panic!("Unexpected result"),
+	};
+
+	// Continue with pagination.
+	let result: ArchiveStorageResult = api
+		.call(
+			"archive_unstable_storage",
+			rpc_params![
+				&block_hash,
+				vec![PaginatedStorageQuery {
+					key: hex_string(b":m"),
+					query_type: StorageQueryType::DescendantsValues,
+					pagination_start_key: Some(hex_string(b":moc")),
+				}]
+			],
+		)
+		.await
+		.unwrap();
+	match result {
+		ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
+			assert_eq!(result.len(), 1);
+			assert_eq!(discarded_items, 0);
+
+			assert_eq!(result[0].key, hex_string(b":mock"));
+			assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abcd")));
+		},
+		_ => panic!("Unexpected result"),
+	};
+
+	// Continue with pagination until no keys are returned.
+	let result: ArchiveStorageResult = api
+		.call(
+			"archive_unstable_storage",
+			rpc_params![
+				&block_hash,
+				vec![PaginatedStorageQuery {
+					key: hex_string(b":m"),
+					query_type: StorageQueryType::DescendantsValues,
+					pagination_start_key: Some(hex_string(b":mock")),
+				}]
+			],
+		)
+		.await
+		.unwrap();
+	match result {
+		ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
+			assert_eq!(result.len(), 0);
+			assert_eq!(discarded_items, 0);
+		},
+		_ => panic!("Unexpected result"),
+	};
+}
+
+#[tokio::test]
+async fn archive_storage_discarded_items() {
+	// One query at a time
+	let (mut client, api) = setup_api(MAX_PAGINATION_LIMIT, 1);
+
+	// Import a new block with storage changes.
+	let mut builder = BlockBuilderBuilder::new(&*client)
+		.on_parent_block(client.chain_info().genesis_hash)
+		.with_parent_block_number(0)
+		.build()
+		.unwrap();
+	builder.push_storage_change(b":m".to_vec(), Some(b"a".to_vec())).unwrap();
+	let block = builder.build().unwrap().block;
+	let block_hash = format!("{:?}", block.header.hash());
+	client.import(BlockOrigin::Own, block.clone()).await.unwrap();
+
+	// Valid call with storage at the key.
+	let result: ArchiveStorageResult = api
+		.call(
+			"archive_unstable_storage",
+			rpc_params![
+				&block_hash,
+				vec![
+					PaginatedStorageQuery {
+						key: hex_string(b":m"),
+						query_type: StorageQueryType::Value,
+						pagination_start_key: None,
+					},
+					PaginatedStorageQuery {
+						key: hex_string(b":m"),
+						query_type: StorageQueryType::Hash,
+						pagination_start_key: None,
+					},
+					PaginatedStorageQuery {
+						key: hex_string(b":m"),
+						query_type: StorageQueryType::Hash,
+						pagination_start_key: None,
+					}
+				]
+			],
+		)
+		.await
+		.unwrap();
+	match result {
+		ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
+			assert_eq!(result.len(), 1);
+			assert_eq!(discarded_items, 2);
+
+			assert_eq!(result[0].key, hex_string(b":m"));
+			assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"a")));
+		},
+		_ => panic!("Unexpected result"),
+	};
+}
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/api.rs b/substrate/client/rpc-spec-v2/src/chain_head/api.rs
index 9ae80137955..3d6091b91bd 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/api.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/api.rs
@@ -19,7 +19,10 @@
 #![allow(non_snake_case)]
 
 //! API trait of the chain head.
-use crate::chain_head::event::{FollowEvent, MethodResponse, StorageQuery};
+use crate::{
+	chain_head::event::{FollowEvent, MethodResponse},
+	common::events::StorageQuery,
+};
 use jsonrpsee::{core::RpcResult, proc_macros::rpc};
 use sp_rpc::list::ListOrValue;
 
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs
index 8e04ac7b177..6e4d6ade965 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs
@@ -27,11 +27,11 @@ use crate::{
 		api::ChainHeadApiServer,
 		chain_head_follow::ChainHeadFollower,
 		error::Error as ChainHeadRpcError,
-		event::{FollowEvent, MethodResponse, OperationError, StorageQuery},
-		hex_string,
+		event::{FollowEvent, MethodResponse, OperationError},
 		subscription::{SubscriptionManagement, SubscriptionManagementError},
 	},
-	SubscriptionTaskExecutor,
+	common::events::StorageQuery,
+	hex_string, SubscriptionTaskExecutor,
 };
 use codec::Encode;
 use futures::future::FutureExt;
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs
index c23489a050e..ee39ec253a3 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs
@@ -22,33 +22,24 @@ use std::{collections::VecDeque, marker::PhantomData, sync::Arc};
 
 use sc_client_api::{Backend, ChildInfo, StorageKey, StorageProvider};
 use sc_utils::mpsc::TracingUnboundedSender;
-use sp_core::storage::well_known_keys;
 use sp_runtime::traits::Block as BlockT;
 
-use crate::chain_head::event::OperationStorageItems;
-
-use super::{
-	event::{
-		OperationError, OperationId, StorageQuery, StorageQueryType, StorageResult,
-		StorageResultType,
+use crate::{
+	chain_head::{
+		event::{OperationError, OperationId, OperationStorageItems},
+		subscription::BlockGuard,
+		FollowEvent,
+	},
+	common::{
+		events::{StorageQuery, StorageQueryType},
+		storage::{IterQueryType, QueryIter, QueryIterResult, Storage},
 	},
-	hex_string,
-	subscription::BlockGuard,
-	FollowEvent,
 };
 
-/// The query type of an interation.
-enum IterQueryType {
-	/// Iterating over (key, value) pairs.
-	Value,
-	/// Iterating over (key, hash) pairs.
-	Hash,
-}
-
 /// Generates the events of the `chainHead_storage` method.
 pub struct ChainHeadStorage<Client, Block, BE> {
-	/// Substrate client.
-	client: Arc<Client>,
+	/// Storage client.
+	client: Storage<Client, Block, BE>,
 	/// Queue of operations that may require pagination.
 	iter_operations: VecDeque<QueryIter>,
 	/// The maximum number of items reported by the `chainHead_storage` before
@@ -61,7 +52,7 @@ impl<Client, Block, BE> ChainHeadStorage<Client, Block, BE> {
 	/// Constructs a new [`ChainHeadStorage`].
 	pub fn new(client: Arc<Client>, operation_max_storage_items: usize) -> Self {
 		Self {
-			client,
+			client: Storage::new(client),
 			iter_operations: VecDeque::new(),
 			operation_max_storage_items,
 			_phandom: PhantomData,
@@ -69,163 +60,12 @@ impl<Client, Block, BE> ChainHeadStorage<Client, Block, BE> {
 	}
 }
 
-/// Query to iterate over storage.
-struct QueryIter {
-	/// The key from which the iteration was started.
-	query_key: StorageKey,
-	/// The key after which pagination should resume.
-	pagination_start_key: Option<StorageKey>,
-	/// The type of the query (either value or hash).
-	ty: IterQueryType,
-}
-
-/// Checks if the provided key (main or child key) is valid
-/// for queries.
-///
-/// Keys that are identical to `:child_storage:` or `:child_storage:default:`
-/// are not queryable.
-fn is_key_queryable(key: &[u8]) -> bool {
-	!well_known_keys::is_default_child_storage_key(key) &&
-		!well_known_keys::is_child_storage_key(key)
-}
-
-/// The result of making a query call.
-type QueryResult = Result<Option<StorageResult>, String>;
-
-/// The result of iterating over keys.
-type QueryIterResult = Result<(Vec<StorageResult>, Option<QueryIter>), String>;
-
 impl<Client, Block, BE> ChainHeadStorage<Client, Block, BE>
 where
 	Block: BlockT + 'static,
 	BE: Backend<Block> + 'static,
 	Client: StorageProvider<Block, BE> + 'static,
 {
-	/// Fetch the value from storage.
-	fn query_storage_value(
-		&self,
-		hash: Block::Hash,
-		key: &StorageKey,
-		child_key: Option<&ChildInfo>,
-	) -> QueryResult {
-		let result = if let Some(child_key) = child_key {
-			self.client.child_storage(hash, child_key, key)
-		} else {
-			self.client.storage(hash, key)
-		};
-
-		result
-			.map(|opt| {
-				QueryResult::Ok(opt.map(|storage_data| StorageResult {
-					key: hex_string(&key.0),
-					result: StorageResultType::Value(hex_string(&storage_data.0)),
-				}))
-			})
-			.unwrap_or_else(|error| QueryResult::Err(error.to_string()))
-	}
-
-	/// Fetch the hash of a value from storage.
-	fn query_storage_hash(
-		&self,
-		hash: Block::Hash,
-		key: &StorageKey,
-		child_key: Option<&ChildInfo>,
-	) -> QueryResult {
-		let result = if let Some(child_key) = child_key {
-			self.client.child_storage_hash(hash, child_key, key)
-		} else {
-			self.client.storage_hash(hash, key)
-		};
-
-		result
-			.map(|opt| {
-				QueryResult::Ok(opt.map(|storage_data| StorageResult {
-					key: hex_string(&key.0),
-					result: StorageResultType::Hash(hex_string(&storage_data.as_ref())),
-				}))
-			})
-			.unwrap_or_else(|error| QueryResult::Err(error.to_string()))
-	}
-
-	/// Fetch the closest merkle value.
-	fn query_storage_merkle_value(
-		&self,
-		hash: Block::Hash,
-		key: &StorageKey,
-		child_key: Option<&ChildInfo>,
-	) -> QueryResult {
-		let result = if let Some(child_key) = child_key {
-			self.client.child_closest_merkle_value(hash, child_key, key)
-		} else {
-			self.client.closest_merkle_value(hash, key)
-		};
-
-		result
-			.map(|opt| {
-				QueryResult::Ok(opt.map(|storage_data| {
-					let result = match &storage_data {
-						sc_client_api::MerkleValue::Node(data) => hex_string(&data.as_slice()),
-						sc_client_api::MerkleValue::Hash(hash) => hex_string(&hash.as_ref()),
-					};
-
-					StorageResult {
-						key: hex_string(&key.0),
-						result: StorageResultType::ClosestDescendantMerkleValue(result),
-					}
-				}))
-			})
-			.unwrap_or_else(|error| QueryResult::Err(error.to_string()))
-	}
-
-	/// Iterate over at most `operation_max_storage_items` keys.
-	///
-	/// Returns the storage result with a potential next key to resume iteration.
-	fn query_storage_iter_pagination(
-		&self,
-		query: QueryIter,
-		hash: Block::Hash,
-		child_key: Option<&ChildInfo>,
-	) -> QueryIterResult {
-		let QueryIter { ty, query_key, pagination_start_key } = query;
-
-		let mut keys_iter = if let Some(child_key) = child_key {
-			self.client.child_storage_keys(
-				hash,
-				child_key.to_owned(),
-				Some(&query_key),
-				pagination_start_key.as_ref(),
-			)
-		} else {
-			self.client.storage_keys(hash, Some(&query_key), pagination_start_key.as_ref())
-		}
-		.map_err(|err| err.to_string())?;
-
-		let mut ret = Vec::with_capacity(self.operation_max_storage_items);
-		let mut next_pagination_key = None;
-		for _ in 0..self.operation_max_storage_items {
-			let Some(key) = keys_iter.next() else { break };
-
-			next_pagination_key = Some(key.clone());
-
-			let result = match ty {
-				IterQueryType::Value => self.query_storage_value(hash, &key, child_key),
-				IterQueryType::Hash => self.query_storage_hash(hash, &key, child_key),
-			}?;
-
-			if let Some(value) = result {
-				ret.push(value);
-			}
-		}
-
-		// Save the next key if any to continue the iteration.
-		let maybe_next_query = keys_iter.next().map(|_| QueryIter {
-			ty,
-			query_key,
-			pagination_start_key: next_pagination_key,
-		});
-		Ok((ret, maybe_next_query))
-	}
-
 	/// Iterate over (key, hash) and (key, value) generating the `WaitingForContinue` event if
 	/// necessary.
 	async fn generate_storage_iter_events(
@@ -242,7 +82,12 @@ where
 				return
 			}
 
-			let result = self.query_storage_iter_pagination(query, hash, child_key.as_ref());
+			let result = self.client.query_iter_pagination(
+				query,
+				hash,
+				child_key.as_ref(),
+				self.operation_max_storage_items,
+			);
 			let (events, maybe_next_query) = match result {
 				QueryIterResult::Ok(result) => result,
 				QueryIterResult::Err(error) => {
@@ -294,24 +139,11 @@ where
 		let sender = block_guard.response_sender();
 		let operation = block_guard.operation();
 
-		if let Some(child_key) = child_key.as_ref() {
-			if !is_key_queryable(child_key.storage_key()) {
-				let _ = sender.unbounded_send(FollowEvent::<Block::Hash>::OperationStorageDone(
-					OperationId { operation_id: operation.operation_id() },
-				));
-				return
-			}
-		}
-
 		let mut storage_results = Vec::with_capacity(items.len());
 		for item in items {
-			if !is_key_queryable(&item.key.0) {
-				continue
-			}
-
 			match item.query_type {
 				StorageQueryType::Value => {
-					match self.query_storage_value(hash, &item.key, child_key.as_ref()) {
+					match self.client.query_value(hash, &item.key, child_key.as_ref()) {
 						Ok(Some(value)) => storage_results.push(value),
 						Ok(None) => continue,
 						Err(error) => {
@@ -321,7 +153,7 @@ where
 					}
 				},
 				StorageQueryType::Hash =>
-					match self.query_storage_hash(hash, &item.key, child_key.as_ref()) {
+					match self.client.query_hash(hash, &item.key, child_key.as_ref()) {
 						Ok(Some(value)) => storage_results.push(value),
 						Ok(None) => continue,
 						Err(error) => {
@@ -330,7 +162,7 @@ where
 						},
 					},
 				StorageQueryType::ClosestDescendantMerkleValue =>
-					match self.query_storage_merkle_value(hash, &item.key, child_key.as_ref()) {
+					match self.client.query_merkle_value(hash, &item.key, child_key.as_ref()) {
 						Ok(Some(value)) => storage_results.push(value),
 						Ok(None) => continue,
 						Err(error) => {
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/event.rs b/substrate/client/rpc-spec-v2/src/chain_head/event.rs
index b5f9d6cc2ff..560ab87eab4 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/event.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/event.rs
@@ -23,6 +23,8 @@ use sp_api::ApiError;
 use sp_version::RuntimeVersion;
 use std::collections::BTreeMap;
 
+use crate::common::events::StorageResult;
+
 /// The operation could not be processed due to an error.
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
@@ -313,56 +315,6 @@ pub enum FollowEvent<Hash> {
 	Stop,
 }
 
-/// The storage item received as paramter.
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
-#[serde(rename_all = "camelCase")]
-pub struct StorageQuery<Key> {
-	/// The provided key.
-	pub key: Key,
-	/// The type of the storage query.
-	#[serde(rename = "type")]
-	pub query_type: StorageQueryType,
-}
-
-/// The type of the storage query.
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
-#[serde(rename_all = "camelCase")]
-pub enum StorageQueryType {
-	/// Fetch the value of the provided key.
-	Value,
-	/// Fetch the hash of the value of the provided key.
-	Hash,
-	/// Fetch the closest descendant merkle value.
-	ClosestDescendantMerkleValue,
-	/// Fetch the values of all descendants of they provided key.
-	DescendantsValues,
-	/// Fetch the hashes of the values of all descendants of they provided key.
-	DescendantsHashes,
-}
-
-/// The storage result.
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
-#[serde(rename_all = "camelCase")]
-pub struct StorageResult {
-	/// The hex-encoded key of the result.
-	pub key: String,
-	/// The result of the query.
-	#[serde(flatten)]
-	pub result: StorageResultType,
-}
-
-/// The type of the storage query.
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
-#[serde(rename_all = "camelCase")]
-pub enum StorageResultType {
-	/// Fetch the value of the provided key.
-	Value(String),
-	/// Fetch the hash of the value of the provided key.
-	Hash(String),
-	/// Fetch the closest descendant merkle value.
-	ClosestDescendantMerkleValue(String),
-}
-
 /// The method respose of `chainHead_body`, `chainHead_call` and `chainHead_storage`.
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
@@ -388,6 +340,8 @@ pub struct MethodResponseStarted {
 
 #[cfg(test)]
 mod tests {
+	use crate::common::events::StorageResultType;
+
 	use super::*;
 
 	#[test]
@@ -697,96 +651,4 @@ mod tests {
 		let event_dec: MethodResponse = serde_json::from_str(exp).unwrap();
 		assert_eq!(event_dec, event);
 	}
-
-	#[test]
-	fn chain_head_storage_query() {
-		// Item with Value.
-		let item = StorageQuery { key: "0x1", query_type: StorageQueryType::Value };
-		// Encode
-		let ser = serde_json::to_string(&item).unwrap();
-		let exp = r#"{"key":"0x1","type":"value"}"#;
-		assert_eq!(ser, exp);
-		// Decode
-		let dec: StorageQuery<&str> = serde_json::from_str(exp).unwrap();
-		assert_eq!(dec, item);
-
-		// Item with Hash.
-		let item = StorageQuery { key: "0x1", query_type: StorageQueryType::Hash };
-		// Encode
-		let ser = serde_json::to_string(&item).unwrap();
-		let exp = r#"{"key":"0x1","type":"hash"}"#;
-		assert_eq!(ser, exp);
-		// Decode
-		let dec: StorageQuery<&str> = serde_json::from_str(exp).unwrap();
-		assert_eq!(dec, item);
-
-		// Item with DescendantsValues.
-		let item = StorageQuery { key: "0x1", query_type: StorageQueryType::DescendantsValues };
-		// Encode
-		let ser = serde_json::to_string(&item).unwrap();
-		let exp = r#"{"key":"0x1","type":"descendantsValues"}"#;
-		assert_eq!(ser, exp);
-		// Decode
-		let dec: StorageQuery<&str> = serde_json::from_str(exp).unwrap();
-		assert_eq!(dec, item);
-
-		// Item with DescendantsHashes.
-		let item = StorageQuery { key: "0x1", query_type: StorageQueryType::DescendantsHashes };
-		// Encode
-		let ser = serde_json::to_string(&item).unwrap();
-		let exp = r#"{"key":"0x1","type":"descendantsHashes"}"#;
-		assert_eq!(ser, exp);
-		// Decode
-		let dec: StorageQuery<&str> = serde_json::from_str(exp).unwrap();
-		assert_eq!(dec, item);
-
-		// Item with Merkle.
-		let item =
-			StorageQuery { key: "0x1", query_type: StorageQueryType::ClosestDescendantMerkleValue };
-		// Encode
-		let ser = serde_json::to_string(&item).unwrap();
-		let exp = r#"{"key":"0x1","type":"closestDescendantMerkleValue"}"#;
-		assert_eq!(ser, exp);
-		// Decode
-		let dec: StorageQuery<&str> = serde_json::from_str(exp).unwrap();
-		assert_eq!(dec, item);
-	}
-
-	#[test]
-	fn chain_head_storage_result() {
-		// Item with Value.
-		let item =
-			StorageResult { key: "0x1".into(), result: StorageResultType::Value("res".into()) };
-		// Encode
-		let ser = serde_json::to_string(&item).unwrap();
-		let exp = r#"{"key":"0x1","value":"res"}"#;
-		assert_eq!(ser, exp);
-		// Decode
-		let dec: StorageResult = serde_json::from_str(exp).unwrap();
-		assert_eq!(dec, item);
-
-		// Item with Hash.
-		let item =
-			StorageResult { key: "0x1".into(), result: StorageResultType::Hash("res".into()) };
-		// Encode
-		let ser = serde_json::to_string(&item).unwrap();
-		let exp = r#"{"key":"0x1","hash":"res"}"#;
-		assert_eq!(ser, exp);
-		// Decode
-		let dec: StorageResult = serde_json::from_str(exp).unwrap();
-		assert_eq!(dec, item);
-
-		// Item with DescendantsValues.
-		let item = StorageResult {
-			key: "0x1".into(),
-			result: StorageResultType::ClosestDescendantMerkleValue("res".into()),
-		};
-		// Encode
-		let ser = serde_json::to_string(&item).unwrap();
-		let exp = r#"{"key":"0x1","closestDescendantMerkleValue":"res"}"#;
-		assert_eq!(ser, exp);
-		// Decode
-		let dec: StorageResult = serde_json::from_str(exp).unwrap();
-		assert_eq!(dec, item);
-	}
 }
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/mod.rs b/substrate/client/rpc-spec-v2/src/chain_head/mod.rs
index 1bd22885780..4cbbd00f64f 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/mod.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/mod.rs
@@ -42,10 +42,3 @@ pub use event::{
 	BestBlockChanged, ErrorEvent, Finalized, FollowEvent, Initialized, NewBlock, RuntimeEvent,
 	RuntimeVersionEvent,
 };
-
-use sp_core::hexdisplay::{AsBytesRef, HexDisplay};
-
-/// Util function to print the results of `chianHead` as hex string
-pub(crate) fn hex_string<Data: AsBytesRef>(data: &Data) -> String {
-	format!("0x{:?}", HexDisplay::from(data))
-}
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs
index c8f2362b9eb..4859793a8e2 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs
@@ -16,9 +16,10 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-use crate::chain_head::{
-	event::{MethodResponse, StorageQuery, StorageQueryType, StorageResultType},
-	test_utils::ChainHeadMockClient,
+use crate::{
+	chain_head::{event::MethodResponse, test_utils::ChainHeadMockClient},
+	common::events::{StorageQuery, StorageQueryType, StorageResultType},
+	hex_string,
 };
 
 use super::*;
diff --git a/substrate/client/rpc-spec-v2/src/common/events.rs b/substrate/client/rpc-spec-v2/src/common/events.rs
new file mode 100644
index 00000000000..b1627d74c84
--- /dev/null
+++ b/substrate/client/rpc-spec-v2/src/common/events.rs
@@ -0,0 +1,273 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Common events for RPC-V2 spec.
+
+use serde::{Deserialize, Serialize};
+
+/// The storage item to query.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct StorageQuery<Key> {
+	/// The provided key.
+	pub key: Key,
+	/// The type of the storage query.
+	#[serde(rename = "type")]
+	pub query_type: StorageQueryType,
+}
+
+/// The storage item to query with pagination.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct PaginatedStorageQuery<Key> {
+	/// The provided key.
+	pub key: Key,
+	/// The type of the storage query.
+	#[serde(rename = "type")]
+	pub query_type: StorageQueryType,
+	/// The pagination key from which the iteration should resume.
+	#[serde(skip_serializing_if = "Option::is_none")]
+	#[serde(default)]
+	pub pagination_start_key: Option<Key>,
+}
+
+/// The type of the storage query.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub enum StorageQueryType {
+	/// Fetch the value of the provided key.
+	Value,
+	/// Fetch the hash of the value of the provided key.
+	Hash,
+	/// Fetch the closest descendant merkle value.
+	ClosestDescendantMerkleValue,
+	/// Fetch the values of all descendants of the provided key.
+	DescendantsValues,
+	/// Fetch the hashes of the values of all descendants of the provided key.
+	DescendantsHashes,
+}
+
+impl StorageQueryType {
+	/// Returns `true` if the query is a descendant query.
+	pub fn is_descendant_query(&self) -> bool {
+		matches!(self, Self::DescendantsValues | Self::DescendantsHashes)
+	}
+}
+
+/// The storage result.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct StorageResult {
+	/// The hex-encoded key of the result.
+	pub key: String,
+	/// The result of the query.
+	#[serde(flatten)]
+	pub result: StorageResultType,
+}
+
+/// The type of the storage result.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub enum StorageResultType {
+	/// The value of the provided key.
+	Value(String),
+	/// The hash of the value of the provided key.
+	Hash(String),
+	/// The closest descendant merkle value.
+	ClosestDescendantMerkleValue(String),
+}
+
+/// The error of a storage call.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct StorageResultErr {
+	/// The hex-encoded key of the result.
+	pub key: String,
+	/// The error of the query.
+	#[serde(flatten)]
+	pub error: StorageResultType,
+}
+
+/// The result of a storage call.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum ArchiveStorageResult {
+	/// Query generated a result.
+	Ok(ArchiveStorageMethodOk),
+	/// Query encountered an error.
+	Err(ArchiveStorageMethodErr),
+}
+
+impl ArchiveStorageResult {
+	/// Create a new `ArchiveStorageResult::Ok` result.
+	pub fn ok(result: Vec<StorageResult>, discarded_items: usize) -> Self {
+		Self::Ok(ArchiveStorageMethodOk { result, discarded_items })
+	}
+
+	/// Create a new `ArchiveStorageResult::Err` result.
+	pub fn err(error: String) -> Self {
+		Self::Err(ArchiveStorageMethodErr { error })
+	}
+}
+
+/// The result of a storage call.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ArchiveStorageMethodOk {
+	/// Reported results.
+	pub result: Vec<StorageResult>,
+	/// Number of discarded items.
+	pub discarded_items: usize,
+}
+
+/// The error of a storage call.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ArchiveStorageMethodErr {
+	/// Reported error.
+	pub error: String,
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	#[test]
+	fn storage_result() {
+		// Item with Value.
+		let item =
+			StorageResult { key: "0x1".into(), result: StorageResultType::Value("res".into()) };
+		// Encode
+		let ser = serde_json::to_string(&item).unwrap();
+		let exp = r#"{"key":"0x1","value":"res"}"#;
+		assert_eq!(ser, exp);
+		// Decode
+		let dec: StorageResult = serde_json::from_str(exp).unwrap();
+		assert_eq!(dec, item);
+
+		// Item with Hash.
+		let item =
+			StorageResult { key: "0x1".into(), result: StorageResultType::Hash("res".into()) };
+		// Encode
+		let ser = serde_json::to_string(&item).unwrap();
+		let exp = r#"{"key":"0x1","hash":"res"}"#;
+		assert_eq!(ser, exp);
+		// Decode
+		let dec: StorageResult = serde_json::from_str(exp).unwrap();
+		assert_eq!(dec, item);
+
+		// Item with DescendantsValues.
+		let item = StorageResult {
+			key: "0x1".into(),
+			result: StorageResultType::ClosestDescendantMerkleValue("res".into()),
+		};
+		// Encode
+		let ser = serde_json::to_string(&item).unwrap();
+		let exp = r#"{"key":"0x1","closestDescendantMerkleValue":"res"}"#;
+		assert_eq!(ser, exp);
+		// Decode
+		let dec: StorageResult = serde_json::from_str(exp).unwrap();
+		assert_eq!(dec, item);
+	}
+
+	#[test]
+	fn storage_query() {
+		// Item with Value.
+		let item = StorageQuery { key: "0x1", query_type: StorageQueryType::Value };
+		// Encode
+		let ser = serde_json::to_string(&item).unwrap();
+		let exp = r#"{"key":"0x1","type":"value"}"#;
+		assert_eq!(ser, exp);
+		// Decode
+		let dec: StorageQuery<&str> = serde_json::from_str(exp).unwrap();
+		assert_eq!(dec, item);
+
+		// Item with Hash.
+		let item = StorageQuery { key: "0x1", query_type: StorageQueryType::Hash };
+		// Encode
+		let ser = serde_json::to_string(&item).unwrap();
+		let exp = r#"{"key":"0x1","type":"hash"}"#;
+		assert_eq!(ser, exp);
+		// Decode
+		let dec: StorageQuery<&str> = serde_json::from_str(exp).unwrap();
+		assert_eq!(dec, item);
+
+		// Item with DescendantsValues.
+		let item = StorageQuery { key: "0x1", query_type: StorageQueryType::DescendantsValues };
+		// Encode
+		let ser = serde_json::to_string(&item).unwrap();
+		let exp = r#"{"key":"0x1","type":"descendantsValues"}"#;
+		assert_eq!(ser, exp);
+		// Decode
+		let dec: StorageQuery<&str> = serde_json::from_str(exp).unwrap();
+		assert_eq!(dec, item);
+
+		// Item with DescendantsHashes.
+		let item = StorageQuery { key: "0x1", query_type: StorageQueryType::DescendantsHashes };
+		// Encode
+		let ser = serde_json::to_string(&item).unwrap();
+		let exp = r#"{"key":"0x1","type":"descendantsHashes"}"#;
+		assert_eq!(ser, exp);
+		// Decode
+		let dec: StorageQuery<&str> = serde_json::from_str(exp).unwrap();
+		assert_eq!(dec, item);
+
+		// Item with ClosestDescendantMerkleValue.
+		let item =
+			StorageQuery { key: "0x1", query_type: StorageQueryType::ClosestDescendantMerkleValue };
+		// Encode
+		let ser = serde_json::to_string(&item).unwrap();
+		let exp = r#"{"key":"0x1","type":"closestDescendantMerkleValue"}"#;
+		assert_eq!(ser, exp);
+		// Decode
+		let dec: StorageQuery<&str> = serde_json::from_str(exp).unwrap();
+		assert_eq!(dec, item);
+	}
+
+	#[test]
+	fn storage_query_paginated() {
+		let item = PaginatedStorageQuery {
+			key: "0x1",
+			query_type: StorageQueryType::Value,
+			pagination_start_key: None,
+		};
+		// Encode
+		let ser = serde_json::to_string(&item).unwrap();
+		let exp = r#"{"key":"0x1","type":"value"}"#;
+		assert_eq!(ser, exp);
+		// Decode
+		let dec: StorageQuery<&str> = serde_json::from_str(exp).unwrap();
+		assert_eq!(dec.key, item.key);
+		assert_eq!(dec.query_type, item.query_type);
+		let dec: PaginatedStorageQuery<&str> = serde_json::from_str(exp).unwrap();
+		assert_eq!(dec, item);
+
+		let item = PaginatedStorageQuery {
+			key: "0x1",
+			query_type: StorageQueryType::Value,
+			pagination_start_key: Some("0x2"),
+		};
+		// Encode
+		let ser = serde_json::to_string(&item).unwrap();
+		let exp = r#"{"key":"0x1","type":"value","paginationStartKey":"0x2"}"#;
+		assert_eq!(ser, exp);
+		// Decode
+		let dec: PaginatedStorageQuery<&str> = serde_json::from_str(exp).unwrap();
+		assert_eq!(dec, item);
+	}
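+
+	// Illustrative sketch (test name chosen for clarity here): round-trip
+	// `ArchiveStorageResult` to document the JSON shapes implied by the
+	// `#[serde(untagged)]` and camelCase derives above.
+	#[test]
+	fn archive_storage_result() {
+		// Successful results flatten into the `ArchiveStorageMethodOk` shape.
+		let item = ArchiveStorageResult::ok(Vec::new(), 0);
+		// Encode
+		let ser = serde_json::to_string(&item).unwrap();
+		let exp = r#"{"result":[],"discardedItems":0}"#;
+		assert_eq!(ser, exp);
+		// Decode
+		let dec: ArchiveStorageResult = serde_json::from_str(exp).unwrap();
+		assert_eq!(dec, item);
+
+		// Errors flatten into the `ArchiveStorageMethodErr` shape.
+		let item = ArchiveStorageResult::err("boom".into());
+		// Encode
+		let ser = serde_json::to_string(&item).unwrap();
+		let exp = r#"{"error":"boom"}"#;
+		assert_eq!(ser, exp);
+		// Decode
+		let dec: ArchiveStorageResult = serde_json::from_str(exp).unwrap();
+		assert_eq!(dec, item);
+	}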
+}
diff --git a/substrate/client/rpc-spec-v2/src/common/mod.rs b/substrate/client/rpc-spec-v2/src/common/mod.rs
new file mode 100644
index 00000000000..ac1af8fce3c
--- /dev/null
+++ b/substrate/client/rpc-spec-v2/src/common/mod.rs
@@ -0,0 +1,17 @@
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Common types and functionality for the RPC-V2 spec.
+
+pub mod events;
+pub mod storage;
diff --git a/substrate/client/rpc-spec-v2/src/common/storage.rs b/substrate/client/rpc-spec-v2/src/common/storage.rs
new file mode 100644
index 00000000000..bd249e033f8
--- /dev/null
+++ b/substrate/client/rpc-spec-v2/src/common/storage.rs
@@ -0,0 +1,198 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Storage queries for the RPC-V2 spec.
+
+use std::{marker::PhantomData, sync::Arc};
+
+use sc_client_api::{Backend, ChildInfo, StorageKey, StorageProvider};
+use sp_runtime::traits::Block as BlockT;
+
+use super::events::{StorageResult, StorageResultType};
+use crate::hex_string;
+
+/// Call into the storage of blocks.
+pub struct Storage<Client, Block, BE> {
+	/// Substrate client.
+	client: Arc<Client>,
+	_phantom: PhantomData<(BE, Block)>,
+}
+
+impl<Client, Block, BE> Storage<Client, Block, BE> {
+	/// Constructs a new [`Storage`].
+	pub fn new(client: Arc<Client>) -> Self {
+		Self { client, _phantom: PhantomData }
+	}
+}
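+
+// Illustrative usage (assuming an `Arc`-wrapped client implementing
+// `StorageProvider<Block, BE>` is available):
+//
+// let storage = Storage::<Client, Block, BE>::new(client.clone());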
+
+/// Query to iterate over storage.
+pub struct QueryIter {
+	/// The key from which the iteration was started.
+	pub query_key: StorageKey,
+	/// The key after which pagination should resume.
+	pub pagination_start_key: Option<StorageKey>,
+	/// The type of the query (either value or hash).
+	pub ty: IterQueryType,
+}
+
+/// The query type of an iteration.
+pub enum IterQueryType {
+	/// Iterating over (key, value) pairs.
+	Value,
+	/// Iterating over (key, hash) pairs.
+	Hash,
+}
+
+/// The result of making a query call.
+pub type QueryResult = Result<Option<StorageResult>, String>;
+
+/// The result of iterating over keys.
+pub type QueryIterResult = Result<(Vec<StorageResult>, Option<QueryIter>), String>;
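+
+// Illustrative sketch: a query that iterates over the values stored under the
+// key prefix `0xabcd`, with no pagination checkpoint yet.
+//
+// let query = QueryIter {
+// 	query_key: StorageKey(vec![0xab, 0xcd]),
+// 	pagination_start_key: None,
+// 	ty: IterQueryType::Value,
+// };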
+
+impl<Client, Block, BE> Storage<Client, Block, BE>
+where
+	Block: BlockT + 'static,
+	BE: Backend<Block> + 'static,
+	Client: StorageProvider<Block, BE> + 'static,
+{
+	/// Fetch the value from storage.
+	pub fn query_value(
+		&self,
+		hash: Block::Hash,
+		key: &StorageKey,
+		child_key: Option<&ChildInfo>,
+	) -> QueryResult {
+		let result = if let Some(child_key) = child_key {
+			self.client.child_storage(hash, child_key, key)
+		} else {
+			self.client.storage(hash, key)
+		};
+
+		result
+			.map(|opt| {
+				QueryResult::Ok(opt.map(|storage_data| StorageResult {
+					key: hex_string(&key.0),
+					result: StorageResultType::Value(hex_string(&storage_data.0)),
+				}))
+			})
+			.unwrap_or_else(|error| QueryResult::Err(error.to_string()))
+	}
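+
+	// Note: a hit is reported through the shared `StorageResult` type and
+	// serializes as `{"key":"0x..","value":"0x.."}`, while a missing key maps
+	// to `Ok(None)` and produces no entry in the reported results.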
+
+	/// Fetch the hash of a value from storage.
+	pub fn query_hash(
+		&self,
+		hash: Block::Hash,
+		key: &StorageKey,
+		child_key: Option<&ChildInfo>,
+	) -> QueryResult {
+		let result = if let Some(child_key) = child_key {
+			self.client.child_storage_hash(hash, child_key, key)
+		} else {
+			self.client.storage_hash(hash, key)
+		};
+
+		result
+			.map(|opt| {
+				QueryResult::Ok(opt.map(|storage_data| StorageResult {
+					key: hex_string(&key.0),
+					result: StorageResultType::Hash(hex_string(&storage_data.as_ref())),
+				}))
+			})
+			.unwrap_or_else(|error| QueryResult::Err(error.to_string()))
+	}
+
+	/// Fetch the closest merkle value.
+	pub fn query_merkle_value(
+		&self,
+		hash: Block::Hash,
+		key: &StorageKey,
+		child_key: Option<&ChildInfo>,
+	) -> QueryResult {
+		let result = if let Some(child_key) = child_key {
+			self.client.child_closest_merkle_value(hash, child_key, key)
+		} else {
+			self.client.closest_merkle_value(hash, key)
+		};
+
+		result
+			.map(|opt| {
+				QueryResult::Ok(opt.map(|storage_data| {
+					let result = match &storage_data {
+						sc_client_api::MerkleValue::Node(data) => hex_string(&data.as_slice()),
+						sc_client_api::MerkleValue::Hash(hash) => hex_string(&hash.as_ref()),
+					};
+
+					StorageResult {
+						key: hex_string(&key.0),
+						result: StorageResultType::ClosestDescendantMerkleValue(result),
+					}
+				}))
+			})
+			.unwrap_or_else(|error| QueryResult::Err(error.to_string()))
+	}
+
+	/// Iterate over at most the provided number of keys.
+	///
+	/// Returns the storage results together with an optional [`QueryIter`] to resume the iteration.
+	pub fn query_iter_pagination(
+		&self,
+		query: QueryIter,
+		hash: Block::Hash,
+		child_key: Option<&ChildInfo>,
+		count: usize,
+	) -> QueryIterResult {
+		let QueryIter { ty, query_key, pagination_start_key } = query;
+
+		let mut keys_iter = if let Some(child_key) = child_key {
+			self.client.child_storage_keys(
+				hash,
+				child_key.to_owned(),
+				Some(&query_key),
+				pagination_start_key.as_ref(),
+			)
+		} else {
+			self.client.storage_keys(hash, Some(&query_key), pagination_start_key.as_ref())
+		}
+		.map_err(|err| err.to_string())?;
+
+		let mut ret = Vec::with_capacity(count);
+		let mut next_pagination_key = None;
+		for _ in 0..count {
+			let Some(key) = keys_iter.next() else { break };
+
+			next_pagination_key = Some(key.clone());
+
+			let result = match ty {
+				IterQueryType::Value => self.query_value(hash, &key, child_key),
+				IterQueryType::Hash => self.query_hash(hash, &key, child_key),
+			}?;
+
+			if let Some(value) = result {
+				ret.push(value);
+			}
+		}
+
+		// Save the next key if any to continue the iteration.
+		let maybe_next_query = keys_iter.next().map(|_| QueryIter {
+			ty,
+			query_key,
+			pagination_start_key: next_pagination_key,
+		});
+		Ok((ret, maybe_next_query))
+	}
+}
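+
+// Illustrative driving loop (assuming `storage`, `hash` and `count` are in
+// scope): re-issue the returned `QueryIter` until no pagination checkpoint
+// remains.
+//
+// let mut next = Some(query);
+// while let Some(query) = next.take() {
+// 	let (results, resume) = storage.query_iter_pagination(query, hash, None, count)?;
+// 	// ... deliver `results` to the caller ...
+// 	next = resume;
+// }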
diff --git a/substrate/client/rpc-spec-v2/src/lib.rs b/substrate/client/rpc-spec-v2/src/lib.rs
index d202bfef4a7..23ed422cff1 100644
--- a/substrate/client/rpc-spec-v2/src/lib.rs
+++ b/substrate/client/rpc-spec-v2/src/lib.rs
@@ -24,6 +24,9 @@
 #![deny(unused_crate_dependencies)]
 
 use serde::{Deserialize, Serialize};
+use sp_core::hexdisplay::{AsBytesRef, HexDisplay};
+
+mod common;
 
 pub mod archive;
 pub mod chain_head;
@@ -39,7 +42,7 @@ pub type SubscriptionTaskExecutor = std::sync::Arc<dyn sp_core::traits::SpawnNam
 pub enum MethodResult {
 	/// Method generated a result.
 	Ok(MethodResultOk),
-	/// Method ecountered an error.
+	/// Method encountered an error.
 	Err(MethodResultErr),
 }
 
@@ -75,6 +78,11 @@ pub struct MethodResultErr {
 	pub error: String,
 }
 
+/// Utility function to encode a value as a hex string.
+pub fn hex_string<Data: AsBytesRef>(data: &Data) -> String {
+	format!("0x{:?}", HexDisplay::from(data))
+}
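+
+// For example, `hex_string(&vec![0x01, 0xff])` returns "0x01ff".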
+
 #[cfg(test)]
 mod tests {
 	use super::*;
-- 
GitLab