From 11fa9af104b0ac83af9bdebb3a811fc91aa03366 Mon Sep 17 00:00:00 2001
From: ZhiYong <zhiyong0705@hotmail.com>
Date: Mon, 26 Sep 2022 15:46:59 +0800
Subject: [PATCH] Remove discarded blocks and states from database by default
 (#11983)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* 1. Add the pruning param "canonical" to sc-cli.
  2. Change PruningMode's default value to ArchiveCanonical.

* Update tests in sc-state-db.

* Update tests in sc-state-db.

* 1. Add a new value `AllWithNonFinalized` to `enum BlocksPruning`, corresponding to `blocks_pruning 0` in the CLI.
  2. Rename the value `All` to `AllFinalized` in `enum BlocksPruning` and make it keep the full finalized block history.

* Make corresponding adjustments based on the review conversation.

* Update client/db/src/lib.rs

Co-authored-by: Bastian Köcher <git@kchr.de>

* Apply suggestions from code review.

* 1. Change `blocks_pruning` to work like `state_pruning`.

* Fmt and add some doc.

* Update client/cli/src/params/pruning_params.rs

Co-authored-by: Bastian Köcher <git@kchr.de>

* Update client/cli/src/params/pruning_params.rs

Co-authored-by: Bastian Köcher <git@kchr.de>

* Update doc.

* Change `new_test_with_tx_storage` to take `BlocksPruning`.

* Fmt

Co-authored-by: Bastian Köcher <git@kchr.de>
---
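Reviewer note (not part of the commit message): a minimal sketch of how the new
`--blocks-pruning` values are expected to map onto `sc_client_db::BlocksPruning`
after this patch. The function name `parse_blocks_pruning` is purely illustrative;
the real mapping lives in `PruningParams::blocks_pruning` in the diff below.

    // Hedged sketch, assuming the `BlocksPruning` type exported by `sc_client_db`.
    use sc_client_db::BlocksPruning;

    // Illustrative helper; not the actual CLI code.
    fn parse_blocks_pruning(arg: Option<&str>) -> Result<BlocksPruning, String> {
        match arg {
            // Keep every block that was ever imported.
            Some("archive") => Ok(BlocksPruning::KeepAll),
            // Keep the full finalized (canonical) block history.
            Some("archive-canonical") => Ok(BlocksPruning::KeepFinalized),
            // Keep only the last N finalized blocks.
            Some(n) => n
                .parse::<u32>()
                .map(BlocksPruning::Some)
                .map_err(|_| "Invalid blocks pruning mode specified".to_string()),
            // New default: discarded (non-finalized) forks are pruned,
            // finalized history is kept.
            None => Ok(BlocksPruning::KeepFinalized),
        }
    }
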
 .../bin/node/cli/benches/block_production.rs  |   2 +-
 .../bin/node/cli/benches/transaction_pool.rs  |   2 +-
 substrate/bin/node/testing/src/bench.rs       |   2 +-
 substrate/client/cli/src/config.rs            |   4 +-
 .../client/cli/src/params/pruning_params.rs   |  31 ++-
 substrate/client/db/benches/state_access.rs   |   2 +-
 substrate/client/db/src/lib.rs                | 215 ++++++++++++++----
 .../client/service/test/src/client/mod.rs     |   4 +-
 substrate/client/service/test/src/lib.rs      |   2 +-
 substrate/test-utils/client/src/lib.rs        |   5 +-
 10 files changed, 211 insertions(+), 58 deletions(-)
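
Note for downstream users of `sc-client-db` (illustrative only, not part of the patch):
`BlocksPruning::All` is renamed; code that previously requested full block history should
now pass `BlocksPruning::KeepAll`, while `BlocksPruning::KeepFinalized` becomes the new
default and still prunes discarded forks. A hedged sketch, assuming the re-exports
available from `sc_client_db` and some concrete `Block` type already in scope; the
function name and database path are made up for the example:

    use sc_client_db::{Backend, BlocksPruning, DatabaseSettings, DatabaseSource, PruningMode};

    // Open a backend that keeps every imported block, mirroring the settings used
    // in the benches below. `Block` stands for the caller's concrete block type.
    fn open_keep_all_backend() -> Backend<Block> {
        let settings = DatabaseSettings {
            trie_cache_maximum_size: Some(16 * 1024 * 1024),
            state_pruning: Some(PruningMode::ArchiveAll),
            source: DatabaseSource::ParityDb { path: std::path::PathBuf::from("/tmp/example-db") },
            blocks_pruning: BlocksPruning::KeepAll,
        };
        // Second argument is the canonicalization delay, as in the benches below.
        Backend::new(settings, 100).expect("creates backend")
    }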

diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs
index 0a734fa4474..4fcebb123d9 100644
--- a/substrate/bin/node/cli/benches/block_production.rs
+++ b/substrate/bin/node/cli/benches/block_production.rs
@@ -74,7 +74,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase {
 		database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 },
 		trie_cache_maximum_size: Some(64 * 1024 * 1024),
 		state_pruning: Some(PruningMode::ArchiveAll),
-		blocks_pruning: BlocksPruning::All,
+		blocks_pruning: BlocksPruning::KeepAll,
 		chain_spec: spec,
 		wasm_method: WasmExecutionMethod::Compiled {
 			instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite,
diff --git a/substrate/bin/node/cli/benches/transaction_pool.rs b/substrate/bin/node/cli/benches/transaction_pool.rs
index e6084fba824..a8839642ddc 100644
--- a/substrate/bin/node/cli/benches/transaction_pool.rs
+++ b/substrate/bin/node/cli/benches/transaction_pool.rs
@@ -68,7 +68,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase {
 		database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 },
 		trie_cache_maximum_size: Some(64 * 1024 * 1024),
 		state_pruning: Some(PruningMode::ArchiveAll),
-		blocks_pruning: BlocksPruning::All,
+		blocks_pruning: BlocksPruning::KeepAll,
 		chain_spec: spec,
 		wasm_method: WasmExecutionMethod::Interpreted,
 		// NOTE: we enforce the use of the native runtime to make the errors more debuggable
diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs
index 7980cc102fb..59f1fa94c9b 100644
--- a/substrate/bin/node/testing/src/bench.rs
+++ b/substrate/bin/node/testing/src/bench.rs
@@ -392,7 +392,7 @@ impl BenchDb {
 			trie_cache_maximum_size: Some(16 * 1024 * 1024),
 			state_pruning: Some(PruningMode::ArchiveAll),
 			source: database_type.into_settings(dir.into()),
-			blocks_pruning: sc_client_db::BlocksPruning::All,
+			blocks_pruning: sc_client_db::BlocksPruning::KeepAll,
 		};
 		let task_executor = TaskExecutor::new();
 
diff --git a/substrate/client/cli/src/config.rs b/substrate/client/cli/src/config.rs
index bc5941914de..fad2ec7bc4a 100644
--- a/substrate/client/cli/src/config.rs
+++ b/substrate/client/cli/src/config.rs
@@ -251,11 +251,11 @@ pub trait CliConfiguration<DCV: DefaultConfigurationValues = ()>: Sized {
 	/// Get the block pruning mode.
 	///
 	/// By default this is retrieved from `block_pruning` if it is available. Otherwise its
-	/// `BlocksPruning::All`.
+	/// `BlocksPruning::KeepFinalized`.
 	fn blocks_pruning(&self) -> Result<BlocksPruning> {
 		self.pruning_params()
 			.map(|x| x.blocks_pruning())
-			.unwrap_or_else(|| Ok(BlocksPruning::All))
+			.unwrap_or_else(|| Ok(BlocksPruning::KeepFinalized))
 	}
 
 	/// Get the chain ID (string).
diff --git a/substrate/client/cli/src/params/pruning_params.rs b/substrate/client/cli/src/params/pruning_params.rs
index 34a0982e63d..b764e4722e9 100644
--- a/substrate/client/cli/src/params/pruning_params.rs
+++ b/substrate/client/cli/src/params/pruning_params.rs
@@ -30,13 +30,16 @@ pub struct PruningParams {
 	/// or for all of the canonical blocks (i.e 'archive-canonical').
 	#[clap(alias = "pruning", long, value_name = "PRUNING_MODE")]
 	pub state_pruning: Option<String>,
-	/// Specify the number of finalized blocks to keep in the database.
+	/// Specify the blocks pruning mode, a number of blocks to keep or 'archive'.
 	///
-	/// Default is to keep all blocks.
+	/// Default is to keep all finalized blocks.
+	/// Otherwise, all blocks can be kept (i.e 'archive'),
+	/// all finalized canonical blocks can be kept (i.e 'archive-canonical'),
+	/// or only the last N finalized blocks can be kept (i.e a number).
 	///
 	/// NOTE: only finalized blocks are subject for removal!
 	#[clap(alias = "keep-blocks", long, value_name = "COUNT")]
-	pub blocks_pruning: Option<u32>,
+	pub blocks_pruning: Option<String>,
 }
 
 impl PruningParams {
@@ -46,9 +49,12 @@ impl PruningParams {
 			.as_ref()
 			.map(|s| match s.as_str() {
 				"archive" => Ok(PruningMode::ArchiveAll),
+				"archive-canonical" => Ok(PruningMode::ArchiveCanonical),
 				bc => bc
 					.parse()
-					.map_err(|_| error::Error::Input("Invalid pruning mode specified".to_string()))
+					.map_err(|_| {
+						error::Error::Input("Invalid state pruning mode specified".to_string())
+					})
 					.map(PruningMode::blocks_pruning),
 			})
 			.transpose()
@@ -56,9 +62,18 @@ impl PruningParams {
 
 	/// Get the block pruning value from the parameters
 	pub fn blocks_pruning(&self) -> error::Result<BlocksPruning> {
-		Ok(match self.blocks_pruning {
-			Some(n) => BlocksPruning::Some(n),
-			None => BlocksPruning::All,
-		})
+		match self.blocks_pruning.as_ref() {
+			Some(bp) => match bp.as_str() {
+				"archive" => Ok(BlocksPruning::KeepAll),
+				"archive-canonical" => Ok(BlocksPruning::KeepFinalized),
+				bc => bc
+					.parse()
+					.map_err(|_| {
+						error::Error::Input("Invalid blocks pruning mode specified".to_string())
+					})
+					.map(BlocksPruning::Some),
+			},
+			None => Ok(BlocksPruning::KeepFinalized),
+		}
 	}
 }
diff --git a/substrate/client/db/benches/state_access.rs b/substrate/client/db/benches/state_access.rs
index 78aed7858e3..714dda82d61 100644
--- a/substrate/client/db/benches/state_access.rs
+++ b/substrate/client/db/benches/state_access.rs
@@ -122,7 +122,7 @@ fn create_backend(config: BenchmarkConfig, temp_dir: &TempDir) -> Backend<Block>
 		trie_cache_maximum_size,
 		state_pruning: Some(PruningMode::ArchiveAll),
 		source: DatabaseSource::ParityDb { path },
-		blocks_pruning: BlocksPruning::All,
+		blocks_pruning: BlocksPruning::KeepAll,
 	};
 
 	Backend::new(settings, 100).expect("Creates backend")
diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs
index 79ef7e9b662..32c4c9ef85e 100644
--- a/substrate/client/db/src/lib.rs
+++ b/substrate/client/db/src/lib.rs
@@ -320,10 +320,12 @@ pub struct DatabaseSettings {
 }
 
 /// Block pruning settings.
-#[derive(Debug, Clone, Copy)]
+#[derive(Debug, Clone, Copy, PartialEq)]
 pub enum BlocksPruning {
-	/// Keep full block history.
-	All,
+	/// Keep the full block history of every block that was ever imported.
+	KeepAll,
+	/// Keep full finalized block history.
+	KeepFinalized,
 	/// Keep N recent finalized blocks.
 	Some(u32),
 }
@@ -1061,19 +1063,27 @@ impl<Block: BlockT> Backend<Block> {
 	/// Create new memory-backed client backend for tests.
 	#[cfg(any(test, feature = "test-helpers"))]
 	pub fn new_test(blocks_pruning: u32, canonicalization_delay: u64) -> Self {
-		Self::new_test_with_tx_storage(blocks_pruning, canonicalization_delay)
+		Self::new_test_with_tx_storage(BlocksPruning::Some(blocks_pruning), canonicalization_delay)
 	}
 
 	/// Create new memory-backed client backend for tests.
 	#[cfg(any(test, feature = "test-helpers"))]
-	pub fn new_test_with_tx_storage(blocks_pruning: u32, canonicalization_delay: u64) -> Self {
+	pub fn new_test_with_tx_storage(
+		blocks_pruning: BlocksPruning,
+		canonicalization_delay: u64,
+	) -> Self {
 		let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS);
 		let db = sp_database::as_database(db);
+		let state_pruning = match blocks_pruning {
+			BlocksPruning::KeepAll => PruningMode::ArchiveAll,
+			BlocksPruning::KeepFinalized => PruningMode::ArchiveCanonical,
+			BlocksPruning::Some(n) => PruningMode::blocks_pruning(n),
+		};
 		let db_setting = DatabaseSettings {
 			trie_cache_maximum_size: Some(16 * 1024 * 1024),
-			state_pruning: Some(PruningMode::blocks_pruning(blocks_pruning)),
+			state_pruning: Some(state_pruning),
 			source: DatabaseSource::Custom { db, require_create_flag: true },
-			blocks_pruning: BlocksPruning::Some(blocks_pruning),
+			blocks_pruning,
 		};
 
 		Self::new(db_setting, canonicalization_delay).expect("failed to create test-db")
@@ -1707,32 +1717,47 @@ impl<Block: BlockT> Backend<Block> {
 		finalized: NumberFor<Block>,
 		displaced: &FinalizationOutcome<Block::Hash, NumberFor<Block>>,
 	) -> ClientResult<()> {
-		if let BlocksPruning::Some(blocks_pruning) = self.blocks_pruning {
-			// Always keep the last finalized block
-			let keep = std::cmp::max(blocks_pruning, 1);
-			if finalized >= keep.into() {
-				let number = finalized.saturating_sub(keep.into());
-				self.prune_block(transaction, BlockId::<Block>::number(number))?;
-			}
+		match self.blocks_pruning {
+			BlocksPruning::KeepAll => {},
+			BlocksPruning::Some(blocks_pruning) => {
+				// Always keep the last finalized block
+				let keep = std::cmp::max(blocks_pruning, 1);
+				if finalized >= keep.into() {
+					let number = finalized.saturating_sub(keep.into());
+					self.prune_block(transaction, BlockId::<Block>::number(number))?;
+				}
+				self.prune_displaced_branches(transaction, finalized, displaced)?;
+			},
+			BlocksPruning::KeepFinalized => {
+				self.prune_displaced_branches(transaction, finalized, displaced)?;
+			},
+		}
+		Ok(())
+	}
 
-			// Also discard all blocks from displaced branches
-			for h in displaced.leaves() {
-				let mut number = finalized;
-				let mut hash = *h;
-				// Follow displaced chains back until we reach a finalized block.
-				// Since leaves are discarded due to finality, they can't have parents
-				// that are canonical, but not yet finalized. So we stop deleting as soon as
-				// we reach canonical chain.
-				while self.blockchain.hash(number)? != Some(hash) {
-					let id = BlockId::<Block>::hash(hash);
-					match self.blockchain.header(id)? {
-						Some(header) => {
-							self.prune_block(transaction, id)?;
-							number = header.number().saturating_sub(One::one());
-							hash = *header.parent_hash();
-						},
-						None => break,
-					}
+	fn prune_displaced_branches(
+		&self,
+		transaction: &mut Transaction<DbHash>,
+		finalized: NumberFor<Block>,
+		displaced: &FinalizationOutcome<Block::Hash, NumberFor<Block>>,
+	) -> ClientResult<()> {
+		// Discard all blocks from displaced branches
+		for h in displaced.leaves() {
+			let mut number = finalized;
+			let mut hash = *h;
+			// Follow displaced chains back until we reach a finalized block.
+			// Since leaves are discarded due to finality, they can't have parents
+			// that are canonical, but not yet finalized. So we stop deleting as soon as
+			// we reach canonical chain.
+			while self.blockchain.hash(number)? != Some(hash) {
+				let id = BlockId::<Block>::hash(hash);
+				match self.blockchain.header(id)? {
+					Some(header) => {
+						self.prune_block(transaction, id)?;
+						number = header.number().saturating_sub(One::one());
+						hash = *header.parent_hash();
+					},
+					None => break,
 				}
 			}
 		}
@@ -1752,6 +1777,13 @@ impl<Block: BlockT> Backend<Block> {
 			columns::BODY,
 			id,
 		)?;
+		utils::remove_from_db(
+			transaction,
+			&*self.storage.db,
+			columns::KEY_LOOKUP,
+			columns::JUSTIFICATIONS,
+			id,
+		)?;
 		if let Some(index) =
 			read_db(&*self.storage.db, columns::KEY_LOOKUP, columns::BODY_INDEX, id)?
 		{
@@ -2506,7 +2538,7 @@ pub(crate) mod tests {
 				trie_cache_maximum_size: Some(16 * 1024 * 1024),
 				state_pruning: Some(PruningMode::blocks_pruning(1)),
 				source: DatabaseSource::Custom { db: backing, require_create_flag: false },
-				blocks_pruning: BlocksPruning::All,
+				blocks_pruning: BlocksPruning::KeepFinalized,
 			},
 			0,
 		)
@@ -3176,7 +3208,7 @@ pub(crate) mod tests {
 
 	#[test]
 	fn prune_blocks_on_finalize() {
-		let backend = Backend::<Block>::new_test_with_tx_storage(2, 0);
+		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 0);
 		let mut blocks = Vec::new();
 		let mut prev_hash = Default::default();
 		for i in 0..5 {
@@ -3210,9 +3242,114 @@ pub(crate) mod tests {
 		assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap());
 	}
 
+	#[test]
+	fn prune_blocks_on_finalize_in_keep_all() {
+		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::KeepAll, 0);
+		let mut blocks = Vec::new();
+		let mut prev_hash = Default::default();
+		for i in 0..5 {
+			let hash = insert_block(
+				&backend,
+				i,
+				prev_hash,
+				None,
+				Default::default(),
+				vec![i.into()],
+				None,
+			)
+			.unwrap();
+			blocks.push(hash);
+			prev_hash = hash;
+		}
+
+		let mut op = backend.begin_operation().unwrap();
+		backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap();
+		for i in 1..3 {
+			op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap();
+		}
+		backend.commit_operation(op).unwrap();
+
+		let bc = backend.blockchain();
+		assert_eq!(Some(vec![0.into()]), bc.body(BlockId::hash(blocks[0])).unwrap());
+		assert_eq!(Some(vec![1.into()]), bc.body(BlockId::hash(blocks[1])).unwrap());
+		assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(blocks[2])).unwrap());
+		assert_eq!(Some(vec![3.into()]), bc.body(BlockId::hash(blocks[3])).unwrap());
+		assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap());
+	}
+
+	#[test]
+	fn prune_blocks_on_finalize_with_fork_in_keep_all() {
+		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::KeepAll, 10);
+		let mut blocks = Vec::new();
+		let mut prev_hash = Default::default();
+		for i in 0..5 {
+			let hash = insert_block(
+				&backend,
+				i,
+				prev_hash,
+				None,
+				Default::default(),
+				vec![i.into()],
+				None,
+			)
+			.unwrap();
+			blocks.push(hash);
+			prev_hash = hash;
+		}
+
+		// insert a fork at block 2
+		let fork_hash_root = insert_block(
+			&backend,
+			2,
+			blocks[1],
+			None,
+			sp_core::H256::random(),
+			vec![2.into()],
+			None,
+		)
+		.unwrap();
+		insert_block(
+			&backend,
+			3,
+			fork_hash_root,
+			None,
+			H256::random(),
+			vec![3.into(), 11.into()],
+			None,
+		)
+		.unwrap();
+
+		let mut op = backend.begin_operation().unwrap();
+		backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap();
+		op.mark_head(BlockId::Hash(blocks[4])).unwrap();
+		backend.commit_operation(op).unwrap();
+
+		let bc = backend.blockchain();
+		assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(fork_hash_root)).unwrap());
+
+		for i in 1..5 {
+			let mut op = backend.begin_operation().unwrap();
+			backend.begin_state_operation(&mut op, BlockId::Hash(blocks[i])).unwrap();
+			op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap();
+			backend.commit_operation(op).unwrap();
+		}
+
+		assert_eq!(Some(vec![0.into()]), bc.body(BlockId::hash(blocks[0])).unwrap());
+		assert_eq!(Some(vec![1.into()]), bc.body(BlockId::hash(blocks[1])).unwrap());
+		assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(blocks[2])).unwrap());
+		assert_eq!(Some(vec![3.into()]), bc.body(BlockId::hash(blocks[3])).unwrap());
+		assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap());
+
+		assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(fork_hash_root)).unwrap());
+		assert_eq!(bc.info().best_number, 4);
+		for i in 0..5 {
+			assert!(bc.hash(i).unwrap().is_some());
+		}
+	}
+
 	#[test]
 	fn prune_blocks_on_finalize_with_fork() {
-		let backend = Backend::<Block>::new_test_with_tx_storage(2, 10);
+		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
 		let mut blocks = Vec::new();
 		let mut prev_hash = Default::default();
 		for i in 0..5 {
@@ -3273,7 +3410,7 @@ pub(crate) mod tests {
 
 	#[test]
 	fn indexed_data_block_body() {
-		let backend = Backend::<Block>::new_test_with_tx_storage(1, 10);
+		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);
 
 		let x0 = ExtrinsicWrapper::from(0u64).encode();
 		let x1 = ExtrinsicWrapper::from(1u64).encode();
@@ -3315,7 +3452,7 @@ pub(crate) mod tests {
 
 	#[test]
 	fn index_invalid_size() {
-		let backend = Backend::<Block>::new_test_with_tx_storage(1, 10);
+		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);
 
 		let x0 = ExtrinsicWrapper::from(0u64).encode();
 		let x1 = ExtrinsicWrapper::from(1u64).encode();
@@ -3350,7 +3487,7 @@ pub(crate) mod tests {
 
 	#[test]
 	fn renew_transaction_storage() {
-		let backend = Backend::<Block>::new_test_with_tx_storage(2, 10);
+		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
 		let mut blocks = Vec::new();
 		let mut prev_hash = Default::default();
 		let x1 = ExtrinsicWrapper::from(0u64).encode();
@@ -3397,7 +3534,7 @@ pub(crate) mod tests {
 
 	#[test]
 	fn remove_leaf_block_works() {
-		let backend = Backend::<Block>::new_test_with_tx_storage(2, 10);
+		let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(2), 10);
 		let mut blocks = Vec::new();
 		let mut prev_hash = Default::default();
 		for i in 0..2 {
diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs
index 2ab1415f8ca..e0f47110d90 100644
--- a/substrate/client/service/test/src/client/mod.rs
+++ b/substrate/client/service/test/src/client/mod.rs
@@ -1200,7 +1200,7 @@ fn doesnt_import_blocks_that_revert_finality() {
 			DatabaseSettings {
 				trie_cache_maximum_size: Some(1 << 20),
 				state_pruning: Some(PruningMode::ArchiveAll),
-				blocks_pruning: BlocksPruning::All,
+				blocks_pruning: BlocksPruning::KeepAll,
 				source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 },
 			},
 			u64::MAX,
@@ -1426,7 +1426,7 @@ fn returns_status_for_pruned_blocks() {
 			DatabaseSettings {
 				trie_cache_maximum_size: Some(1 << 20),
 				state_pruning: Some(PruningMode::blocks_pruning(1)),
-				blocks_pruning: BlocksPruning::All,
+				blocks_pruning: BlocksPruning::KeepFinalized,
 				source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 },
 			},
 			u64::MAX,
diff --git a/substrate/client/service/test/src/lib.rs b/substrate/client/service/test/src/lib.rs
index 11c1cbaf7af..23245d46cba 100644
--- a/substrate/client/service/test/src/lib.rs
+++ b/substrate/client/service/test/src/lib.rs
@@ -237,7 +237,7 @@ fn node_config<
 		database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 },
 		trie_cache_maximum_size: Some(16 * 1024 * 1024),
 		state_pruning: Default::default(),
-		blocks_pruning: BlocksPruning::All,
+		blocks_pruning: BlocksPruning::KeepFinalized,
 		chain_spec: Box::new((*spec).clone()),
 		wasm_method: sc_service::config::WasmExecutionMethod::Interpreted,
 		wasm_runtime_overrides: Default::default(),
diff --git a/substrate/test-utils/client/src/lib.rs b/substrate/test-utils/client/src/lib.rs
index be4549c9957..d3e71f0ad28 100644
--- a/substrate/test-utils/client/src/lib.rs
+++ b/substrate/test-utils/client/src/lib.rs
@@ -26,7 +26,7 @@ pub use sc_client_api::{
 	execution_extensions::{ExecutionExtensions, ExecutionStrategies},
 	BadBlocks, ForkBlocks,
 };
-pub use sc_client_db::{self, Backend};
+pub use sc_client_db::{self, Backend, BlocksPruning};
 pub use sc_executor::{self, NativeElseWasmExecutor, WasmExecutionMethod};
 pub use sc_service::{client, RpcHandlers};
 pub use sp_consensus;
@@ -102,7 +102,8 @@ impl<Block: BlockT, ExecutorDispatch, G: GenesisInit>
 
 	/// Create new `TestClientBuilder` with default backend and storage chain mode
 	pub fn with_tx_storage(blocks_pruning: u32) -> Self {
-		let backend = Arc::new(Backend::new_test_with_tx_storage(blocks_pruning, 0));
+		let backend =
+			Arc::new(Backend::new_test_with_tx_storage(BlocksPruning::Some(blocks_pruning), 0));
 		Self::with_backend(backend)
 	}
 }
-- 
GitLab