From 4f852d6922226c737bffc0277ec967949739ecc9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?= <bkchr@users.noreply.github.com>
Date: Tue, 3 Mar 2020 10:24:26 +0100
Subject: [PATCH] Expose `state-db` memory info (#5110)

This exposes memory statistics from the state-db.
---
 substrate/Cargo.lock                          | 33 +++++++
 substrate/client/api/src/client.rs            | 57 +++++++++++-
 substrate/client/db/src/lib.rs                | 16 ++--
 substrate/client/db/src/light.rs              |  9 +-
 substrate/client/informant/src/lib.rs         |  5 +-
 substrate/client/service/src/builder.rs       | 16 +++-
 substrate/client/state-db/Cargo.toml          |  3 +
 substrate/client/state-db/src/lib.rs          | 86 ++++++++++++++-----
 substrate/client/state-db/src/noncanonical.rs | 10 ++-
 substrate/client/state-db/src/pruning.rs      |  3 +-
 10 files changed, 196 insertions(+), 42 deletions(-)

diff --git a/substrate/Cargo.lock b/substrate/Cargo.lock
index 4da524e7ce8..47583e57e3e 100644
--- a/substrate/Cargo.lock
+++ b/substrate/Cargo.lock
@@ -1226,6 +1226,33 @@ dependencies = [
  "serde_json",
 ]
 
+[[package]]
+name = "ethbloom"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32cfe1c169414b709cf28aa30c74060bdb830a03a8ba473314d079ac79d80a5f"
+dependencies = [
+ "crunchy",
+ "fixed-hash",
+ "impl-rlp",
+ "impl-serde 0.2.3",
+ "tiny-keccak 1.5.0",
+]
+
+[[package]]
+name = "ethereum-types"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba744248e3553a393143d5ebb68939fc3a4ec0c22a269682535f5ffe7fed728c"
+dependencies = [
+ "ethbloom",
+ "fixed-hash",
+ "impl-rlp",
+ "impl-serde 0.2.3",
+ "primitive-types",
+ "uint",
+]
+
 [[package]]
 name = "evm"
 version = "0.15.0"
@@ -4626,7 +4653,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ef1476e40bf8f5c6776e9600983435821ca86eb9819d74a6207cca69d091406a"
 dependencies = [
  "cfg-if",
+ "ethereum-types",
+ "hashbrown",
  "impl-trait-for-tuples",
+ "lru",
  "parity-util-mem-derive",
  "parking_lot 0.10.0",
  "primitive-types",
@@ -6432,7 +6462,10 @@ dependencies = [
  "env_logger 0.7.1",
  "log 0.4.8",
  "parity-scale-codec",
+ "parity-util-mem",
+ "parity-util-mem-derive",
  "parking_lot 0.10.0",
+ "sc-client-api",
  "sp-core",
 ]
 
diff --git a/substrate/client/api/src/client.rs b/substrate/client/api/src/client.rs
index 7503ce4a79e..4980015568b 100644
--- a/substrate/client/api/src/client.rs
+++ b/substrate/client/api/src/client.rs
@@ -97,13 +97,56 @@ pub struct ClientInfo<Block: BlockT> {
 	pub usage: Option<UsageInfo>,
 }
 
+/// A wrapper to store the size of some memory.
+#[derive(Default, Clone, Debug, Copy)]
+pub struct MemorySize(usize);
+
+impl MemorySize {
+	/// Creates `Self` from the given `bytes` size.
+	pub fn from_bytes(bytes: usize) -> Self {
+		Self(bytes)
+	}
+
+	/// Returns the memory size as bytes.
+	pub fn as_bytes(self) -> usize {
+		self.0
+	}
+}
+
+impl fmt::Display for MemorySize {
+	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+		if self.0 < 1024 {
+			write!(f, "{} bytes", self.0)
+		} else if self.0 < 1024 * 1024 {
+			write!(f, "{:.2} KiB", self.0 as f64 / 1024f64)
+		} else if self.0 < 1024 * 1024 * 1024 {
+			write!(f, "{:.2} MiB", self.0 as f64 / (1024f64 * 1024f64))
+		} else {
+			write!(f, "{:.2} GiB", self.0 as f64 / (1024f64 * 1024f64 * 1024f64))
+		}
+	}
+}
+
+/// Memory statistics for state db.
+#[derive(Default, Clone, Debug)]
+pub struct StateDbMemoryInfo {
+	/// Memory usage of the non-canonical overlay.
+	pub non_canonical: MemorySize,
+	/// Memory usage of the pruning window.
+	pub pruning: Option<MemorySize>,
+	/// Memory usage of the pinned blocks.
+	pub pinned: MemorySize,
+}
+
 /// Memory statistics for client instance.
 #[derive(Default, Clone, Debug)]
 pub struct MemoryInfo {
 	/// Size of state cache.
-	pub state_cache: usize,
+	pub state_cache: MemorySize,
 	/// Size of backend database cache.
-	pub database_cache: usize,
+	pub database_cache: MemorySize,
+	/// Size of the state db.
+	pub state_db: StateDbMemoryInfo,
 }
 
 /// I/O statistics for client instance.
@@ -144,10 +187,16 @@ pub struct UsageInfo {
 
 impl fmt::Display for UsageInfo {
 	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-		write!(f,
-			"caches: ({} state, {} db overlay), i/o: ({} tx, {} write, {} read, {} avg tx, {}/{} key cache reads/total, {} key writes)",
+		write!(
+			f,
+			"caches: ({} state, {} db overlay), \
+			 state db: ({} non-canonical, {} pruning, {} pinned), \
+			 i/o: ({} tx, {} write, {} read, {} avg tx, {}/{} key cache reads/total, {} key writes)",
 			self.memory.state_cache,
 			self.memory.database_cache,
+			self.memory.state_db.non_canonical,
+			self.memory.state_db.pruning.unwrap_or_default(),
+			self.memory.state_db.pinned,
 			self.io.transactions,
 			self.io.bytes_written,
 			self.io.bytes_read,
diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs
index 5173497509c..b4dd98457d4 100644
--- a/substrate/client/db/src/lib.rs
+++ b/substrate/client/db/src/lib.rs
@@ -46,9 +46,11 @@ use std::path::PathBuf;
 use std::io;
 use std::collections::HashMap;
 
-use sc_client_api::{execution_extensions::ExecutionExtensions, ForkBlocks, UsageInfo, MemoryInfo, BadBlocks, IoInfo};
-use sc_client_api::backend::NewBlockState;
-use sc_client_api::backend::PrunableStateChangesTrieStorage;
+use sc_client_api::{
+	ForkBlocks, UsageInfo, MemoryInfo, BadBlocks, IoInfo, MemorySize,
+	execution_extensions::ExecutionExtensions,
+	backend::{NewBlockState, PrunableStateChangesTrieStorage},
+};
 use sp_blockchain::{
 	Result as ClientResult, Error as ClientError,
 	well_known_cache_keys, HeaderBackend,
@@ -1455,13 +1457,17 @@ impl<Block: BlockT> sc_client_api::backend::Backend<Block> for Backend<Block> {
 				self.state_usage.take(),
 			)
 		);
-		let database_cache = parity_util_mem::malloc_size(&*self.storage.db);
-		let state_cache = (*&self.shared_cache).lock().used_storage_cache_size();
+		let database_cache = MemorySize::from_bytes(parity_util_mem::malloc_size(&*self.storage.db));
+		let state_cache = MemorySize::from_bytes(
+			(*&self.shared_cache).lock().used_storage_cache_size(),
+		);
+		let state_db = self.storage.state_db.memory_info();
 
 		Some(UsageInfo {
 			memory: MemoryInfo {
 				state_cache,
 				database_cache,
+				state_db,
 			},
 			io: IoInfo {
 				transactions: io_stats.transactions,
diff --git a/substrate/client/db/src/light.rs b/substrate/client/db/src/light.rs
index 14ce6ac0f9a..3d30598b19e 100644
--- a/substrate/client/db/src/light.rs
+++ b/substrate/client/db/src/light.rs
@@ -317,7 +317,7 @@ impl<Block: BlockT> LightStorage<Block> {
 			// if the header includes changes trie root, let's build a changes tries roots CHT
 			if header.digest().log(DigestItem::as_changes_trie_root).is_some() {
 				let mut current_num = new_cht_start;
-				let cht_range = ::std::iter::from_fn(|| {
+				let cht_range = std::iter::from_fn(|| {
 					let old_current_num = current_num;
 					current_num = current_num + One::one();
 					Some(old_current_num)
@@ -572,15 +572,16 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
 
 	#[cfg(not(target_os = "unknown"))]
 	fn usage_info(&self) -> Option<UsageInfo> {
-		use sc_client_api::{MemoryInfo, IoInfo};
+		use sc_client_api::{MemoryInfo, IoInfo, MemorySize};
 
-		let database_cache = parity_util_mem::malloc_size(&*self.db);
+		let database_cache = MemorySize::from_bytes(parity_util_mem::malloc_size(&*self.db));
 		let io_stats = self.io_stats.take_or_else(|| self.db.io_stats(kvdb::IoStatsKind::SincePrevious));
 
 		Some(UsageInfo {
 			memory: MemoryInfo {
 				database_cache,
-				state_cache: 0,
+				state_cache: Default::default(),
+				state_db: Default::default(),
 			},
 			io: IoInfo {
 				transactions: io_stats.transactions,
diff --git a/substrate/client/informant/src/lib.rs b/substrate/client/informant/src/lib.rs
index 699dcfdd742..d104a64a2db 100644
--- a/substrate/client/informant/src/lib.rs
+++ b/substrate/client/informant/src/lib.rs
@@ -46,7 +46,10 @@ pub fn build(service: &impl AbstractService, format: OutputFormat) -> impl futur
 			if let Some(ref usage) = info.usage {
 				trace!(target: "usage", "Usage statistics: {}", usage);
 			} else {
-				trace!(target: "usage", "Usage statistics not displayed as backend does not provide it")
+				trace!(
+					target: "usage",
+					"Usage statistics not displayed as backend does not provide it",
+				)
 			}
 			#[cfg(not(target_os = "unknown"))]
 			trace!(
diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs
index e5e4e132f9c..cce126f1b39 100644
--- a/substrate/client/service/src/builder.rs
+++ b/substrate/client/service/src/builder.rs
@@ -1085,10 +1085,18 @@ ServiceBuilder<
 				"finalized_hash" => ?info.chain.finalized_hash,
 				"bandwidth_download" => bandwidth_download,
 				"bandwidth_upload" => bandwidth_upload,
-				"used_state_cache_size" => info.usage.as_ref().map(|usage| usage.memory.state_cache).unwrap_or(0),
-				"used_db_cache_size" => info.usage.as_ref().map(|usage| usage.memory.database_cache).unwrap_or(0),
-				"disk_read_per_sec" => info.usage.as_ref().map(|usage| usage.io.bytes_read).unwrap_or(0),
-				"disk_write_per_sec" => info.usage.as_ref().map(|usage| usage.io.bytes_written).unwrap_or(0),
+				"used_state_cache_size" => info.usage.as_ref()
+					.map(|usage| usage.memory.state_cache.as_bytes())
+					.unwrap_or(0),
+				"used_db_cache_size" => info.usage.as_ref()
+					.map(|usage| usage.memory.database_cache.as_bytes())
+					.unwrap_or(0),
+				"disk_read_per_sec" => info.usage.as_ref()
+					.map(|usage| usage.io.bytes_read)
+					.unwrap_or(0),
+				"disk_write_per_sec" => info.usage.as_ref()
+					.map(|usage| usage.io.bytes_written)
+					.unwrap_or(0),
 			);
 			if let Some(metrics) = metrics.as_ref() {
 				metrics.memory_usage_bytes.set(memory);
diff --git a/substrate/client/state-db/Cargo.toml b/substrate/client/state-db/Cargo.toml
index 5a69d8b31ec..77e06670d1d 100644
--- a/substrate/client/state-db/Cargo.toml
+++ b/substrate/client/state-db/Cargo.toml
@@ -11,8 +11,11 @@ description = "State database maintenance. Handles canonicalization and pruning
 [dependencies]
 parking_lot = "0.10.0"
 log = "0.4.8"
+sc-client-api = { version = "2.0.0-alpha.2", path = "../api" }
 sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" }
 codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] }
+parity-util-mem = "0.5.1"
+parity-util-mem-derive = "0.1.0"
 
 [dev-dependencies]
 env_logger = "0.7.0"
diff --git a/substrate/client/state-db/src/lib.rs b/substrate/client/state-db/src/lib.rs
index f670e4f35f3..49b1a59285e 100644
--- a/substrate/client/state-db/src/lib.rs
+++ b/substrate/client/state-db/src/lib.rs
@@ -31,7 +31,8 @@
 
 mod noncanonical;
 mod pruning;
-#[cfg(test)] mod test;
+#[cfg(test)]
+mod test;
 
 use std::fmt;
 use parking_lot::RwLock;
@@ -40,6 +41,8 @@ use std::collections::{HashMap, hash_map::Entry};
 use noncanonical::NonCanonicalOverlay;
 use pruning::RefWindow;
 use log::trace;
+use parity_util_mem::{MallocSizeOf, malloc_size};
+use sc_client_api::{StateDbMemoryInfo, MemorySize};
 
 const PRUNING_MODE: &[u8] = b"mode";
 const PRUNING_MODE_ARCHIVE: &[u8] = b"archive";
@@ -120,7 +123,6 @@ pub struct ChangeSet<H: Hash> {
 	pub deleted: Vec<H>,
 }
 
-
 /// A set of changes to the backing database.
 #[derive(Default, Debug, Clone)]
 pub struct CommitSet<H: Hash> {
@@ -196,8 +198,11 @@ struct StateDbSync<BlockHash: Hash, Key: Hash> {
 	pinned: HashMap<BlockHash, u32>,
 }
 
-impl<BlockHash: Hash, Key: Hash> StateDbSync<BlockHash, Key> {
-	pub fn new<D: MetaDb>(mode: PruningMode, db: &D) -> Result<StateDbSync<BlockHash, Key>, Error<D::Error>> {
+impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf> StateDbSync<BlockHash, Key> {
+	fn new<D: MetaDb>(
+		mode: PruningMode,
+		db: &D,
+	) -> Result<StateDbSync<BlockHash, Key>, Error<D::Error>> {
 		trace!(target: "state-db", "StateDb settings: {:?}", mode);
 
 		// Check that settings match
@@ -234,7 +239,13 @@ impl<BlockHash: Hash, Key: Hash> StateDbSync<BlockHash, Key> {
 		}
 	}
 
-	pub fn insert_block<E: fmt::Debug>(&mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, mut changeset: ChangeSet<Key>) -> Result<CommitSet<Key>, Error<E>> {
+	fn insert_block<E: fmt::Debug>(
+		&mut self,
+		hash: &BlockHash,
+		number: u64,
+		parent_hash: &BlockHash,
+		mut changeset: ChangeSet<Key>,
+	) -> Result<CommitSet<Key>, Error<E>> {
 		let mut meta = ChangeSet::default();
 		if number == 0 {
 			// Save pruning mode when writing first block.
@@ -247,7 +258,7 @@ impl<BlockHash: Hash, Key: Hash> StateDbSync<BlockHash, Key> {
 				// write changes immediately
 				Ok(CommitSet {
 					data: changeset,
-					meta: meta,
+					meta,
 				})
 			},
 			PruningMode::Constrained(_) | PruningMode::ArchiveCanonical => {
@@ -260,7 +271,10 @@ impl<BlockHash: Hash, Key: Hash> StateDbSync<BlockHash, Key> {
 		}
 	}
 
-	pub fn canonicalize_block<E: fmt::Debug>(&mut self, hash: &BlockHash) -> Result<CommitSet<Key>, Error<E>> {
+	fn canonicalize_block<E: fmt::Debug>(
+		&mut self,
+		hash: &BlockHash,
+	) -> Result<CommitSet<Key>, Error<E>> {
 		let mut commit = CommitSet::default();
 		if self.mode == PruningMode::ArchiveAll {
 			return Ok(commit)
@@ -280,18 +294,23 @@ impl<BlockHash: Hash, Key: Hash> StateDbSync<BlockHash, Key> {
 		Ok(commit)
 	}
 
-	pub fn best_canonical(&self) -> Option<u64> {
+	fn best_canonical(&self) -> Option<u64> {
 		return self.non_canonical.last_canonicalized_block_number()
 	}
 
-	pub fn is_pruned(&self, hash: &BlockHash, number: u64) -> bool {
+	fn is_pruned(&self, hash: &BlockHash, number: u64) -> bool {
 		match self.mode {
 			PruningMode::ArchiveAll => false,
 			PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => {
 				if self.best_canonical().map(|c| number > c).unwrap_or(true) {
 					!self.non_canonical.have_block(hash)
 				} else {
-					self.pruning.as_ref().map_or(false, |pruning| number < pruning.pending() || !pruning.have_block(hash))
+					self.pruning
+						.as_ref()
+						.map_or(
+							false,
+							|pruning| number < pruning.pending() || !pruning.have_block(hash),
+						)
 				}
 			}
 		}
@@ -320,7 +339,7 @@ impl<BlockHash: Hash, Key: Hash> StateDbSync<BlockHash, Key> {
 	/// Revert all non-canonical blocks with the best block number.
 	/// Returns a database commit or `None` if not possible.
 	/// For archive an empty commit set is returned.
-	pub fn revert_one(&mut self) -> Option<CommitSet<Key>> {
+	fn revert_one(&mut self) -> Option<CommitSet<Key>> {
 		match self.mode {
 			PruningMode::ArchiveAll => {
 				Some(CommitSet::default())
@@ -331,7 +350,7 @@ impl<BlockHash: Hash, Key: Hash> StateDbSync<BlockHash, Key> {
 		}
 	}
 
-	pub fn pin(&mut self, hash: &BlockHash) -> Result<(), PinError> {
+	fn pin(&mut self, hash: &BlockHash) -> Result<(), PinError> {
 		match self.mode {
 			PruningMode::ArchiveAll => Ok(()),
 			PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => {
@@ -352,7 +371,7 @@ impl<BlockHash: Hash, Key: Hash> StateDbSync<BlockHash, Key> {
 		}
 	}
 
-	pub fn unpin(&mut self, hash: &BlockHash) {
+	fn unpin(&mut self, hash: &BlockHash) {
 		match self.pinned.entry(hash.clone()) {
 			Entry::Occupied(mut entry) => {
 				*entry.get_mut() -= 1;
@@ -377,12 +396,14 @@ impl<BlockHash: Hash, Key: Hash> StateDbSync<BlockHash, Key> {
 		db.get(key.as_ref()).map_err(|e| Error::Db(e))
 	}
 
-	pub fn apply_pending(&mut self) {
+	fn apply_pending(&mut self) {
 		self.non_canonical.apply_pending();
 		if let Some(pruning) = &mut self.pruning {
 			pruning.apply_pending();
 		}
-		trace!(target: "forks", "First available: {:?} ({}), Last canon: {:?} ({}), Best forks: {:?}",
+		trace!(
+			target: "forks",
+			"First available: {:?} ({}), Last canon: {:?} ({}), Best forks: {:?}",
 			self.pruning.as_ref().and_then(|p| p.next_hash()),
 			self.pruning.as_ref().map(|p| p.pending()).unwrap_or(0),
 			self.non_canonical.last_canonicalized_hash(),
@@ -391,12 +412,20 @@ impl<BlockHash: Hash, Key: Hash> StateDbSync<BlockHash, Key> {
 		);
 	}
 
-	pub fn revert_pending(&mut self) {
+	fn revert_pending(&mut self) {
 		if let Some(pruning) = &mut self.pruning {
 			pruning.revert_pending();
 		}
 		self.non_canonical.revert_pending();
 	}
+
+	fn memory_info(&self) -> StateDbMemoryInfo {
+		StateDbMemoryInfo {
+			non_canonical: MemorySize::from_bytes(malloc_size(&self.non_canonical)),
+			pruning: self.pruning.as_ref().map(|p| MemorySize::from_bytes(malloc_size(p))),
+			pinned: MemorySize::from_bytes(malloc_size(&self.pinned)),
+		}
+	}
 }
 
 /// State DB maintenance. See module description.
@@ -405,21 +434,33 @@ pub struct StateDb<BlockHash: Hash, Key: Hash> {
 	db: RwLock<StateDbSync<BlockHash, Key>>,
 }
 
-impl<BlockHash: Hash, Key: Hash> StateDb<BlockHash, Key> {
+impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf> StateDb<BlockHash, Key> {
 	/// Creates a new instance. Does not expect any metadata in the database.
-	pub fn new<D: MetaDb>(mode: PruningMode, db: &D) -> Result<StateDb<BlockHash, Key>, Error<D::Error>> {
+	pub fn new<D: MetaDb>(
+		mode: PruningMode,
+		db: &D,
+	) -> Result<StateDb<BlockHash, Key>, Error<D::Error>> {
 		Ok(StateDb {
 			db: RwLock::new(StateDbSync::new(mode, db)?)
 		})
 	}
 
 	/// Add a new non-canonical block.
-	pub fn insert_block<E: fmt::Debug>(&self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet<Key>) -> Result<CommitSet<Key>, Error<E>> {
+	pub fn insert_block<E: fmt::Debug>(
+		&self,
+		hash: &BlockHash,
+		number: u64,
+		parent_hash: &BlockHash,
+		changeset: ChangeSet<Key>,
+	) -> Result<CommitSet<Key>, Error<E>> {
 		self.db.write().insert_block(hash, number, parent_hash, changeset)
 	}
 
 	/// Finalize a previously inserted block.
-	pub fn canonicalize_block<E: fmt::Debug>(&self, hash: &BlockHash) -> Result<CommitSet<Key>, Error<E>> {
+	pub fn canonicalize_block<E: fmt::Debug>(
+		&self,
+		hash: &BlockHash,
+	) -> Result<CommitSet<Key>, Error<E>> {
 		self.db.write().canonicalize_block(hash)
 	}
 
@@ -466,6 +507,11 @@ impl<BlockHash: Hash, Key: Hash> StateDb<BlockHash, Key> {
 	pub fn revert_pending(&self) {
 		self.db.write().revert_pending();
 	}
+
+	/// Returns the current memory statistics of this instance.
+	pub fn memory_info(&self) -> StateDbMemoryInfo {
+		self.db.read().memory_info()
+	}
 }
 
 #[cfg(test)]
diff --git a/substrate/client/state-db/src/noncanonical.rs b/substrate/client/state-db/src/noncanonical.rs
index de7294d770a..6a34523b66f 100644
--- a/substrate/client/state-db/src/noncanonical.rs
+++ b/substrate/client/state-db/src/noncanonical.rs
@@ -30,6 +30,7 @@ const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal";
 const LAST_CANONICAL: &[u8] = b"last_canonical";
 
 /// See module documentation.
+#[derive(parity_util_mem_derive::MallocSizeOf)]
 pub struct NonCanonicalOverlay<BlockHash: Hash, Key: Hash> {
 	last_canonicalized: Option<(BlockHash, u64)>,
 	levels: VecDeque<Vec<BlockOverlay<BlockHash, Key>>>,
@@ -55,6 +56,7 @@ fn to_journal_key(block: u64, index: u64) -> Vec<u8> {
 }
 
 #[cfg_attr(test, derive(PartialEq, Debug))]
+#[derive(parity_util_mem_derive::MallocSizeOf)]
 struct BlockOverlay<BlockHash: Hash, Key: Hash> {
 	hash: BlockHash,
 	journal_key: Vec<u8>,
@@ -99,8 +101,10 @@ fn discard_descendants<BlockHash: Hash, Key: Hash>(
 	let mut discarded = Vec::new();
 	if let Some(level) = levels.get_mut(index) {
 		*level = level.drain(..).filter_map(|overlay| {
-			let parent = parents.get(&overlay.hash).expect("there is a parent entry for each entry in levels; qed").clone();
-			if parent == *hash {
+			let parent = parents.get(&overlay.hash)
+				.expect("there is a parent entry for each entry in levels; qed");
+
+			if parent == hash {
 				discarded.push(overlay.hash.clone());
 				if pinned.contains_key(&overlay.hash) {
 					// save to be discarded later.
@@ -375,7 +379,7 @@ impl<BlockHash: Hash, Key: Hash> NonCanonicalOverlay<BlockHash, Key> {
 		None
 	}
 
-	/// Check if the block is in the canonicalization queue. 
+	/// Check if the block is in the canonicalization queue.
 	pub fn have_block(&self, hash: &BlockHash) -> bool {
 		(self.parents.contains_key(hash) || self.pending_insertions.contains(hash))
 			&& !self.pending_canonicalizations.contains(hash)
diff --git a/substrate/client/state-db/src/pruning.rs b/substrate/client/state-db/src/pruning.rs
index 71d018087b5..6cf5f260060 100644
--- a/substrate/client/state-db/src/pruning.rs
+++ b/substrate/client/state-db/src/pruning.rs
@@ -31,6 +31,7 @@ const LAST_PRUNED: &[u8] = b"last_pruned";
 const PRUNING_JOURNAL: &[u8] = b"pruning_journal";
 
 /// See module documentation.
+#[derive(parity_util_mem_derive::MallocSizeOf)]
 pub struct RefWindow<BlockHash: Hash, Key: Hash> {
 	/// A queue of keys that should be deleted for each block in the pruning window.
 	death_rows: VecDeque<DeathRow<BlockHash, Key>>,
@@ -46,7 +47,7 @@ pub struct RefWindow<BlockHash: Hash, Key: Hash> {
 	pending_prunings: usize,
 }
 
-#[derive(Debug, PartialEq, Eq)]
+#[derive(Debug, PartialEq, Eq, parity_util_mem_derive::MallocSizeOf)]
 struct DeathRow<BlockHash: Hash, Key: Hash> {
 	hash: BlockHash,
 	journal_key: Vec<u8>,
-- 
GitLab