From 0cae7217d88bbda74e1ef97778831aa962f52c43 Mon Sep 17 00:00:00 2001
From: Benjamin Kampmann <ben@parity.io>
Date: Fri, 30 Aug 2019 02:20:26 +0200
Subject: [PATCH] Remove `client.backend` (#2960)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* generalize tree_route to remove client.backend dependency (see the usage sketch at the end of this message)

* replace client.backend.blockchain.header with client.header

* move used_state_cache_size into client info

* Create intermediate Setup State. Fixes #1134

* remove client.backend from finality proof

* update node-template

* move memory backend into test helper mode

* move test helper into client

* starting the big refactor, remove unused functions

* apply_finality

* apply_finality

* replacing more .backend from environment with client directly

* remove .backend from grandpa by using traits

* remove .backend from babe

* remove .backend from tests where it is not needed

* remove .backend from tests

* fixing tests

* fixing tests

* fixing more tests

* fixing tests

* fix all forks test

* fix style

* fixing unnecessary allocation

* remove old test.

* fix service docs

* apply suggestion

* minor clean ups

* turns out the test-helper feature is actually being used!

* fixing line length.

* fix line length

* minor cleaning

* Apply suggestions from code review

thanks, @Basti

Co-Authored-By: Bastian Köcher <bkchr@users.noreply.github.com>

* address grumbles

* simplify finalize block on client

* move block back into inner function

* Apply suggestions from code review

Co-Authored-By: DemiMarie-parity <48690212+DemiMarie-parity@users.noreply.github.com>

* use as_ref instead of match

* Update core/client/src/backend.rs

Co-Authored-By: DemiMarie-parity <48690212+DemiMarie-parity@users.noreply.github.com>
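
For reference, a minimal usage sketch of the two API shifts described above
(not part of the patch itself; it assumes a `client: Client<B, E, Block, RA>`
and `from`/`to` block hashes in scope, and mirrors the call sites introduced
below):

    // `tree_route` now takes a header-loading closure instead of a
    // blockchain backend reference.
    let route = client::blockchain::tree_route(
        |id| client.header(&id)?.ok_or_else(
            || client::error::Error::UnknownBlock(format!("{:?}", id))),
        BlockId::Hash(from),
        BlockId::Hash(to),
    )?;

    // Finalization goes through the new `Finalizer` trait rather than
    // through `client.backend`; the last argument controls whether
    // finality notifications are sent.
    use client::backend::Finalizer;
    client.finalize_block(BlockId::Hash(to), None, true)?;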
---
 substrate/core/cli/src/informant.rs           |   6 +-
 substrate/core/client/db/src/lib.rs           |  34 +-
 substrate/core/client/db/src/light.rs         |  10 +-
 substrate/core/client/src/backend.rs          |  60 +++-
 substrate/core/client/src/blockchain.rs       |  14 +-
 substrate/core/client/src/client.rs           | 308 ++++++++----------
 substrate/core/client/src/in_mem.rs           |   2 +-
 substrate/core/client/src/lib.rs              |   2 +-
 substrate/core/consensus/aura/src/lib.rs      |  17 +-
 substrate/core/consensus/babe/src/lib.rs      |   9 +-
 substrate/core/consensus/babe/src/tests.rs    |  11 +-
 .../core/finality-grandpa/src/environment.rs  |  68 ++--
 .../finality-grandpa/src/finality_proof.rs    |  24 +-
 substrate/core/finality-grandpa/src/import.rs |  13 +-
 .../finality-grandpa/src/justification.rs     |   4 +-
 substrate/core/finality-grandpa/src/lib.rs    |  11 +-
 .../core/finality-grandpa/src/light_import.rs |  92 +++---
 .../core/finality-grandpa/src/observer.rs     |   6 +-
 substrate/core/finality-grandpa/src/tests.rs  |  39 +--
 substrate/core/network/src/chain.rs           |   5 +-
 substrate/core/network/src/test/mod.rs        | 119 ++++---
 substrate/core/network/src/test/sync.rs       |  51 ++-
 substrate/core/rpc/src/chain/tests.rs         |   4 +-
 substrate/core/service/src/builder.rs         | 119 ++++---
 substrate/core/service/src/lib.rs             |  53 +--
 substrate/core/test-client/src/client_ext.rs  |   4 +-
 substrate/core/test-client/src/lib.rs         |   5 +
 substrate/core/test-runtime/client/src/lib.rs |  14 +-
 substrate/node-template/src/service.rs        |  48 +--
 substrate/node/cli/src/service.rs             |  45 ++-
 30 files changed, 626 insertions(+), 571 deletions(-)
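
Note for downstream users (below the cut line, not part of the commit
message): `client_db::new_client` now returns the backend alongside the
client, so service builders that previously received only the client
destructure a tuple, roughly (argument list elided, parameter names as they
appear in this patch):

    let (client, backend) = client_db::new_client(
        /* settings, executor, genesis_storage, execution_strategies, keystore */
    )?;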

diff --git a/substrate/core/cli/src/informant.rs b/substrate/core/cli/src/informant.rs
index 52a5f67c26d..a5fe52c09af 100644
--- a/substrate/core/cli/src/informant.rs
+++ b/substrate/core/cli/src/informant.rs
@@ -16,7 +16,7 @@
 
 //! Console informant. Prints sync progress and block events. Runs on the calling thread.
 
-use client::{backend::Backend, BlockchainEvents};
+use client::BlockchainEvents;
 use futures::{Future, Stream};
 use futures03::{StreamExt as _, TryStreamExt as _};
 use log::{info, warn};
@@ -48,8 +48,8 @@ pub fn build(service: &impl AbstractService) -> impl Future<Item = (), Error = (
 		if let Some((ref last_num, ref last_hash)) = last_best {
 			if n.header.parent_hash() != last_hash && n.is_new_best  {
 				let tree_route = ::client::blockchain::tree_route(
-					#[allow(deprecated)]
-					client.backend().blockchain(),
+					|id| client.header(&id)?.ok_or_else(
+						|| client::error::Error::UnknownBlock(format!("{:?}", id))),
 					BlockId::Hash(last_hash.clone()),
 					BlockId::Hash(n.hash),
 				);
diff --git a/substrate/core/client/db/src/lib.rs b/substrate/core/client/db/src/lib.rs
index a6301596adc..d2a9d54e82f 100644
--- a/substrate/core/client/db/src/lib.rs
+++ b/substrate/core/client/db/src/lib.rs
@@ -197,9 +197,16 @@ pub fn new_client<E, S, Block, RA>(
 	genesis_storage: S,
 	execution_strategies: ExecutionStrategies,
 	keystore: Option<primitives::traits::BareCryptoStorePtr>,
-) -> Result<
-	client::Client<Backend<Block>,
-	client::LocalCallExecutor<Backend<Block>, E>, Block, RA>, client::error::Error
+) -> Result<(
+		client::Client<
+			Backend<Block>,
+			client::LocalCallExecutor<Backend<Block>, E>,
+			Block,
+			RA,
+		>,
+		Arc<Backend<Block>>,
+	),
+	client::error::Error,
 >
 	where
 		Block: BlockT<Hash=H256>,
@@ -208,7 +215,10 @@ pub fn new_client<E, S, Block, RA>(
 {
 	let backend = Arc::new(Backend::new(settings, CANONICALIZATION_DELAY)?);
 	let executor = client::LocalCallExecutor::new(backend.clone(), executor, keystore);
-	Ok(client::Client::new(backend, executor, genesis_storage, execution_strategies)?)
+	Ok((
+		client::Client::new(backend.clone(), executor, genesis_storage, execution_strategies)?,
+		backend,
+	))
 }
 
 pub(crate) mod columns {
@@ -871,7 +881,9 @@ impl<Block: BlockT<Hash=H256>> Backend<Block> {
 		// cannot find tree route with empty DB.
 		if meta.best_hash != Default::default() {
 			let tree_route = ::client::blockchain::tree_route(
-				&self.blockchain,
+				|id| self.blockchain.header(id)?.ok_or_else(
+					|| client::error::Error::UnknownBlock(format!("{:?}", id))
+				),
 				BlockId::Hash(meta.best_hash),
 				BlockId::Hash(route_to),
 			)?;
@@ -2018,6 +2030,7 @@ mod tests {
 	#[test]
 	fn tree_route_works() {
 		let backend = Backend::<Block>::new_test(1000, 100);
+		let blockchain = backend.blockchain();
 		let block0 = insert_header(&backend, 0, Default::default(), Vec::new(), Default::default());
 
 		// fork from genesis: 3 prong.
@@ -2031,7 +2044,7 @@ mod tests {
 
 		{
 			let tree_route = ::client::blockchain::tree_route(
-				backend.blockchain(),
+				|id| blockchain.header(id)?.ok_or_else(|| client::error::Error::UnknownBlock(format!("{:?}", id))),
 				BlockId::Hash(a3),
 				BlockId::Hash(b2)
 			).unwrap();
@@ -2043,7 +2056,7 @@ mod tests {
 
 		{
 			let tree_route = ::client::blockchain::tree_route(
-				backend.blockchain(),
+				|id| blockchain.header(id)?.ok_or_else(|| client::error::Error::UnknownBlock(format!("{:?}", id))),
 				BlockId::Hash(a1),
 				BlockId::Hash(a3),
 			).unwrap();
@@ -2055,7 +2068,7 @@ mod tests {
 
 		{
 			let tree_route = ::client::blockchain::tree_route(
-				backend.blockchain(),
+				|id| blockchain.header(id)?.ok_or_else(|| client::error::Error::UnknownBlock(format!("{:?}", id))),
 				BlockId::Hash(a3),
 				BlockId::Hash(a1),
 			).unwrap();
@@ -2067,7 +2080,7 @@ mod tests {
 
 		{
 			let tree_route = ::client::blockchain::tree_route(
-				backend.blockchain(),
+				|id| blockchain.header(id)?.ok_or_else(|| client::error::Error::UnknownBlock(format!("{:?}", id))),
 				BlockId::Hash(a2),
 				BlockId::Hash(a2),
 			).unwrap();
@@ -2081,13 +2094,14 @@ mod tests {
 	#[test]
 	fn tree_route_child() {
 		let backend = Backend::<Block>::new_test(1000, 100);
+		let blockchain = backend.blockchain();
 
 		let block0 = insert_header(&backend, 0, Default::default(), Vec::new(), Default::default());
 		let block1 = insert_header(&backend, 1, block0, Vec::new(), Default::default());
 
 		{
 			let tree_route = ::client::blockchain::tree_route(
-				backend.blockchain(),
+				|id| blockchain.header(id)?.ok_or_else(|| client::error::Error::UnknownBlock(format!("{:?}", id))),
 				BlockId::Hash(block0),
 				BlockId::Hash(block1),
 			).unwrap();
diff --git a/substrate/core/client/db/src/light.rs b/substrate/core/client/db/src/light.rs
index 86408a155d1..2d100dad296 100644
--- a/substrate/core/client/db/src/light.rs
+++ b/substrate/core/client/db/src/light.rs
@@ -213,7 +213,7 @@ impl<Block: BlockT> LightStorage<Block> {
 		let meta = self.meta.read();
 		if meta.best_hash != Default::default() {
 			let tree_route = ::client::blockchain::tree_route(
-				self,
+				|id| self.header(id)?.ok_or_else(|| client::error::Error::UnknownBlock(format!("{:?}", id))),
 				BlockId::Hash(meta.best_hash),
 				BlockId::Hash(route_to),
 			)?;
@@ -780,7 +780,7 @@ pub(crate) mod tests {
 
 		{
 			let tree_route = ::client::blockchain::tree_route(
-				&db,
+				|id| db.header(id)?.ok_or_else(|| client::error::Error::UnknownBlock(format!("{:?}", id))),
 				BlockId::Hash(a3),
 				BlockId::Hash(b2)
 			).unwrap();
@@ -792,7 +792,7 @@ pub(crate) mod tests {
 
 		{
 			let tree_route = ::client::blockchain::tree_route(
-				&db,
+				|id| db.header(id)?.ok_or_else(|| client::error::Error::UnknownBlock(format!("{:?}", id))),
 				BlockId::Hash(a1),
 				BlockId::Hash(a3),
 			).unwrap();
@@ -804,7 +804,7 @@ pub(crate) mod tests {
 
 		{
 			let tree_route = ::client::blockchain::tree_route(
-				&db,
+				|id| db.header(id)?.ok_or_else(|| client::error::Error::UnknownBlock(format!("{:?}", id))),
 				BlockId::Hash(a3),
 				BlockId::Hash(a1),
 			).unwrap();
@@ -816,7 +816,7 @@ pub(crate) mod tests {
 
 		{
 			let tree_route = ::client::blockchain::tree_route(
-				&db,
+				|id| db.header(id)?.ok_or_else(|| client::error::Error::UnknownBlock(format!("{:?}", id))),
 				BlockId::Hash(a2),
 				BlockId::Hash(a2),
 			).unwrap();
diff --git a/substrate/core/client/src/backend.rs b/substrate/core/client/src/backend.rs
index 24b48c9b861..07bb6d6c912 100644
--- a/substrate/core/client/src/backend.rs
+++ b/substrate/core/client/src/backend.rs
@@ -23,7 +23,7 @@ use sr_primitives::{generic::BlockId, Justification, StorageOverlay, ChildrenSto
 use sr_primitives::traits::{Block as BlockT, NumberFor};
 use state_machine::backend::Backend as StateBackend;
 use state_machine::ChangesTrieStorage as StateChangesTrieStorage;
-use consensus::well_known_cache_keys;
+use consensus::{well_known_cache_keys, BlockOrigin};
 use hash_db::Hasher;
 use trie::MemoryDB;
 use parking_lot::Mutex;
@@ -34,6 +34,25 @@ pub type StorageCollection = Vec<(Vec<u8>, Option<Vec<u8>>)>;
 /// In memory arrays of storage values for multiple child tries.
 pub type ChildStorageCollection = Vec<(Vec<u8>, StorageCollection)>;
 
+/// Import operation wrapper
+pub struct ClientImportOperation<
+	Block: BlockT,
+	H: Hasher<Out=Block::Hash>,
+	B: Backend<Block, H>,
+> {
+	pub(crate) op: B::BlockImportOperation,
+	pub(crate) notify_imported: Option<(
+		Block::Hash,
+		BlockOrigin,
+		Block::Header,
+		bool,
+		Option<(
+			StorageCollection,
+			ChildStorageCollection,
+		)>)>,
+	pub(crate) notify_finalized: Vec<Block::Hash>,
+}
+
 /// State of a new block.
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub enum NewBlockState {
@@ -105,6 +124,45 @@ pub trait BlockImportOperation<Block, H> where
 	fn mark_head(&mut self, id: BlockId<Block>) -> error::Result<()>;
 }
 
+/// Block finalization facilities.
+pub trait Finalizer<Block: BlockT, H: Hasher<Out=Block::Hash>, B: Backend<Block, H>> {
+	/// Mark all blocks up to the given one as finalized in the operation. If a
+	/// justification is provided it is stored with the given finalized
+	/// block (any other finalized blocks are left unjustified).
+	///
+	/// If the block being finalized is on a different fork from the current
+	/// best block, the finalized block is set as best. This might be slightly
+	/// inaccurate (i.e. outdated). Usages that require determining an accurate
+	/// best block should use `SelectChain` instead of the client.
+	fn apply_finality(
+		&self,
+		operation: &mut ClientImportOperation<Block, H, B>,
+		id: BlockId<Block>,
+		justification: Option<Justification>,
+		notify: bool,
+	) -> error::Result<()>;
+
+
+	/// Finalize a block. This will implicitly finalize all blocks up to it and
+	/// fire finality notifications.
+	///
+	/// If the block being finalized is on a different fork from the current
+	/// best block, the finalized block is set as best. This might be slightly
+	/// inaccurate (i.e. outdated). Usages that require determining an accurate
+	/// best block should use `SelectChain` instead of the client.
+	///
+	/// Pass a flag to indicate whether finality notifications should be propagated.
+	/// This is usually tied to some synchronization state, where we don't send notifications
+	/// while performing major synchronization work.
+	fn finalize_block(
+		&self,
+		id: BlockId<Block>,
+		justification: Option<Justification>,
+		notify: bool,
+	) -> error::Result<()>;
+
+}
+
 /// Provides access to an auxiliary database.
 pub trait AuxStore {
 	/// Insert auxiliary data into key-value store. Deletions occur after insertions.
diff --git a/substrate/core/client/src/blockchain.rs b/substrate/core/client/src/blockchain.rs
index f4a4f8a4aa9..9c9c9153889 100644
--- a/substrate/core/client/src/blockchain.rs
+++ b/substrate/core/client/src/blockchain.rs
@@ -197,21 +197,11 @@ impl<Block: BlockT> TreeRoute<Block> {
 }
 
 /// Compute a tree-route between two blocks. See tree-route docs for more details.
-pub fn tree_route<Block: BlockT, Backend: HeaderBackend<Block>>(
-	backend: &Backend,
+pub fn tree_route<Block: BlockT, F: Fn(BlockId<Block>) -> Result<<Block as BlockT>::Header>>(
+	load_header: F,
 	from: BlockId<Block>,
 	to: BlockId<Block>,
 ) -> Result<TreeRoute<Block>> {
-	use sr_primitives::traits::Header;
-
-	let load_header = |id: BlockId<Block>| {
-		match backend.header(id) {
-			Ok(Some(hdr)) => Ok(hdr),
-			Ok(None) => Err(Error::UnknownBlock(format!("Unknown block {:?}", id))),
-			Err(e) => Err(e),
-		}
-	};
-
 	let mut from = load_header(from)?;
 	let mut to = load_header(to)?;
 
diff --git a/substrate/core/client/src/client.rs b/substrate/core/client/src/client.rs
index 5b0cba3a405..41dae589705 100644
--- a/substrate/core/client/src/client.rs
+++ b/substrate/core/client/src/client.rs
@@ -61,7 +61,7 @@ use crate::{
 	},
 	backend::{
 		self, BlockImportOperation, PrunableStateChangesTrieStorage,
-		StorageCollection, ChildStorageCollection
+		ClientImportOperation, Finalizer,
 	},
 	blockchain::{
 		self, Info as ChainInfo, Backend as ChainBackend,
@@ -128,21 +128,6 @@ pub struct Client<B, E, Block, RA> where Block: BlockT {
 	_phantom: PhantomData<RA>,
 }
 
-/// Client import operation, a wrapper for the backend.
-pub struct ClientImportOperation<Block: BlockT, H: Hasher<Out=Block::Hash>, B: backend::Backend<Block, H>> {
-	op: B::BlockImportOperation,
-	notify_imported: Option<(
-		Block::Hash,
-		BlockOrigin,
-		Block::Header,
-		bool,
-		Option<(
-			StorageCollection,
-			ChildStorageCollection,
-		)>)>,
-	notify_finalized: Vec<Block::Hash>,
-}
-
 /// A source of blockchain events.
 pub trait BlockchainEvents<Block: BlockT> {
 	/// Get block import event stream. Not guaranteed to be fired for every
@@ -183,6 +168,8 @@ pub trait ProvideUncles<Block: BlockT> {
 pub struct ClientInfo<Block: BlockT> {
 	/// Best block hash.
 	pub chain: ChainInfo<Block>,
+	/// State cache size currently used by the backend.
+	pub used_state_cache_size: Option<usize>,
 }
 
 /// Block status.
@@ -828,28 +815,9 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
 		result
 	}
 
-	/// Set a block as best block.
-	pub fn set_head(
-		&self,
-		id: BlockId<Block>
-	) -> error::Result<()> {
-		self.lock_import_and_run(|operation| {
-			self.apply_head(operation, id)
-		})
-	}
-
-	/// Set a block as best block, and apply it to an operation.
-	pub fn apply_head(
-		&self,
-		operation: &mut ClientImportOperation<Block, Blake2Hasher, B>,
-		id: BlockId<Block>,
-	) -> error::Result<()> {
-		operation.op.mark_head(id)
-	}
-
 	/// Apply a checked and validated block to an operation. If a justification is provided
 	/// then `finalized` *must* be true.
-	pub fn apply_block(
+	fn apply_block(
 		&self,
 		operation: &mut ClientImportOperation<Block, Blake2Hasher, B>,
 		import_block: BlockImportParams<Block>,
@@ -945,7 +913,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
 
 		// find tree route from last finalized to given block.
 		let route_from_finalized = crate::blockchain::tree_route(
-			self.backend.blockchain(),
+			|id| self.header(&id)?.ok_or_else(|| Error::UnknownBlock(format!("{:?}", id))),
 			BlockId::Hash(info.finalized_hash),
 			BlockId::Hash(parent_hash),
 		)?;
@@ -1112,7 +1080,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
 		}
 
 		let route_from_finalized = crate::blockchain::tree_route(
-			self.backend.blockchain(),
+			|id| self.header(&id)?.ok_or_else(|| Error::UnknownBlock(format!("{:?}", id))),
 			BlockId::Hash(last_finalized),
 			BlockId::Hash(block),
 		)?;
@@ -1125,7 +1093,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
 		}
 
 		let route_from_best = crate::blockchain::tree_route(
-			self.backend.blockchain(),
+			|id| self.header(&id)?.ok_or_else(|| Error::UnknownBlock(format!("{:?}", id))),
 			BlockId::Hash(best_block),
 			BlockId::Hash(block),
 		)?;
@@ -1228,68 +1196,6 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
 		Ok(())
 	}
 
-	/// Apply auxiliary data insertion into an operation.
-	pub fn apply_aux<
-		'a,
-		'b: 'a,
-		'c: 'a,
-		I: IntoIterator<Item=&'a(&'c [u8], &'c [u8])>,
-		D: IntoIterator<Item=&'a &'b [u8]>,
-	>(
-		&self,
-		operation: &mut ClientImportOperation<Block, Blake2Hasher, B>,
-		insert: I,
-		delete: D
-	) -> error::Result<()> {
-		operation.op.insert_aux(
-			insert.into_iter()
-				.map(|(k, v)| (k.to_vec(), Some(v.to_vec())))
-				.chain(delete.into_iter().map(|k| (k.to_vec(), None)))
-		)
-	}
-
-	/// Mark all blocks up to given as finalized in operation. If a
-	/// justification is provided it is stored with the given finalized
-	/// block (any other finalized blocks are left unjustified).
-	///
-	/// If the block being finalized is on a different fork from the current
-	/// best block the finalized block is set as best, this might be slightly
-	/// innacurate (i.e. outdated), usages that require determining an accurate
-	/// best block should use `SelectChain` instead of the client.
-	pub fn apply_finality(
-		&self,
-		operation: &mut ClientImportOperation<Block, Blake2Hasher, B>,
-		id: BlockId<Block>,
-		justification: Option<Justification>,
-		notify: bool,
-	) -> error::Result<()> {
-		let last_best = self.backend.blockchain().info().best_hash;
-		let to_finalize_hash = self.backend.blockchain().expect_block_hash_from_id(&id)?;
-		self.apply_finality_with_block_hash(operation, to_finalize_hash, justification, last_best, notify)
-	}
-
-	/// Finalize a block. This will implicitly finalize all blocks up to it and
-	/// fire finality notifications.
-	///
-	/// If the block being finalized is on a different fork from the current
-	/// best block the finalized block is set as best, this might be slightly
-	/// innacurate (i.e. outdated), usages that require determining an accurate
-	/// best block should use `SelectChain` instead of the client.
-	///
-	/// Pass a flag to indicate whether finality notifications should be propagated.
-	/// This is usually tied to some synchronization state, where we don't send notifications
-	/// while performing major synchronization work.
-	pub fn finalize_block(&self, id: BlockId<Block>, justification: Option<Justification>, notify: bool) -> error::Result<()> {
-		self.lock_import_and_run(|operation| {
-			let last_best = self.backend.blockchain().info().best_hash;
-			let to_finalize_hash = self.backend.blockchain().expect_block_hash_from_id(&id)?;
-			self.apply_finality_with_block_hash(operation, to_finalize_hash, justification, last_best, notify)
-		}).map_err(|e| {
-			warn!("Block finalization error:\n{:?}", e);
-			e
-		})
-	}
-
 	/// Attempts to revert the chain by `n` blocks. Returns the number of blocks that were
 	/// successfully reverted.
 	pub fn revert(&self, n: NumberFor<Block>) -> error::Result<NumberFor<Block>> {
@@ -1301,6 +1207,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
 		let info = self.backend.blockchain().info();
 		ClientInfo {
 			chain: info,
+			used_state_cache_size: self.backend.used_state_cache_size(),
 		}
 	}
 
@@ -1359,7 +1266,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
 		let load_header = |id: Block::Hash| -> error::Result<Block::Header> {
 			match self.backend.blockchain().header(BlockId::Hash(id))? {
 				Some(hdr) => Ok(hdr),
-				None => Err(Error::UnknownBlock(format!("Unknown block {:?}", id))),
+				None => Err(Error::UnknownBlock(format!("{:?}", id))),
 			}
 		};
 
@@ -1446,6 +1353,33 @@ impl<B, E, Block, RA> ChainHeaderBackend<Block> for Client<B, E, Block, RA> wher
 	}
 }
 
+impl<B, E, Block, RA> ChainHeaderBackend<Block> for &Client<B, E, Block, RA> where
+	B: backend::Backend<Block, Blake2Hasher>,
+	E: CallExecutor<Block, Blake2Hasher> + Send + Sync,
+	Block: BlockT<Hash=H256>,
+	RA: Send + Sync,
+{
+	fn header(&self, id: BlockId<Block>) -> error::Result<Option<Block::Header>> {
+		(**self).backend.blockchain().header(id)
+	}
+
+	fn info(&self) -> blockchain::Info<Block> {
+		(**self).backend.blockchain().info()
+	}
+
+	fn status(&self, id: BlockId<Block>) -> error::Result<blockchain::BlockStatus> {
+		(**self).status(id)
+	}
+
+	fn number(&self, hash: Block::Hash) -> error::Result<Option<<<Block as BlockT>::Header as HeaderT>::Number>> {
+		(**self).number(hash)
+	}
+
+	fn hash(&self, number: NumberFor<Block>) -> error::Result<Option<Block::Hash>> {
+		(**self).hash(number)
+	}
+}
+
 impl<B, E, Block, RA> ProvideCache<Block> for Client<B, E, Block, RA> where
 	B: backend::Backend<Block, Blake2Hasher>,
 	Block: BlockT<Hash=H256>,
@@ -1601,6 +1535,50 @@ impl<B, E, Block, RA> consensus::BlockImport<Block> for Client<B, E, Block, RA>
 	}
 }
 
+impl<B, E, Block, RA> Finalizer<Block, Blake2Hasher, B> for Client<B, E, Block, RA> where
+	B: backend::Backend<Block, Blake2Hasher>,
+	E: CallExecutor<Block, Blake2Hasher>,
+	Block: BlockT<Hash=H256>,
+{
+	fn apply_finality(
+		&self,
+		operation: &mut ClientImportOperation<Block, Blake2Hasher, B>,
+		id: BlockId<Block>,
+		justification: Option<Justification>,
+		notify: bool,
+	) -> error::Result<()> {
+		let last_best = self.backend.blockchain().info().best_hash;
+		let to_finalize_hash = self.backend.blockchain().expect_block_hash_from_id(&id)?;
+		self.apply_finality_with_block_hash(operation, to_finalize_hash, justification, last_best, notify)
+	}
+
+	fn finalize_block(&self, id: BlockId<Block>, justification: Option<Justification>, notify: bool) -> error::Result<()> {
+		self.lock_import_and_run(|operation| {
+			self.apply_finality(operation, id, justification, notify)
+		})
+	}
+}
+
+impl<B, E, Block, RA> Finalizer<Block, Blake2Hasher, B> for &Client<B, E, Block, RA> where
+	B: backend::Backend<Block, Blake2Hasher>,
+	E: CallExecutor<Block, Blake2Hasher>,
+	Block: BlockT<Hash=H256>,
+{
+	fn apply_finality(
+		&self,
+		operation: &mut ClientImportOperation<Block, Blake2Hasher, B>,
+		id: BlockId<Block>,
+		justification: Option<Justification>,
+		notify: bool,
+	) -> error::Result<()> {
+		(**self).apply_finality(operation, id, justification, notify)
+	}
+
+	fn finalize_block(&self, id: BlockId<Block>, justification: Option<Justification>, notify: bool) -> error::Result<()> {
+		(**self).finalize_block(id, justification, notify)
+	}
+}
+
 impl<B, E, Block, RA> BlockchainEvents<Block> for Client<B, E, Block, RA>
 where
 	E: CallExecutor<Block, Blake2Hasher>,
@@ -1847,7 +1825,7 @@ impl<B, E, Block, RA> backend::AuxStore for Client<B, E, Block, RA>
 		// layer, one can always use atomic operations to make sure
 		// import is only locked once.
 		self.lock_import_and_run(|operation| {
-			self.apply_aux(operation, insert, delete)
+			apply_aux(operation, insert, delete)
 		})
 	}
 	/// Query auxiliary data from key-value store.
@@ -1856,6 +1834,49 @@ impl<B, E, Block, RA> backend::AuxStore for Client<B, E, Block, RA>
 	}
 }
 
+
+impl<B, E, Block, RA> backend::AuxStore for &Client<B, E, Block, RA>
+	where
+		B: backend::Backend<Block, Blake2Hasher>,
+		E: CallExecutor<Block, Blake2Hasher>,
+		Block: BlockT<Hash=H256>,
+{
+
+	fn insert_aux<
+		'a,
+		'b: 'a,
+		'c: 'a,
+		I: IntoIterator<Item=&'a(&'c [u8], &'c [u8])>,
+		D: IntoIterator<Item=&'a &'b [u8]>,
+	>(&self, insert: I, delete: D) -> error::Result<()> {
+		(**self).insert_aux(insert, delete)
+	}
+
+	fn get_aux(&self, key: &[u8]) -> error::Result<Option<Vec<u8>>> {
+		(**self).get_aux(key)
+	}
+}
+
+/// Helper function to apply auxiliary data insertion into an operation.
+pub fn apply_aux<'a, 'b: 'a, 'c: 'a, B, Block, H, D, I>(
+	operation: &mut ClientImportOperation<Block, H, B>,
+	insert: I,
+	delete: D
+) -> error::Result<()>
+where
+	Block: BlockT,
+	H: Hasher<Out=Block::Hash>,
+	B: backend::Backend<Block, H>,
+	I: IntoIterator<Item=&'a(&'c [u8], &'c [u8])>,
+	D: IntoIterator<Item=&'a &'b [u8]>,
+{
+	operation.op.insert_aux(
+		insert.into_iter()
+			.map(|(k, v)| (k.to_vec(), Some(v.to_vec())))
+			.chain(delete.into_iter().map(|k| (k.to_vec(), None)))
+	)
+}
+
 /// Utility methods for the client.
 pub mod utils {
 	use super::*;
@@ -1891,8 +1912,7 @@ pub mod utils {
 			}
 
 			let tree_route = blockchain::tree_route(
-				#[allow(deprecated)]
-				client.backend().blockchain(),
+				|id| client.header(&id)?.ok_or_else(|| Error::UnknownBlock(format!("{:?}", id))),
 				BlockId::Hash(*hash),
 				BlockId::Hash(*base),
 			)?;
@@ -1911,7 +1931,6 @@ pub(crate) mod tests {
 	use consensus::{BlockOrigin, SelectChain};
 	use test_client::{
 		prelude::*,
-		client::backend::Backend as TestBackend,
 		client_db::{Backend, DatabaseSettings, PruningMode},
 		runtime::{self, Block, Transfer, RuntimeApi, TestAPI},
 	};
@@ -2572,8 +2591,6 @@ pub(crate) mod tests {
 
 	#[test]
 	fn import_with_justification() {
-		use test_client::blockchain::Backend;
-
 		let client = test_client::new();
 
 		// G -> A1
@@ -2589,33 +2606,29 @@ pub(crate) mod tests {
 		let a3 = client.new_block_at(&BlockId::Hash(a2.hash()), Default::default()).unwrap().bake().unwrap();
 		client.import_justified(BlockOrigin::Own, a3.clone(), justification.clone()).unwrap();
 
-		#[allow(deprecated)]
-		let blockchain = client.backend().blockchain();
-
 		assert_eq!(
-			blockchain.last_finalized().unwrap(),
+			client.info().chain.finalized_hash,
 			a3.hash(),
 		);
 
 		assert_eq!(
-			blockchain.justification(BlockId::Hash(a3.hash())).unwrap(),
+			client.justification(&BlockId::Hash(a3.hash())).unwrap(),
 			Some(justification),
 		);
 
 		assert_eq!(
-			blockchain.justification(BlockId::Hash(a1.hash())).unwrap(),
+			client.justification(&BlockId::Hash(a1.hash())).unwrap(),
 			None,
 		);
 
 		assert_eq!(
-			blockchain.justification(BlockId::Hash(a2.hash())).unwrap(),
+			client.justification(&BlockId::Hash(a2.hash())).unwrap(),
 			None,
 		);
 	}
 
 	#[test]
 	fn importing_diverged_finalized_block_should_trigger_reorg() {
-		use test_client::blockchain::HeaderBackend;
 
 		let client = test_client::new();
 
@@ -2639,12 +2652,9 @@ pub(crate) mod tests {
 		// create but don't import B1 just yet
 		let b1 = b1.bake().unwrap();
 
-		#[allow(deprecated)]
-		let blockchain = client.backend().blockchain();
-
 		// A2 is the current best since it's the longest chain
 		assert_eq!(
-			blockchain.info().best_hash,
+			client.info().chain.best_hash,
 			a2.hash(),
 		);
 
@@ -2653,19 +2663,18 @@ pub(crate) mod tests {
 		client.import_justified(BlockOrigin::Own, b1.clone(), justification).unwrap();
 
 		assert_eq!(
-			blockchain.info().best_hash,
+			client.info().chain.best_hash,
 			b1.hash(),
 		);
 
 		assert_eq!(
-			blockchain.info().finalized_hash,
+			client.info().chain.finalized_hash,
 			b1.hash(),
 		);
 	}
 
 	#[test]
 	fn finalizing_diverged_block_should_trigger_reorg() {
-		use test_client::blockchain::HeaderBackend;
 
 		let (client, select_chain) = TestClientBuilder::new().build_with_longest_chain();
 
@@ -2692,29 +2701,26 @@ pub(crate) mod tests {
 		let b2 = client.new_block_at(&BlockId::Hash(b1.hash()), Default::default()).unwrap().bake().unwrap();
 		client.import(BlockOrigin::Own, b2.clone()).unwrap();
 
-		#[allow(deprecated)]
-		let blockchain = client.backend().blockchain();
-
 		// A2 is the current best since it's the longest chain
 		assert_eq!(
-			blockchain.info().best_hash,
+			client.info().chain.best_hash,
 			a2.hash(),
 		);
 
 		// we finalize block B1 which is on a different branch from current best
 		// which should trigger a re-org.
-		client.finalize_block(BlockId::Hash(b1.hash()), None, false).unwrap();
+		client.finalize_block(BlockId::Hash(b1.hash()), None).unwrap();
 
 		// B1 should now be the latest finalized
 		assert_eq!(
-			blockchain.info().finalized_hash,
+			client.info().chain.finalized_hash,
 			b1.hash(),
 		);
 
 		// and B1 should be the new best block (`finalize_block` as no way of
 		// knowing about B2)
 		assert_eq!(
-			blockchain.info().best_hash,
+			client.info().chain.best_hash,
 			b1.hash(),
 		);
 
@@ -2733,7 +2739,7 @@ pub(crate) mod tests {
 		client.import(BlockOrigin::Own, b3.clone()).unwrap();
 
 		assert_eq!(
-			blockchain.info().best_hash,
+			client.info().chain.best_hash,
 			b3.hash(),
 		);
 	}
@@ -2795,44 +2801,6 @@ pub(crate) mod tests {
 		assert_eq!(980, current_balance());
 	}
 
-	#[test]
-	fn state_reverted_on_set_head() {
-		let _ = env_logger::try_init();
-		let client = test_client::new();
-
-		let current_balance = ||
-			client.runtime_api().balance_of(
-				&BlockId::number(client.info().chain.best_number), AccountKeyring::Alice.into()
-			).unwrap();
-
-		// G -> A1
-		//   \
-		//    -> B1
-		let mut a1 = client.new_block_at(&BlockId::Number(0), Default::default()).unwrap();
-		a1.push_transfer(Transfer {
-			from: AccountKeyring::Alice.into(),
-			to: AccountKeyring::Bob.into(),
-			amount: 10,
-			nonce: 0,
-		}).unwrap();
-		let a1 = a1.bake().unwrap();
-		client.import(BlockOrigin::Own, a1.clone()).unwrap();
-
-		let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default()).unwrap();
-		b1.push_transfer(Transfer {
-			from: AccountKeyring::Alice.into(),
-			to: AccountKeyring::Ferdie.into(),
-			amount: 50,
-			nonce: 0,
-		}).unwrap();
-		let b1 = b1.bake().unwrap();
-		client.import(BlockOrigin::Own, b1.clone()).unwrap();
-		assert_eq!(990, current_balance());
-		// Set B1 as new best
-		client.set_head(BlockId::hash(b1.hash())).unwrap();
-		assert_eq!(950, current_balance());
-	}
-
 	#[test]
 	fn doesnt_import_blocks_that_revert_finality() {
 		let _ = env_logger::try_init();
@@ -2885,7 +2853,7 @@ pub(crate) mod tests {
 
 		// we will finalize A2 which should make it impossible to import a new
 		// B3 at the same height but that doesnt't include it
-		client.finalize_block(BlockId::Hash(a2.hash()), None, false).unwrap();
+		client.finalize_block(BlockId::Hash(a2.hash()), None).unwrap();
 
 		let b3 = client.new_block_at(&BlockId::Hash(b2.hash()), Default::default())
 			.unwrap().bake().unwrap();
diff --git a/substrate/core/client/src/in_mem.rs b/substrate/core/client/src/in_mem.rs
index 1a6881ea8db..f450ceebe2a 100644
--- a/substrate/core/client/src/in_mem.rs
+++ b/substrate/core/client/src/in_mem.rs
@@ -223,7 +223,7 @@ impl<Block: BlockT> Blockchain<Block> {
 				None
 			} else {
 				let route = crate::blockchain::tree_route(
-					self,
+					|id| self.header(id)?.ok_or_else(|| error::Error::UnknownBlock(format!("{:?}", id))),
 					BlockId::Hash(best_hash),
 					BlockId::Hash(*header.parent_hash()),
 				)?;
diff --git a/substrate/core/client/src/lib.rs b/substrate/core/client/src/lib.rs
index 99cbecbe894..1313b8b80eb 100644
--- a/substrate/core/client/src/lib.rs
+++ b/substrate/core/client/src/lib.rs
@@ -116,7 +116,7 @@ pub use crate::client::{
 	BlockBody, BlockStatus, ImportNotifications, FinalityNotifications, BlockchainEvents,
 	BlockImportNotification, Client, ClientInfo, ExecutionStrategies, FinalityNotification,
 	LongestChain, BlockOf, ProvideUncles,
-	utils,
+	utils, apply_aux,
 };
 #[cfg(feature = "std")]
 pub use crate::notifications::{StorageEventStream, StorageChangeSet};
diff --git a/substrate/core/consensus/aura/src/lib.rs b/substrate/core/consensus/aura/src/lib.rs
index 406874eafbc..a2cf9e83bd8 100644
--- a/substrate/core/consensus/aura/src/lib.rs
+++ b/substrate/core/consensus/aura/src/lib.rs
@@ -680,7 +680,7 @@ mod tests {
 	use parking_lot::Mutex;
 	use tokio::runtime::current_thread;
 	use keyring::sr25519::Keyring;
-	use client::{LongestChain, BlockchainEvents};
+	use client::BlockchainEvents;
 	use test_client;
 	use aura_primitives::sr25519::AuthorityPair;
 
@@ -744,7 +744,7 @@ mod tests {
 			-> Self::Verifier
 		{
 			match client {
-				PeersClient::Full(client) => {
+				PeersClient::Full(client, _) => {
 					let slot_duration = SlotDuration::get_or_compute(&*client)
 						.expect("slot duration available");
 					let inherent_data_providers = InherentDataProviders::new();
@@ -761,7 +761,7 @@ mod tests {
 						phantom: Default::default(),
 					}
 				},
-				PeersClient::Light(_) => unreachable!("No (yet) tests for light client + Aura"),
+				PeersClient::Light(_, _) => unreachable!("No (yet) tests for light client + Aura"),
 			}
 		}
 
@@ -796,18 +796,17 @@ mod tests {
 		let mut runtime = current_thread::Runtime::new().unwrap();
 		let mut keystore_paths = Vec::new();
 		for (peer_id, key) in peers {
+			let mut net = net.lock();
+			let peer = net.peer(*peer_id);
+			let client = peer.client().as_full().expect("full clients are created").clone();
+			let select_chain = peer.select_chain().expect("full client has a select chain");
 			let keystore_path = tempfile::tempdir().expect("Creates keystore path");
 			let keystore = keystore::Store::open(keystore_path.path(), None).expect("Creates keystore.");
 
 			keystore.write().insert_ephemeral_from_seed::<AuthorityPair>(&key.to_seed())
 				.expect("Creates authority key");
 			keystore_paths.push(keystore_path);
-
-			let client = net.lock().peer(*peer_id).client().as_full().expect("full clients are created").clone();
-			#[allow(deprecated)]
-			let select_chain = LongestChain::new(
-				client.backend().clone(),
-			);
+
 			let environ = DummyFactory(client.clone());
 			import_notifications.push(
 				client.import_notification_stream()
diff --git a/substrate/core/consensus/babe/src/lib.rs b/substrate/core/consensus/babe/src/lib.rs
index 6d049dd6be1..c9c80eb1e92 100644
--- a/substrate/core/consensus/babe/src/lib.rs
+++ b/substrate/core/consensus/babe/src/lib.rs
@@ -857,8 +857,7 @@ impl<B, E, Block, RA, PRA, T> Verifier<Block> for BabeVerifier<B, E, Block, RA,
 				// chain.
 				let new_best = {
 					let (last_best, last_best_number) = {
-						#[allow(deprecated)]
-						let info = self.client.backend().blockchain().info();
+						let info = self.client.info().chain;
 						(info.best_hash, info.best_number)
 					};
 
@@ -1321,8 +1320,7 @@ impl<B, E, Block, I, RA, PRA> BlockImport<Block> for BabeBlockImport<B, E, Block
 
 		// early exit if block already in chain, otherwise the check for
 		// epoch changes will error when trying to re-import an epoch change
-		#[allow(deprecated)]
-		match self.client.backend().blockchain().status(BlockId::Hash(hash)) {
+		match self.client.status(BlockId::Hash(hash)) {
 			Ok(blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain),
 			Ok(blockchain::BlockStatus::Unknown) => {},
 			Err(e) => return Err(ConsensusError::ClientImport(e.to_string()).into()),
@@ -1496,8 +1494,7 @@ pub fn import_queue<B, E, Block: BlockT<Hash=H256>, I, RA, PRA, T>(
 		transaction_pool,
 	};
 
-	#[allow(deprecated)]
-	let epoch_changes = aux_schema::load_epoch_changes(&**client.backend())?;
+	let epoch_changes = aux_schema::load_epoch_changes(&*client)?;
 
 	let block_import = BabeBlockImport::new(
 		client.clone(),
diff --git a/substrate/core/consensus/babe/src/tests.rs b/substrate/core/consensus/babe/src/tests.rs
index 8635473a593..2410cadbd59 100644
--- a/substrate/core/consensus/babe/src/tests.rs
+++ b/substrate/core/consensus/babe/src/tests.rs
@@ -22,7 +22,7 @@
 use super::*;
 
 use babe_primitives::AuthorityPair;
-use client::{LongestChain, block_builder::BlockBuilder};
+use client::block_builder::BlockBuilder;
 use consensus_common::NoNetwork as DummyOracle;
 use network::test::*;
 use network::test::{Block as TestBlock, PeersClient};
@@ -203,12 +203,16 @@ fn run_one_test() {
 	let mut runtime = current_thread::Runtime::new().unwrap();
 	let mut keystore_paths = Vec::new();
 	for (peer_id, seed) in peers {
+		let mut net = net.lock();
+		let peer = net.peer(*peer_id);
+		let client = peer.client().as_full().expect("Only full clients are used in tests").clone();
+		let select_chain = peer.select_chain().expect("Full client has select_chain");
+
 		let keystore_path = tempfile::tempdir().expect("Creates keystore path");
 		let keystore = keystore::Store::open(keystore_path.path(), None).expect("Creates keystore");
 		keystore.write().insert_ephemeral_from_seed::<AuthorityPair>(seed).expect("Generates authority key");
 		keystore_paths.push(keystore_path);
 
-		let client = net.lock().peer(*peer_id).client().as_full().unwrap();
 		let environ = DummyFactory(client.clone());
 		import_notifications.push(
 			client.import_notification_stream()
@@ -223,9 +227,6 @@ fn run_one_test() {
 			&inherent_data_providers, config.get()
 		).expect("Registers babe inherent data provider");
 
-		#[allow(deprecated)]
-		let select_chain = LongestChain::new(client.backend().clone());
-
 		runtime.spawn(start_babe(BabeParams {
 			config,
 			block_import: client.clone(),
diff --git a/substrate/core/finality-grandpa/src/environment.rs b/substrate/core/finality-grandpa/src/environment.rs
index c0474cb0368..faabf705fb9 100644
--- a/substrate/core/finality-grandpa/src/environment.rs
+++ b/substrate/core/finality-grandpa/src/environment.rs
@@ -26,8 +26,9 @@ use tokio_timer::Delay;
 use parking_lot::RwLock;
 
 use client::{
-	backend::Backend, BlockchainEvents, CallExecutor, Client, error::Error as ClientError,
-	utils::is_descendent_of,
+	backend::Backend, apply_aux, BlockchainEvents, CallExecutor,
+	Client, error::Error as ClientError, utils::is_descendent_of,
+	blockchain::HeaderBackend, backend::Finalizer,
 };
 use grandpa::{
 	BlockNumberOps, Equivocation, Error as GrandpaError, round::State as RoundState,
@@ -498,8 +499,7 @@ pub(crate) fn ancestry<B, Block: BlockT<Hash=H256>, E, RA>(
 	if base == block { return Err(GrandpaError::NotDescendent) }
 
 	let tree_route_res = ::client::blockchain::tree_route(
-		#[allow(deprecated)]
-		client.backend().blockchain(),
+		|id| client.header(&id)?.ok_or(client::error::Error::UnknownBlock(format!("{:?}", id))),
 		BlockId::Hash(block),
 		BlockId::Hash(base),
 	);
@@ -632,8 +632,7 @@ where
 				current_rounds,
 			};
 
-			#[allow(deprecated)]
-			crate::aux_schema::write_voter_set_state(&**self.inner.backend(), &set_state)?;
+			crate::aux_schema::write_voter_set_state(&*self.inner, &set_state)?;
 
 			Ok(Some(set_state))
 		})?;
@@ -674,8 +673,7 @@ where
 				current_rounds,
 			};
 
-			#[allow(deprecated)]
-			crate::aux_schema::write_voter_set_state(&**self.inner.backend(), &set_state)?;
+			crate::aux_schema::write_voter_set_state(&*self.inner, &set_state)?;
 
 			Ok(Some(set_state))
 		})?;
@@ -726,8 +724,7 @@ where
 				current_rounds,
 			};
 
-			#[allow(deprecated)]
-			crate::aux_schema::write_voter_set_state(&**self.inner.backend(), &set_state)?;
+			crate::aux_schema::write_voter_set_state(&*self.inner, &set_state)?;
 
 			Ok(Some(set_state))
 		})?;
@@ -785,8 +782,7 @@ where
 				current_rounds,
 			};
 
-			#[allow(deprecated)]
-			crate::aux_schema::write_voter_set_state(&**self.inner.backend(), &set_state)?;
+			crate::aux_schema::write_voter_set_state(&*self.inner, &set_state)?;
 
 			Ok(Some(set_state))
 		})?;
@@ -875,24 +871,15 @@ pub(crate) fn finalize_block<B, Block: BlockT<Hash=H256>, E, RA>(
 	E: CallExecutor<Block, Blake2Hasher> + Send + Sync,
 	RA: Send + Sync,
 {
-	use client::blockchain::HeaderBackend;
-
-	#[allow(deprecated)]
-	let blockchain = client.backend().blockchain();
-	let info = blockchain.info();
-	if number <= info.finalized_number && blockchain.hash(number)? == Some(hash) {
-		// We might have a race condition on finality, since we can finalize
-		// through either sync (import justification) or through grandpa gossip.
-		// so let's make sure that this finalization request is no longer stale.
-		// This can also happen after a forced change (triggered by the finality
-		// tracker when finality is stalled), since the voter will be restarted
-		// at the median last finalized block, which can be lower than the local
-		// best finalized block.
-		warn!(target: "afg",
-			"Re-finalized block #{:?} ({:?}) in the canonical chain, current best finalized is #{:?}",
-			hash,
-			number,
-			info.finalized_number,
+	let status = client.info().chain;
+	if number <= status.finalized_number && client.hash(number)? == Some(hash) {
+		// This can happen after a forced change (triggered by the finality tracker when finality is stalled), since
+		// the voter will be restarted at the median last finalized block, which can be lower than the local best
+		// finalized block.
+		warn!(target: "afg", "Re-finalized block #{:?} ({:?}) in the canonical chain, current best finalized is #{:?}",
+				hash,
+				number,
+				status.finalized_number,
 		);
 
 		return Ok(());
@@ -929,7 +916,7 @@ pub(crate) fn finalize_block<B, Block: BlockT<Hash=H256>, E, RA>(
 
 			let write_result = crate::aux_schema::update_consensus_changes(
 				&*consensus_changes,
-				|insert| client.apply_aux(import_op, insert, &[]),
+				|insert| apply_aux(import_op, insert, &[]),
 			);
 
 			if let Err(e) = write_result {
@@ -1022,7 +1009,7 @@ pub(crate) fn finalize_block<B, Block: BlockT<Hash=H256>, E, RA>(
 			let write_result = crate::aux_schema::update_authority_set::<Block, _, _>(
 				&authority_set,
 				new_authorities.as_ref(),
-				|insert| client.apply_aux(import_op, insert, &[]),
+				|insert| apply_aux(import_op, insert, &[]),
 			);
 
 			if let Err(e) = write_result {
@@ -1053,15 +1040,12 @@ pub(crate) fn finalize_block<B, Block: BlockT<Hash=H256>, E, RA>(
 
 /// Using the given base get the block at the given height on this chain. The
 /// target block must be an ancestor of base, therefore `height <= base.height`.
-pub(crate) fn canonical_at_height<B, E, Block: BlockT<Hash=H256>, RA>(
-	client: &Client<B, E, Block, RA>,
+pub(crate) fn canonical_at_height<Block: BlockT<Hash=H256>, C: HeaderBackend<Block>>(
+	provider: C,
 	base: (Block::Hash, NumberFor<Block>),
 	base_is_canonical: bool,
 	height: NumberFor<Block>,
-) -> Result<Option<Block::Hash>, ClientError> where
-	B: Backend<Block, Blake2Hasher>,
-	E: CallExecutor<Block, Blake2Hasher> + Send + Sync,
-{
+) -> Result<Option<Block::Hash>, ClientError> {
 	if height > base.1 {
 		return Ok(None);
 	}
@@ -1070,17 +1054,17 @@ pub(crate) fn canonical_at_height<B, E, Block: BlockT<Hash=H256>, RA>(
 		if base_is_canonical {
 			return Ok(Some(base.0));
 		} else {
-			return Ok(client.block_hash(height).unwrap_or(None));
+			return Ok(provider.hash(height).unwrap_or(None));
 		}
 	} else if base_is_canonical {
-		return Ok(client.block_hash(height).unwrap_or(None));
+		return Ok(provider.hash(height).unwrap_or(None));
 	}
 
 	let one = NumberFor::<Block>::one();
 
 	// start by getting _canonical_ block with number at parent position and then iterating
 	// backwards by hash.
-	let mut current = match client.header(&BlockId::Number(base.1 - one))? {
+	let mut current = match provider.header(BlockId::Number(base.1 - one))? {
 		Some(header) => header,
 		_ => return Ok(None),
 	};
@@ -1089,7 +1073,7 @@ pub(crate) fn canonical_at_height<B, E, Block: BlockT<Hash=H256>, RA>(
 	let mut steps = base.1 - height - one;
 
 	while steps > NumberFor::<Block>::zero() {
-		current = match client.header(&BlockId::Hash(*current.parent_hash()))? {
+		current = match provider.header(BlockId::Hash(*current.parent_hash()))? {
 			Some(header) => header,
 			_ => return Ok(None),
 		};
diff --git a/substrate/core/finality-grandpa/src/finality_proof.rs b/substrate/core/finality-grandpa/src/finality_proof.rs
index 6262ad74a7a..4b84ede9334 100644
--- a/substrate/core/finality-grandpa/src/finality_proof.rs
+++ b/substrate/core/finality-grandpa/src/finality_proof.rs
@@ -130,36 +130,31 @@ impl<Block: BlockT> AuthoritySetForFinalityChecker<Block> for Arc<dyn FetchCheck
 }
 
 /// Finality proof provider for serving network requests.
-pub struct FinalityProofProvider<B, E, Block: BlockT<Hash=H256>, RA> {
-	client: Arc<Client<B, E, Block, RA>>,
+pub struct FinalityProofProvider<B, Block: BlockT<Hash=H256>> {
+	backend: Arc<B>,
 	authority_provider: Arc<dyn AuthoritySetForFinalityProver<Block>>,
 }
 
-impl<B, E, Block: BlockT<Hash=H256>, RA> FinalityProofProvider<B, E, Block, RA>
-	where
-		B: Backend<Block, Blake2Hasher> + Send + Sync + 'static,
-		E: CallExecutor<Block, Blake2Hasher> + 'static + Clone + Send + Sync,
-		RA: Send + Sync,
+impl<B, Block: BlockT<Hash=H256>> FinalityProofProvider<B, Block>
+	where B: Backend<Block, Blake2Hasher> + Send + Sync + 'static
 {
 	/// Create new finality proof provider using:
 	///
-	/// - client for accessing blockchain data;
+	/// - backend for accessing blockchain data;
 	/// - authority_provider for calling and proving runtime methods.
 	pub fn new(
-		client: Arc<Client<B, E, Block, RA>>,
+		backend: Arc<B>,
 		authority_provider: Arc<dyn AuthoritySetForFinalityProver<Block>>,
 	) -> Self {
-		FinalityProofProvider { client, authority_provider }
+		FinalityProofProvider { backend, authority_provider }
 	}
 }
 
-impl<B, E, Block, RA> network::FinalityProofProvider<Block> for FinalityProofProvider<B, E, Block, RA>
+impl<B, Block> network::FinalityProofProvider<Block> for FinalityProofProvider<B, Block>
 	where
 		Block: BlockT<Hash=H256>,
 		NumberFor<Block>: BlockNumberOps,
 		B: Backend<Block, Blake2Hasher> + Send + Sync + 'static,
-		E: CallExecutor<Block, Blake2Hasher> + 'static + Clone + Send + Sync,
-		RA: Send + Sync,
 {
 	fn prove_finality(
 		&self,
@@ -173,8 +168,7 @@ impl<B, E, Block, RA> network::FinalityProofProvider<Block> for FinalityProofPro
 			})?;
 		match request {
 			FinalityProofRequest::Original(request) => prove_finality::<_, _, GrandpaJustification<Block>>(
-				#[allow(deprecated)]
-				&*self.client.backend().blockchain(),
+				&*self.backend.blockchain(),
 				&*self.authority_provider,
 				request.authorities_set_id,
 				request.last_finalized,
diff --git a/substrate/core/finality-grandpa/src/import.rs b/substrate/core/finality-grandpa/src/import.rs
index 7651f9a03d9..d51d0ffd84c 100644
--- a/substrate/core/finality-grandpa/src/import.rs
+++ b/substrate/core/finality-grandpa/src/import.rs
@@ -333,16 +333,10 @@ where
 					// for the canon block the new authority set should start
 					// with. we use the minimum between the median and the local
 					// best finalized block.
-
-					#[allow(deprecated)]
-					let best_finalized_number = self.inner.backend().blockchain().info()
-						.finalized_number;
-
+					let best_finalized_number = self.inner.info().chain.finalized_number;
 					let canon_number = best_finalized_number.min(median_last_finalized_number);
-
-					#[allow(deprecated)]
 					let canon_hash =
-						self.inner.backend().blockchain().header(BlockId::Number(canon_number))
+						self.inner.header(&BlockId::Number(canon_number))
 							.map_err(|e| ConsensusError::ClientImport(e.to_string()))?
 							.expect("the given block number is less or equal than the current best finalized number; \
 									 current best finalized number must exist in chain; qed.")
@@ -414,8 +408,7 @@ impl<B, E, Block: BlockT<Hash=H256>, RA, PRA, SC> BlockImport<Block>
 
 		// early exit if block already in chain, otherwise the check for
 		// authority changes will error when trying to re-import a change block
-		#[allow(deprecated)]
-		match self.inner.backend().blockchain().status(BlockId::Hash(hash)) {
+		match self.inner.status(BlockId::Hash(hash)) {
 			Ok(blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain),
 			Ok(blockchain::BlockStatus::Unknown) => {},
 			Err(e) => return Err(ConsensusError::ClientImport(e.to_string()).into()),
diff --git a/substrate/core/finality-grandpa/src/justification.rs b/substrate/core/finality-grandpa/src/justification.rs
index a6554b1e90d..b4de8ff0586 100644
--- a/substrate/core/finality-grandpa/src/justification.rs
+++ b/substrate/core/finality-grandpa/src/justification.rs
@@ -18,7 +18,6 @@ use std::collections::{HashMap, HashSet};
 
 use client::{CallExecutor, Client};
 use client::backend::Backend;
-use client::blockchain::HeaderBackend;
 use client::error::Error as ClientError;
 use codec::{Encode, Decode};
 use grandpa::voter_set::VoterSet;
@@ -71,8 +70,7 @@ impl<Block: BlockT<Hash=H256>> GrandpaJustification<Block> {
 			loop {
 				if current_hash == commit.target_hash { break; }
 
-				#[allow(deprecated)]
-				match client.backend().blockchain().header(BlockId::Hash(current_hash))? {
+				match client.header(&BlockId::Hash(current_hash))? {
 					Some(current_header) => {
 						if *current_header.number() <= commit.target_number {
 							return error();
diff --git a/substrate/core/finality-grandpa/src/lib.rs b/substrate/core/finality-grandpa/src/lib.rs
index d6f4d768472..36af66af227 100644
--- a/substrate/core/finality-grandpa/src/lib.rs
+++ b/substrate/core/finality-grandpa/src/lib.rs
@@ -359,8 +359,7 @@ where
 	let genesis_hash = chain_info.chain.genesis_hash;
 
 	let persistent_data = aux_schema::load_persistent(
-		#[allow(deprecated)]
-		&**client.backend(),
+		&*client,
 		genesis_hash,
 		<NumberFor<Block>>::zero(),
 		|| {
@@ -452,7 +451,7 @@ fn register_finality_tracker_inherent_data_provider<B, E, Block: BlockT<Hash=H25
 			.register_provider(srml_finality_tracker::InherentDataProvider::new(move || {
 				#[allow(deprecated)]
 				{
-					let info = client.backend().blockchain().info();
+					let info = client.info().chain;
 					telemetry!(CONSENSUS_INFO; "afg.finalized";
 						"finalized_number" => ?info.finalized_number,
 						"finalized_hash" => ?info.finalized_hash,
@@ -693,8 +692,7 @@ where
 						(new.canon_hash, new.canon_number),
 					);
 
-					#[allow(deprecated)]
-					aux_schema::write_voter_set_state(&**self.env.inner.backend(), &set_state)?;
+					aux_schema::write_voter_set_state(&*self.env.inner, &set_state)?;
 					Ok(Some(set_state))
 				})?;
 
@@ -722,8 +720,7 @@ where
 					let completed_rounds = voter_set_state.completed_rounds();
 					let set_state = VoterSetState::Paused { completed_rounds };
 
-					#[allow(deprecated)]
-					aux_schema::write_voter_set_state(&**self.env.inner.backend(), &set_state)?;
+					aux_schema::write_voter_set_state(&*self.env.inner, &set_state)?;
 					Ok(Some(set_state))
 				})?;
 
diff --git a/substrate/core/finality-grandpa/src/light_import.rs b/substrate/core/finality-grandpa/src/light_import.rs
index 4d5381f1cc4..6c86c6b38d7 100644
--- a/substrate/core/finality-grandpa/src/light_import.rs
+++ b/substrate/core/finality-grandpa/src/light_import.rs
@@ -21,7 +21,7 @@ use parking_lot::RwLock;
 
 use client::{
 	CallExecutor, Client,
-	backend::{AuxStore, Backend},
+	backend::{AuxStore, Backend, Finalizer},
 	blockchain::HeaderBackend,
 	error::Error as ClientError,
 };
@@ -54,6 +54,7 @@ const LIGHT_CONSENSUS_CHANGES_KEY: &[u8] = b"grandpa_consensus_changes";
 /// Create light block importer.
 pub fn light_block_import<B, E, Block: BlockT<Hash=H256>, RA, PRA>(
 	client: Arc<Client<B, E, Block, RA>>,
+	backend: Arc<B>,
 	authority_set_provider: Arc<dyn AuthoritySetForFinalityChecker<Block>>,
 	api: Arc<PRA>,
 ) -> Result<GrandpaLightBlockImport<B, E, Block, RA>, ClientError>
@@ -65,10 +66,10 @@ pub fn light_block_import<B, E, Block: BlockT<Hash=H256>, RA, PRA>(
 		PRA::Api: GrandpaApi<Block>,
 {
 	let info = client.info();
-	#[allow(deprecated)]
-	let import_data = load_aux_import_data(info.chain.finalized_hash, &**client.backend(), api)?;
+	let import_data = load_aux_import_data(info.chain.finalized_hash, &*client, api)?;
 	Ok(GrandpaLightBlockImport {
 		client,
+		backend,
 		authority_set_provider,
 		data: Arc::new(RwLock::new(import_data)),
 	})
@@ -81,6 +82,7 @@ pub fn light_block_import<B, E, Block: BlockT<Hash=H256>, RA, PRA>(
 /// - fetching finality proofs for blocks that are enacting consensus changes.
 pub struct GrandpaLightBlockImport<B, E, Block: BlockT<Hash=H256>, RA> {
 	client: Arc<Client<B, E, Block, RA>>,
+	backend: Arc<B>,
 	authority_set_provider: Arc<dyn AuthoritySetForFinalityChecker<Block>>,
 	data: Arc<RwLock<LightImportData<Block>>>,
 }
@@ -89,6 +91,7 @@ impl<B, E, Block: BlockT<Hash=H256>, RA> Clone for GrandpaLightBlockImport<B, E,
 	fn clone(&self) -> Self {
 		GrandpaLightBlockImport {
 			client: self.client.clone(),
+			backend: self.backend.clone(),
 			authority_set_provider: self.authority_set_provider.clone(),
 			data: self.data.clone(),
 		}
@@ -131,7 +134,7 @@ impl<B, E, Block: BlockT<Hash=H256>, RA> BlockImport<Block>
 		block: BlockImportParams<Block>,
 		new_cache: HashMap<well_known_cache_keys::Id, Vec<u8>>,
 	) -> Result<ImportResult, Self::Error> {
-		do_import_block::<_, _, _, _, GrandpaJustification<Block>>(
+		do_import_block::<_, _, _, GrandpaJustification<Block>>(
 			&*self.client, &mut *self.data.write(), block, new_cache
 		)
 	}
@@ -176,8 +179,9 @@ impl<B, E, Block: BlockT<Hash=H256>, RA> FinalityProofImport<Block>
 		finality_proof: Vec<u8>,
 		verifier: &mut dyn Verifier<Block>,
 	) -> Result<(Block::Hash, NumberFor<Block>), Self::Error> {
-		do_import_finality_proof::<_, _, _, _, GrandpaJustification<Block>>(
+		do_import_finality_proof::<_, _, _, GrandpaJustification<Block>>(
 			&*self.client,
+			self.backend.clone(),
 			&*self.authority_set_provider,
 			&mut *self.data.write(),
 			hash,
@@ -227,16 +231,19 @@ impl<B: BlockT<Hash=H256>> FinalityProofRequestBuilder<B> for GrandpaFinalityPro
 }
 
 /// Try to import new block.
-fn do_import_block<B, E, Block: BlockT<Hash=H256>, RA, J>(
-	mut client: &Client<B, E, Block, RA>,
+fn do_import_block<B, C, Block: BlockT<Hash=H256>, J>(
+	mut client: C,
 	data: &mut LightImportData<Block>,
 	mut block: BlockImportParams<Block>,
 	new_cache: HashMap<well_known_cache_keys::Id, Vec<u8>>,
 ) -> Result<ImportResult, ConsensusError>
 	where
+		C: HeaderBackend<Block>
+			+ AuxStore
+			+ Finalizer<Block, Blake2Hasher, B>
+			+ BlockImport<Block>
+			+ Clone,
 		B: Backend<Block, Blake2Hasher> + 'static,
-		E: CallExecutor<Block, Blake2Hasher> + 'static + Clone + Send + Sync,
-		RA: Send + Sync,
 		NumberFor<Block>: grandpa::BlockNumberOps,
 		DigestFor<Block>: Encode,
 		J: ProvableJustification<Block::Header>,
@@ -247,7 +254,7 @@ fn do_import_block<B, E, Block: BlockT<Hash=H256>, RA, J>(
 	// we don't want to finalize on `inner.import_block`
 	let justification = block.justification.take();
 	let enacts_consensus_change = !new_cache.is_empty();
-	let import_result = BlockImport::import_block(&mut client, block, new_cache);
+	let import_result = client.import_block(block, new_cache);
 
 	let mut imported_aux = match import_result {
 		Ok(ImportResult::Imported(aux)) => aux,
@@ -264,7 +271,7 @@ fn do_import_block<B, E, Block: BlockT<Hash=H256>, RA, J>(
 				hash,
 			);
 
-			do_import_justification::<_, _, _, _, J>(client, data, hash, number, justification)
+			do_import_justification::<_, _, _, J>(client, data, hash, number, justification)
 		},
 		None if enacts_consensus_change => {
 			trace!(
@@ -283,8 +290,9 @@ fn do_import_block<B, E, Block: BlockT<Hash=H256>, RA, J>(
 }
 
 /// Try to import finality proof.
-fn do_import_finality_proof<B, E, Block: BlockT<Hash=H256>, RA, J>(
-	client: &Client<B, E, Block, RA>,
+fn do_import_finality_proof<B, C, Block: BlockT<Hash=H256>, J>(
+	client: C,
+	backend: Arc<B>,
 	authority_set_provider: &dyn AuthoritySetForFinalityChecker<Block>,
 	data: &mut LightImportData<Block>,
 	_hash: Block::Hash,
@@ -293,9 +301,12 @@ fn do_import_finality_proof<B, E, Block: BlockT<Hash=H256>, RA, J>(
 	verifier: &mut dyn Verifier<Block>,
 ) -> Result<(Block::Hash, NumberFor<Block>), ConsensusError>
 	where
+		C: HeaderBackend<Block>
+			+ AuxStore
+			+ Finalizer<Block, Blake2Hasher, B>
+			+ BlockImport<Block>
+			+ Clone,
 		B: Backend<Block, Blake2Hasher> + 'static,
-		E: CallExecutor<Block, Blake2Hasher> + 'static + Clone + Send + Sync,
-		RA: Send + Sync,
 		DigestFor<Block>: Encode,
 		NumberFor<Block>: grandpa::BlockNumberOps,
 		J: ProvableJustification<Block::Header>,
@@ -303,8 +314,7 @@ fn do_import_finality_proof<B, E, Block: BlockT<Hash=H256>, RA, J>(
 	let authority_set_id = data.authority_set.set_id();
 	let authorities = data.authority_set.authorities();
 	let finality_effects = crate::finality_proof::check_finality_proof(
-		#[allow(deprecated)]
-		&*client.backend().blockchain(),
+		backend.blockchain(),
 		authority_set_id,
 		authorities,
 		authority_set_provider,
@@ -322,13 +332,12 @@ fn do_import_finality_proof<B, E, Block: BlockT<Hash=H256>, RA, J>(
 		if let Some(authorities) = new_authorities {
 			cache.insert(well_known_cache_keys::AUTHORITIES, authorities.encode());
 		}
-		do_import_block::<_, _, _, _, J>(client, data, block_to_import, cache)?;
+		do_import_block::<_, _, _, J>(client.clone(), data, block_to_import, cache)?;
 	}
 
 	// try to import latest justification
 	let finalized_block_hash = finality_effects.block;
-	#[allow(deprecated)]
-	let finalized_block_number = client.backend().blockchain()
+	let finalized_block_number = backend.blockchain()
 		.expect_block_number_from_id(&BlockId::Hash(finality_effects.block))
 		.map_err(|e| ConsensusError::ClientImport(e.to_string()))?;
 	do_finalize_block(
@@ -349,17 +358,19 @@ fn do_import_finality_proof<B, E, Block: BlockT<Hash=H256>, RA, J>(
 }
 
 /// Try to import justification.
-fn do_import_justification<B, E, Block: BlockT<Hash=H256>, RA, J>(
-	client: &Client<B, E, Block, RA>,
+fn do_import_justification<B, C, Block: BlockT<Hash=H256>, J>(
+	client: C,
 	data: &mut LightImportData<Block>,
 	hash: Block::Hash,
 	number: NumberFor<Block>,
 	justification: Justification,
 ) -> Result<ImportResult, ConsensusError>
 	where
+		C: HeaderBackend<Block>
+			+ AuxStore
+			+ Finalizer<Block, Blake2Hasher, B>
+			+ Clone,
 		B: Backend<Block, Blake2Hasher> + 'static,
-		E: CallExecutor<Block, Blake2Hasher> + 'static + Clone + Send + Sync,
-		RA: Send + Sync,
 		NumberFor<Block>: grandpa::BlockNumberOps,
 		J: ProvableJustification<Block::Header>,
 {
@@ -418,17 +429,19 @@ fn do_import_justification<B, E, Block: BlockT<Hash=H256>, RA, J>(
 }
 
 /// Finalize the block.
-fn do_finalize_block<B, E, Block: BlockT<Hash=H256>, RA>(
-	client: &Client<B, E, Block, RA>,
+fn do_finalize_block<B, C, Block: BlockT<Hash=H256>>(
+	client: C,
 	data: &mut LightImportData<Block>,
 	hash: Block::Hash,
 	number: NumberFor<Block>,
 	justification: Justification,
 ) -> Result<ImportResult, ConsensusError>
 	where
+		C: HeaderBackend<Block>
+			+ AuxStore
+			+ Finalizer<Block, Blake2Hasher, B>
+			+ Clone,
 		B: Backend<Block, Blake2Hasher> + 'static,
-		E: CallExecutor<Block, Blake2Hasher> + 'static + Clone + Send + Sync,
-		RA: Send + Sync,
 		NumberFor<Block>: grandpa::BlockNumberOps,
 {
 	// finalize the block
@@ -439,7 +452,7 @@ fn do_finalize_block<B, E, Block: BlockT<Hash=H256>, RA>(
 
 	// forget obsoleted consensus changes
 	let consensus_finalization_res = data.consensus_changes
-		.finalize((number, hash), |at_height| canonical_at_height(&client, (hash, number), true, at_height));
+		.finalize((number, hash), |at_height| canonical_at_height(client.clone(), (hash, number), true, at_height));
 	match consensus_finalization_res {
 		Ok((true, _)) => require_insert_aux(
 			&client,
@@ -506,20 +519,14 @@ fn load_aux_import_data<B, Block: BlockT<Hash=H256>, PRA>(
 }
 
 /// Insert into aux store. If failed, return error && show inconsistency warning.
-fn require_insert_aux<T: Encode, B, E, Block: BlockT<Hash=H256>, RA>(
-	client: &Client<B, E, Block, RA>,
+fn require_insert_aux<T: Encode, A: AuxStore>(
+	store: &A,
 	key: &[u8],
 	value: &T,
 	value_type: &str,
-) -> Result<(), ConsensusError>
-	where
-		B: Backend<Block, Blake2Hasher> + 'static,
-		E: CallExecutor<Block, Blake2Hasher> + 'static + Clone + Send + Sync,
-{
-	#[allow(deprecated)]
-	let backend = &**client.backend();
+) -> Result<(), ConsensusError> {
 	let encoded = value.encode();
-	let update_res = Backend::insert_aux(backend, &[(key, &encoded[..])], &[]);
+	let update_res = store.insert_aux(&[(key, &encoded[..])], &[]);
 	if let Err(error) = update_res {
 		return Err(on_post_finalization_error(error, value_type));
 	}
@@ -617,6 +624,7 @@ pub mod tests {
 	/// Creates light block import that ignores justifications that came outside of finality proofs.
 	pub fn light_block_import_without_justifications<B, E, Block: BlockT<Hash=H256>, RA, PRA>(
 		client: Arc<Client<B, E, Block, RA>>,
+		backend: Arc<B>,
 		authority_set_provider: Arc<dyn AuthoritySetForFinalityChecker<Block>>,
 		api: Arc<PRA>,
 	) -> Result<NoJustificationsImport<B, E, Block, RA>, ClientError>
@@ -627,14 +635,14 @@ pub mod tests {
 			PRA: ProvideRuntimeApi,
 			PRA::Api: GrandpaApi<Block>,
 	{
-		light_block_import(client, authority_set_provider, api).map(NoJustificationsImport)
+		light_block_import(client, backend, authority_set_provider, api).map(NoJustificationsImport)
 	}
 
 	fn import_block(
 		new_cache: HashMap<well_known_cache_keys::Id, Vec<u8>>,
 		justification: Option<Justification>,
 	) -> ImportResult {
-		let client = test_client::new_light();
+		let (client, _backend) = test_client::new_light();
 		let mut import_data = LightImportData {
 			last_finalized: Default::default(),
 			authority_set: LightAuthoritySet::genesis(vec![(AuthorityId::from_slice(&[1; 32]), 1)]),
@@ -656,7 +664,7 @@ pub mod tests {
 			auxiliary: Vec::new(),
 			fork_choice: ForkChoiceStrategy::LongestChain,
 		};
-		do_import_block::<_, _, _, _, TestJustification>(
+		do_import_block::<_, _, _, TestJustification>(
 			&client,
 			&mut import_data,
 			block,
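Editorial sketch: the light-import helpers above no longer require a concrete `Client<B, E, Block, RA>`; any value that satisfies the listed bounds can be threaded through. A minimal, hedged illustration of the pattern the new signatures assume (the helper name is illustrative and not part of this patch; it assumes the imports already present in `light_import.rs`):

	// Anything that can read headers, persist aux data, finalize and import blocks
	// now satisfies the light-import bounds -- e.g. an `Arc<Client<..>>`.
	fn assert_light_import_client<B, C, Block>(client: C)
	where
		Block: BlockT<Hash = H256>,
		B: Backend<Block, Blake2Hasher> + 'static,
		C: HeaderBackend<Block>
			+ AuxStore
			+ Finalizer<Block, Blake2Hasher, B>
			+ BlockImport<Block>
			+ Clone,
	{
		let _ = client;
	}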
diff --git a/substrate/core/finality-grandpa/src/observer.rs b/substrate/core/finality-grandpa/src/observer.rs
index 8a2d539f496..39eeafcb1b1 100644
--- a/substrate/core/finality-grandpa/src/observer.rs
+++ b/substrate/core/finality-grandpa/src/observer.rs
@@ -301,8 +301,7 @@ where
 				let completed_rounds = self.persistent_data.set_state.read().completed_rounds();
 				let set_state = VoterSetState::Paused { completed_rounds };
 
-				#[allow(deprecated)]
-				crate::aux_schema::write_voter_set_state(&**self.client.backend(), &set_state)?;
+				crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?;
 
 				set_state
 			},
@@ -315,8 +314,7 @@ where
 					(new.canon_hash, new.canon_number),
 				);
 
-				#[allow(deprecated)]
-				crate::aux_schema::write_voter_set_state(&**self.client.backend(), &set_state)?;
+				crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?;
 
 				set_state
 			},
diff --git a/substrate/core/finality-grandpa/src/tests.rs b/substrate/core/finality-grandpa/src/tests.rs
index c82982e20d4..f73cab97f8a 100644
--- a/substrate/core/finality-grandpa/src/tests.rs
+++ b/substrate/core/finality-grandpa/src/tests.rs
@@ -112,21 +112,17 @@ impl TestNetFactory for GrandpaTestNet {
 		)
 	{
 		match client {
-			PeersClient::Full(ref client) => {
-				#[allow(deprecated)]
-				let select_chain = LongestChain::new(
-					client.backend().clone()
-				);
+			PeersClient::Full(ref client, ref backend) => {
 				let (import, link) = block_import(
 					client.clone(),
 					Arc::new(self.test_config.clone()),
-					select_chain,
+					LongestChain::new(backend.clone()),
 				).expect("Could not create block import for fresh peer.");
 				let justification_import = Box::new(import.clone());
 				let block_import = Box::new(import);
 				(block_import, Some(justification_import), None, None, Mutex::new(Some(link)))
 			},
-			PeersClient::Light(ref client) => {
+			PeersClient::Light(ref client, ref backend) => {
 				use crate::light_import::tests::light_block_import_without_justifications;
 
 				let authorities_provider = Arc::new(self.test_config.clone());
@@ -134,6 +130,7 @@ impl TestNetFactory for GrandpaTestNet {
 				// => light clients will try to fetch finality proofs
 				let import = light_block_import_without_justifications(
 					client.clone(),
+					backend.clone(),
 					authorities_provider,
 					Arc::new(self.test_config.clone())
 				).expect("Could not create block import for fresh peer.");
@@ -150,11 +147,11 @@ impl TestNetFactory for GrandpaTestNet {
 		client: PeersClient
 	) -> Option<Arc<dyn network::FinalityProofProvider<Block>>> {
 		match client {
-			PeersClient::Full(ref client) => {
+			PeersClient::Full(_, ref backend)  => {
 				let authorities_provider = Arc::new(self.test_config.clone());
-				Some(Arc::new(FinalityProofProvider::new(client.clone(), authorities_provider)))
+				Some(Arc::new(FinalityProofProvider::new(backend.clone(), authorities_provider)))
 			},
-			PeersClient::Light(_) => None,
+			PeersClient::Light(_, _) => None,
 		}
 	}
 
@@ -589,10 +586,7 @@ fn transition_3_voters_twice_1_full_observer() {
 		assert_eq!(full_client.info().chain.best_number, 1,
 					"Peer #{} failed to sync", i);
 
-		let set: AuthoritySet<Hash, BlockNumber> = crate::aux_schema::load_authorities(
-			#[allow(deprecated)]
-			&**full_client.backend()
-		).unwrap();
+		let set: AuthoritySet<Hash, BlockNumber> = crate::aux_schema::load_authorities(&*full_client).unwrap();
 
 		assert_eq!(set.current(), (0, make_ids(peers_a).as_slice()));
 		assert_eq!(set.pending_changes().count(), 0);
@@ -685,10 +679,7 @@ fn transition_3_voters_twice_1_full_observer() {
 				.for_each(move |_| Ok(()))
 				.map(move |()| {
 					let full_client = client.as_full().expect("only full clients are used in test");
-					let set: AuthoritySet<Hash, BlockNumber> = crate::aux_schema::load_authorities(
-						#[allow(deprecated)]
-						&**full_client.backend()
-					).unwrap();
+					let set: AuthoritySet<Hash, BlockNumber> = crate::aux_schema::load_authorities(&*full_client).unwrap();
 
 					assert_eq!(set.current(), (2, make_ids(peers_c).as_slice()));
 					assert_eq!(set.pending_changes().count(), 0);
@@ -963,10 +954,7 @@ fn force_change_to_new_set() {
 				"Peer #{} failed to sync", i);
 
 		let full_client = peer.client().as_full().expect("only full clients are used in test");
-		let set: AuthoritySet<Hash, BlockNumber> = crate::aux_schema::load_authorities(
-			#[allow(deprecated)]
-			&**full_client.backend()
-		).unwrap();
+		let set: AuthoritySet<Hash, BlockNumber> = crate::aux_schema::load_authorities(&*full_client).unwrap();
 
 		assert_eq!(set.current(), (1, voters.as_slice()));
 		assert_eq!(set.pending_changes().count(), 0);
@@ -1099,7 +1087,9 @@ fn voter_persists_its_votes() {
 	assert_eq!(net.peer(0).client().info().chain.best_number, 20,
 			   "Peer #{} failed to sync", 0);
 
-	let client = net.peer(0).client().clone();
+
+	let peer = net.peer(0);
+	let client = peer.client().clone();
 	let net = Arc::new(Mutex::new(net));
 
 	// channel between the voter and the main controller.
@@ -1258,9 +1248,8 @@ fn voter_persists_its_votes() {
 					})
 					.for_each(|_| Ok(()))
 					.and_then(move |_| {
-						#[allow(deprecated)]
 						let block_30_hash =
-							net.lock().peer(0).client().as_full().unwrap().backend().blockchain().hash(30).unwrap().unwrap();
+							net.lock().peer(0).client().as_full().unwrap().hash(30).unwrap().unwrap();
 
 						// we restart alice's voter
 						voter_tx.unbounded_send(()).unwrap();
diff --git a/substrate/core/network/src/chain.rs b/substrate/core/network/src/chain.rs
index e857aa095c9..a73d7e53e21 100644
--- a/substrate/core/network/src/chain.rs
+++ b/substrate/core/network/src/chain.rs
@@ -134,8 +134,9 @@ impl<B, E, Block, RA> Client<Block> for SubstrateClient<B, E, Block, RA> where
 		}
 
 		let tree_route = ::client::blockchain::tree_route(
-			#[allow(deprecated)]
-			self.backend().blockchain(),
+			|id| self.header(&id)?.ok_or_else(||
+				client::error::Error::UnknownBlock(format!("{:?}", id))
+			),
 			BlockId::Hash(*block),
 			BlockId::Hash(*base),
 		)?;
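Editorial sketch: `tree_route` now takes a header lookup rather than a blockchain-backend handle, so any `HeaderBackend` (including the client itself) can drive it. A hedged caller-side sketch, with `client`, `block` and `base` standing in for a header provider and two block hashes:

	// Build the route between two blocks using only header lookups; a missing header
	// is surfaced as `UnknownBlock`, as in the hunk above.
	let route = ::client::blockchain::tree_route(
		|id| client.header(&id)?.ok_or_else(||
			client::error::Error::UnknownBlock(format!("{:?}", id))
		),
		BlockId::Hash(block),
		BlockId::Hash(base),
	)?;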
diff --git a/substrate/core/network/src/test/mod.rs b/substrate/core/network/src/test/mod.rs
index 091047394b6..a025bd663a1 100644
--- a/substrate/core/network/src/test/mod.rs
+++ b/substrate/core/network/src/test/mod.rs
@@ -27,10 +27,13 @@ use std::sync::Arc;
 use crate::config::build_multiaddr;
 use log::trace;
 use crate::chain::FinalityProofProvider;
-use client::{self, ClientInfo, BlockchainEvents, BlockImportNotification, FinalityNotifications, FinalityNotification};
-use client::{in_mem::Backend as InMemoryBackend, error::Result as ClientResult};
+use client::{
+	self, ClientInfo, BlockchainEvents, BlockImportNotification, FinalityNotifications,
+	FinalityNotification, LongestChain
+};
+use client::error::Result as ClientResult;
 use client::block_builder::BlockBuilder;
-use client::backend::AuxStore;
+use client::backend::{AuxStore, Backend, Finalizer};
 use crate::config::Roles;
 use consensus::import_queue::BasicQueue;
 use consensus::import_queue::{
@@ -45,7 +48,7 @@ use crate::{NetworkWorker, NetworkService, config::ProtocolId};
 use crate::config::{NetworkConfiguration, TransportConfig, BoxFinalityProofRequestBuilder};
 use libp2p::PeerId;
 use parking_lot::Mutex;
-use primitives::{H256, Blake2Hasher};
+use primitives::H256;
 use crate::protocol::{Context, ProtocolConfig};
 use sr_primitives::generic::{BlockId, OpaqueDigestItemId};
 use sr_primitives::traits::{Block as BlockT, Header, NumberFor};
@@ -55,13 +58,14 @@ use crate::specialization::NetworkSpecialization;
 use test_client::{self, AccountKeyring};
 
 pub use test_client::runtime::{Block, Extrinsic, Hash, Transfer};
-pub use test_client::TestClient;
+pub use test_client::{TestClient, TestClientBuilder, TestClientBuilderExt};
 
 type AuthorityId = babe_primitives::AuthorityId;
 
 #[cfg(any(test, feature = "test-helpers"))]
 /// A Verifier that accepts all blocks and passes them on with the configured
 /// finality to be imported.
+#[derive(Clone)]
 pub struct PassThroughVerifier(pub bool);
 
 #[cfg(any(test, feature = "test-helpers"))]
@@ -131,66 +135,57 @@ pub type PeersLightClient =
 
 #[derive(Clone)]
 pub enum PeersClient {
-	Full(Arc<PeersFullClient>),
-	Light(Arc<PeersLightClient>),
+	Full(Arc<PeersFullClient>, Arc<test_client::Backend>),
+	Light(Arc<PeersLightClient>, Arc<test_client::LightBackend>),
 }
 
 impl PeersClient {
 	pub fn as_full(&self) -> Option<Arc<PeersFullClient>> {
 		match *self {
-			PeersClient::Full(ref client) => Some(client.clone()),
+			PeersClient::Full(ref client, ref _backend) => Some(client.clone()),
 			_ => None,
 		}
 	}
 
 	pub fn as_block_import(&self) -> BoxBlockImport<Block> {
 		match *self {
-			PeersClient::Full(ref client) => Box::new(client.clone()) as _,
-			PeersClient::Light(ref client) => Box::new(client.clone()) as _,
-		}
-	}
-
-	pub fn as_in_memory_backend(&self) -> InMemoryBackend<Block, Blake2Hasher> {
-		#[allow(deprecated)]
-		match *self {
-			PeersClient::Full(ref client) => client.backend().as_in_memory(),
-			PeersClient::Light(_) => unimplemented!("TODO"),
+			PeersClient::Full(ref client, ref _backend) => Box::new(client.clone()) as _,
+			PeersClient::Light(ref client, ref _backend) => Box::new(client.clone()) as _,
 		}
 	}
 
 	pub fn get_aux(&self, key: &[u8]) -> ClientResult<Option<Vec<u8>>> {
-		#[allow(deprecated)]
 		match *self {
-			PeersClient::Full(ref client) => client.backend().get_aux(key),
-			PeersClient::Light(ref client) => client.backend().get_aux(key),
+			PeersClient::Full(ref client, ref _backend) => client.get_aux(key),
+			PeersClient::Light(ref client, ref _backend) => client.get_aux(key),
 		}
 	}
 
 	pub fn info(&self) -> ClientInfo<Block> {
 		match *self {
-			PeersClient::Full(ref client) => client.info(),
-			PeersClient::Light(ref client) => client.info(),
+			PeersClient::Full(ref client, ref _backend) => client.info(),
+			PeersClient::Light(ref client, ref _backend) => client.info(),
 		}
 	}
 
 	pub fn header(&self, block: &BlockId<Block>) -> ClientResult<Option<<Block as BlockT>::Header>> {
 		match *self {
-			PeersClient::Full(ref client) => client.header(block),
-			PeersClient::Light(ref client) => client.header(block),
+			PeersClient::Full(ref client, ref _backend) => client.header(block),
+			PeersClient::Light(ref client, ref _backend) => client.header(block),
 		}
 	}
 
 	pub fn justification(&self, block: &BlockId<Block>) -> ClientResult<Option<Justification>> {
 		match *self {
-			PeersClient::Full(ref client) => client.justification(block),
-			PeersClient::Light(ref client) => client.justification(block),
+			PeersClient::Full(ref client, ref _backend) => client.justification(block),
+			PeersClient::Light(ref client, ref _backend) => client.justification(block),
 		}
 	}
 
 	pub fn finality_notification_stream(&self) -> FinalityNotifications<Block> {
 		match *self {
-			PeersClient::Full(ref client) => client.finality_notification_stream(),
-			PeersClient::Light(ref client) => client.finality_notification_stream(),
+			PeersClient::Full(ref client, ref _backend) => client.finality_notification_stream(),
+			PeersClient::Light(ref client, ref _backend) => client.finality_notification_stream(),
 		}
 	}
 
@@ -201,8 +196,8 @@ impl PeersClient {
 		notify: bool
 	) -> ClientResult<()> {
 		match *self {
-			PeersClient::Full(ref client) => client.finalize_block(id, justification, notify),
-			PeersClient::Light(ref client) => client.finalize_block(id, justification, notify),
+			PeersClient::Full(ref client, ref _backend) => client.finalize_block(id, justification, notify),
+			PeersClient::Light(ref client, ref _backend) => client.finalize_block(id, justification, notify),
 		}
 	}
 }
@@ -216,6 +211,8 @@ pub struct Peer<D, S: NetworkSpecialization<Block>> {
 	/// We keep a copy of the block_import so that we can invoke it for locally-generated blocks,
 	/// instead of going through the import queue.
 	block_import: Box<dyn BlockImport<Block, Error = ConsensusError>>,
+	select_chain: Option<LongestChain<test_client::Backend, Block>>,
+	backend: Option<Arc<test_client::Backend>>,
 	network: NetworkWorker<Block, S, <Block as BlockT>::Hash>,
 	imported_blocks_stream: Box<dyn Stream<Item = BlockImportNotification<Block>, Error = ()> + Send>,
 	finality_notification_stream: Box<dyn Stream<Item = FinalityNotification<Block>, Error = ()> + Send>,
@@ -227,6 +224,11 @@ impl<D, S: NetworkSpecialization<Block>> Peer<D, S> {
 		self.network.service().is_major_syncing()
 	}
 
+	/// Returns a clone of the local `SelectChain`, only available on full nodes.
+	pub fn select_chain(&self) -> Option<LongestChain<test_client::Backend, Block>> {
+		self.select_chain.clone()
+	}
+
 	/// Returns the number of peers we're connected to.
 	pub fn num_peers(&self) -> usize {
 		self.network.num_connected_peers()
@@ -342,6 +344,33 @@ impl<D, S: NetworkSpecialization<Block>> Peer<D, S> {
 	pub fn network_service(&self) -> &Arc<NetworkService<Block, S, <Block as BlockT>::Hash>> {
 		&self.network.service()
 	}
+
+	/// Test helper to compare the canonical chain state of two (networked)
+	/// clients.
+	/// Potentially costly, as it creates in-memory copies of both blockchains in order
+	/// to compare them. If a cheaper check is sufficient for your test (e.g. comparing
+	/// `.info()`), prefer that over this helper.
+	pub fn blockchain_canon_equals(&self, other: &Self) -> bool {
+		if let (Some(mine), Some(others)) = (self.backend.clone(), other.backend.clone()) {
+			mine.as_in_memory().blockchain()
+				.canon_equals_to(others.as_in_memory().blockchain())
+		} else {
+			false
+		}
+	}
+
+	/// Count the current number of known blocks. Note that:
+	///  1. This might be expensive, as it creates an in-memory copy of the chain
+	///     just to count the blocks; if a cheaper check is available
+	///     (e.g. `info.best_hash`), use that instead.
+	///  2. The result is neither monotonically increasing nor exact: orphaned and
+	///     proven-to-never-finalize blocks may be pruned at any time, so this
+	///     number can drop again.
+	pub fn blocks_count(&self) -> usize {
+		self.backend.as_ref().map(
+			|backend| backend.as_in_memory().blockchain().blocks_count()
+		).unwrap_or(0)
+	}
 }
 
 pub struct EmptyTransactionPool;
@@ -467,11 +496,14 @@ pub trait TestNetFactory: Sized {
 
 	/// Add a full peer.
 	fn add_full_peer(&mut self, config: &ProtocolConfig) {
-		let client = Arc::new(test_client::new());
-		let verifier = self.make_verifier(PeersClient::Full(client.clone()), config);
+		let test_client_builder = TestClientBuilder::with_default_backend();
+		let backend = test_client_builder.backend();
+		let (c, longest_chain) = test_client_builder.build_with_longest_chain();
+		let client = Arc::new(c);
+		let verifier = self.make_verifier(PeersClient::Full(client.clone(), backend.clone()), config);
 		let verifier = VerifierAdapter(Arc::new(Mutex::new(Box::new(verifier) as Box<_>)));
 		let (block_import, justification_import, finality_proof_import, finality_proof_request_builder, data)
-			= self.make_block_import(PeersClient::Full(client.clone()));
+			= self.make_block_import(PeersClient::Full(client.clone(), backend.clone()));
 		let block_import = BlockImportAdapter(Arc::new(Mutex::new(block_import)));
 
 		let import_queue = Box::new(BasicQueue::new(
@@ -491,7 +523,7 @@ pub trait TestNetFactory: Sized {
 				..NetworkConfiguration::default()
 			},
 			chain: client.clone(),
-			finality_proof_provider: self.make_finality_proof_provider(PeersClient::Full(client.clone())),
+			finality_proof_provider: self.make_finality_proof_provider(PeersClient::Full(client.clone(), backend.clone())),
 			finality_proof_request_builder,
 			on_demand: None,
 			transaction_pool: Arc::new(EmptyTransactionPool),
@@ -512,7 +544,9 @@ pub trait TestNetFactory: Sized {
 
 			peers.push(Peer {
 				data,
-				client: PeersClient::Full(client),
+				client: PeersClient::Full(client, backend.clone()),
+				select_chain: Some(longest_chain),
+				backend: Some(backend),
 				imported_blocks_stream,
 				finality_notification_stream,
 				block_import: Box::new(block_import),
@@ -527,11 +561,12 @@ pub trait TestNetFactory: Sized {
 		let mut config = config.clone();
 		config.roles = Roles::LIGHT;
 
-		let client = Arc::new(test_client::new_light());
-		let verifier = self.make_verifier(PeersClient::Light(client.clone()), &config);
+		let (c, backend) = test_client::new_light();
+		let client = Arc::new(c);
+		let verifier = self.make_verifier(PeersClient::Light(client.clone(), backend.clone()), &config);
 		let verifier = VerifierAdapter(Arc::new(Mutex::new(Box::new(verifier) as Box<_>)));
 		let (block_import, justification_import, finality_proof_import, finality_proof_request_builder, data)
-			= self.make_block_import(PeersClient::Light(client.clone()));
+			= self.make_block_import(PeersClient::Light(client.clone(), backend.clone()));
 		let block_import = BlockImportAdapter(Arc::new(Mutex::new(block_import)));
 
 		let import_queue = Box::new(BasicQueue::new(
@@ -551,7 +586,7 @@ pub trait TestNetFactory: Sized {
 				..NetworkConfiguration::default()
 			},
 			chain: client.clone(),
-			finality_proof_provider: self.make_finality_proof_provider(PeersClient::Light(client.clone())),
+			finality_proof_provider: self.make_finality_proof_provider(PeersClient::Light(client.clone(), backend.clone())),
 			finality_proof_request_builder,
 			on_demand: None,
 			transaction_pool: Arc::new(EmptyTransactionPool),
@@ -573,8 +608,10 @@ pub trait TestNetFactory: Sized {
 			peers.push(Peer {
 				data,
 				verifier,
+				select_chain: None,
+				backend: None,
 				block_import: Box::new(block_import),
-				client: PeersClient::Light(client),
+				client: PeersClient::Light(client, backend),
 				imported_blocks_stream,
 				finality_notification_stream,
 				network,
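Editorial sketch: the new `Peer` helpers replace the removed `as_in_memory_backend()` accessor in the sync tests below. A short usage sketch, assuming a `TestNet` whose peers have already synced:

	let peer1 = &net.peers()[1];
	// Compare canonical chains via the in-memory copy of each peer's backend.
	assert!(net.peers()[0].blockchain_canon_equals(peer1));
	// Count known blocks (expensive, see the doc comment above) ...
	assert_eq!(net.peer(0).blocks_count(), net.peer(1).blocks_count());
	// ... and full peers also expose their `LongestChain` for consensus tests.
	let _select_chain = net.peer(0).select_chain().expect("full peers have a select chain");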
diff --git a/substrate/core/network/src/test/sync.rs b/substrate/core/network/src/test/sync.rs
index f3a8f0c8ea4..6cba21c1719 100644
--- a/substrate/core/network/src/test/sync.rs
+++ b/substrate/core/network/src/test/sync.rs
@@ -14,7 +14,6 @@
 // You should have received a copy of the GNU General Public License
 // along with Substrate.  If not, see <http://www.gnu.org/licenses/>.
 
-use client::{backend::Backend, blockchain::HeaderBackend};
 use crate::config::Roles;
 use consensus::BlockOrigin;
 use futures03::TryFutureExt as _;
@@ -36,8 +35,8 @@ fn test_ancestor_search_when_common_is(n: usize) {
 	net.peer(2).push_blocks(100, false);
 
 	net.block_until_sync(&mut runtime);
-	assert!(net.peer(0).client.as_in_memory_backend().blockchain()
-		.canon_equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
+	let peer1 = &net.peers()[1];
+	assert!(net.peers()[0].blockchain_canon_equals(peer1));
 }
 
 #[test]
@@ -156,8 +155,8 @@ fn sync_from_two_peers_works() {
 	net.peer(1).push_blocks(100, false);
 	net.peer(2).push_blocks(100, false);
 	net.block_until_sync(&mut runtime);
-	assert!(net.peer(0).client.as_in_memory_backend().blockchain()
-		.equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
+	let peer1 = &net.peers()[1];
+	assert!(net.peers()[0].blockchain_canon_equals(peer1));
 	assert!(!net.peer(0).is_major_syncing());
 }
 
@@ -170,8 +169,8 @@ fn sync_from_two_peers_with_ancestry_search_works() {
 	net.peer(1).push_blocks(100, false);
 	net.peer(2).push_blocks(100, false);
 	net.block_until_sync(&mut runtime);
-	assert!(net.peer(0).client.as_in_memory_backend().blockchain()
-		.canon_equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
+	let peer1 = &net.peers()[1];
+	assert!(net.peers()[0].blockchain_canon_equals(peer1));
 }
 
 #[test]
@@ -185,8 +184,8 @@ fn ancestry_search_works_when_backoff_is_one() {
 	net.peer(2).push_blocks(2, false);
 
 	net.block_until_sync(&mut runtime);
-	assert!(net.peer(0).client.as_in_memory_backend().blockchain()
-		.canon_equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
+	let peer1 = &net.peers()[1];
+	assert!(net.peers()[0].blockchain_canon_equals(peer1));
 }
 
 #[test]
@@ -200,8 +199,8 @@ fn ancestry_search_works_when_ancestor_is_genesis() {
 	net.peer(2).push_blocks(100, false);
 
 	net.block_until_sync(&mut runtime);
-	assert!(net.peer(0).client.as_in_memory_backend().blockchain()
-		.canon_equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
+	let peer1 = &net.peers()[1];
+	assert!(net.peers()[0].blockchain_canon_equals(peer1));
 }
 
 #[test]
@@ -226,8 +225,8 @@ fn sync_long_chain_works() {
 	let mut net = TestNet::new(2);
 	net.peer(1).push_blocks(500, false);
 	net.block_until_sync(&mut runtime);
-	assert!(net.peer(0).client.as_in_memory_backend().blockchain()
-		.equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
+	let peer1 = &net.peers()[1];
+	assert!(net.peers()[0].blockchain_canon_equals(peer1));
 }
 
 #[test]
@@ -238,8 +237,8 @@ fn sync_no_common_longer_chain_fails() {
 	net.peer(0).push_blocks(20, true);
 	net.peer(1).push_blocks(20, false);
 	net.block_until_sync(&mut runtime);
-	assert!(!net.peer(0).client.as_in_memory_backend().blockchain()
-		.canon_equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
+	let peer1 = &net.peers()[1];
+	assert!(!net.peers()[0].blockchain_canon_equals(peer1));
 }
 
 #[test]
@@ -334,11 +333,11 @@ fn sync_after_fork_works() {
 	net.peer(2).push_blocks(1, false);
 
 	// peer 1 has the best chain
-	let peer1_chain = net.peer(1).client.as_in_memory_backend().blockchain().clone();
 	net.block_until_sync(&mut runtime);
-	assert!(net.peer(0).client.as_in_memory_backend().blockchain().canon_equals_to(&peer1_chain));
-	assert!(net.peer(1).client.as_in_memory_backend().blockchain().canon_equals_to(&peer1_chain));
-	assert!(net.peer(2).client.as_in_memory_backend().blockchain().canon_equals_to(&peer1_chain));
+	let peer1 = &net.peers()[1];
+	assert!(net.peers()[0].blockchain_canon_equals(peer1));
+	assert!(net.peers()[1].blockchain_canon_equals(peer1));
+	assert!(net.peers()[2].blockchain_canon_equals(peer1));
 }
 
 #[test]
@@ -354,8 +353,8 @@ fn syncs_all_forks() {
 
 	net.block_until_sync(&mut runtime);
 	// Check that all peers have all of the blocks.
-	assert_eq!(9, net.peer(0).client.as_in_memory_backend().blockchain().blocks_count());
-	assert_eq!(9, net.peer(1).client.as_in_memory_backend().blockchain().blocks_count());
+	assert_eq!(9, net.peer(0).blocks_count());
+	assert_eq!(9, net.peer(1).blocks_count());
 }
 
 #[test]
@@ -368,11 +367,11 @@ fn own_blocks_are_announced() {
 
 	net.block_until_sync(&mut runtime);
 
-	assert_eq!(net.peer(0).client.as_in_memory_backend().blockchain().info().best_number, 1);
-	assert_eq!(net.peer(1).client.as_in_memory_backend().blockchain().info().best_number, 1);
-	let peer0_chain = net.peer(0).client.as_in_memory_backend().blockchain().clone();
-	assert!(net.peer(1).client.as_in_memory_backend().blockchain().canon_equals_to(&peer0_chain));
-	assert!(net.peer(2).client.as_in_memory_backend().blockchain().canon_equals_to(&peer0_chain));
+	assert_eq!(net.peer(0).client.info().chain.best_number, 1);
+	assert_eq!(net.peer(1).client.info().chain.best_number, 1);
+	let peer0 = &net.peers()[0];
+	assert!(net.peers()[1].blockchain_canon_equals(peer0));
+	assert!(net.peers()[2].blockchain_canon_equals(peer0));
 }
 
 #[test]
diff --git a/substrate/core/rpc/src/chain/tests.rs b/substrate/core/rpc/src/chain/tests.rs
index 36157df71df..af00f220e45 100644
--- a/substrate/core/rpc/src/chain/tests.rs
+++ b/substrate/core/rpc/src/chain/tests.rs
@@ -183,7 +183,7 @@ fn should_return_finalized_hash() {
 	);
 
 	// finalize
-	client.client.finalize_block(BlockId::number(1), None, true).unwrap();
+	client.client.finalize_block(BlockId::number(1), None).unwrap();
 	assert_matches!(
 		client.finalized_head(),
 		Ok(ref x) if x == &client.client.block_hash(1).unwrap().unwrap()
@@ -240,7 +240,7 @@ fn should_notify_about_finalized_block() {
 
 		let builder = api.client.new_block(Default::default()).unwrap();
 		api.client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
-		api.client.finalize_block(BlockId::number(1), None, true).unwrap();
+		api.client.finalize_block(BlockId::number(1), None).unwrap();
 	}
 
 	// assert initial head sent.
diff --git a/substrate/core/service/src/builder.rs b/substrate/core/service/src/builder.rs
index 3b079e549d8..fbcfa0f09db 100644
--- a/substrate/core/service/src/builder.rs
+++ b/substrate/core/service/src/builder.rs
@@ -57,9 +57,12 @@ use transaction_pool::txpool::{self, ChainApi, Pool as TransactionPool};
 /// The order in which the `with_*` methods are called doesn't matter, as the correct binding of
 /// generics is done when you call `build`.
 ///
-pub struct ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> {
+pub struct ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp,
+	TNetP, TExPool, TRpc, Backend>
+{
 	config: Configuration<TCfg, TGen>,
 	client: Arc<TCl>,
+	backend: Arc<Backend>,
 	keystore: Arc<RwLock<Keystore>>,
 	fetcher: Option<TFchr>,
 	select_chain: Option<TSc>,
@@ -72,7 +75,7 @@ pub struct ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFpr
 	marker: PhantomData<(TBl, TRtApi)>,
 }
 
-impl<TCfg, TGen> ServiceBuilder<(), (), TCfg, TGen, (), (), (), (), (), (), (), (), ()>
+impl<TCfg, TGen> ServiceBuilder<(), (), TCfg, TGen, (), (), (), (), (), (), (), (), (), ()>
 where TGen: Serialize + DeserializeOwned + BuildStorage {
 	/// Start the service builder with a configuration.
 	pub fn new_full<TBl: BlockT<Hash=H256>, TRtApi, TExecDisp: NativeExecutionDispatch>(
@@ -95,7 +98,8 @@ where TGen: Serialize + DeserializeOwned + BuildStorage {
 		(),
 		(),
 		(),
-		()
+		(),
+		client_db::Backend<TBl>,
 	>, Error> {
 		let keystore = Keystore::open(config.keystore_path.clone(), config.keystore_password.clone())?;
 
@@ -110,17 +114,20 @@ where TGen: Serialize + DeserializeOwned + BuildStorage {
 
 		let executor = NativeExecutor::<TExecDisp>::new(config.default_heap_pages);
 
-		let client = Arc::new(client_db::new_client(
+		let (client, backend) = client_db::new_client(
 			db_settings,
 			executor,
 			&config.chain_spec,
 			config.execution_strategies.clone(),
 			Some(keystore.clone()),
-		)?);
+		)?;
+
+		let client = Arc::new(client);
 
 		Ok(ServiceBuilder {
 			config,
 			client,
+			backend,
 			keystore,
 			fetcher: None,
 			select_chain: None,
@@ -177,7 +184,8 @@ where TGen: Serialize + DeserializeOwned + BuildStorage {
 		(),
 		(),
 		(),
-		()
+		(),
+		client::light::backend::Backend<client_db::light::LightStorage<TBl>, network::OnDemand<TBl>, Blake2Hasher>,
 	>, Error> {
 		let keystore = Keystore::open(config.keystore_path.clone(), config.keystore_password.clone())?;
 
@@ -196,12 +204,13 @@ where TGen: Serialize + DeserializeOwned + BuildStorage {
 		let light_blockchain = client::light::new_light_blockchain(db_storage);
 		let fetch_checker = Arc::new(client::light::new_fetch_checker(light_blockchain.clone(), executor.clone()));
 		let fetcher = Arc::new(network::OnDemand::new(fetch_checker));
-		let client_backend = client::light::new_light_backend(light_blockchain, fetcher.clone());
-		let client = client::light::new_light(client_backend, fetcher.clone(), &config.chain_spec, executor)?;
+		let backend = client::light::new_light_backend(light_blockchain, fetcher.clone());
+		let client = client::light::new_light(backend.clone(), fetcher.clone(), &config.chain_spec, executor)?;
 
 		Ok(ServiceBuilder {
 			config,
 			client: Arc::new(client),
+			backend,
 			keystore,
 			fetcher: Some(fetcher),
 			select_chain: None,
@@ -216,14 +225,19 @@ where TGen: Serialize + DeserializeOwned + BuildStorage {
 	}
 }
 
-impl<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc>
-	ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc> {
+impl<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc, Backend>
+	ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc, Backend> {
 
 	/// Returns a reference to the client that was stored in this builder.
 	pub fn client(&self) -> &Arc<TCl> {
 		&self.client
 	}
 
+	/// Returns a reference to the backend that was stored in this builder.
+	pub fn backend(&self) -> &Arc<Backend> {
+		&self.backend
+	}
+
 	/// Returns a reference to the select-chain that was stored in this builder.
 	pub fn select_chain(&self) -> Option<&TSc> {
 		self.select_chain.as_ref()
@@ -231,15 +245,16 @@ impl<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPo
 
 	/// Defines which head-of-chain strategy to use.
 	pub fn with_opt_select_chain<USc>(
-		mut self,
-		select_chain_builder: impl FnOnce(&mut Configuration<TCfg, TGen>, Arc<TCl>) -> Result<Option<USc>, Error>
+		self,
+		select_chain_builder: impl FnOnce(&Configuration<TCfg, TGen>, &Arc<Backend>) -> Result<Option<USc>, Error>
 	) -> Result<ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, USc, TImpQu, TFprb, TFpp,
-		TNetP, TExPool, TRpc>, Error> {
-		let select_chain = select_chain_builder(&mut self.config, self.client.clone())?;
+		TNetP, TExPool, TRpc, Backend>, Error> {
+		let select_chain = select_chain_builder(&self.config, &self.backend)?;
 
 		Ok(ServiceBuilder {
 			config: self.config,
 			client: self.client,
+			backend: self.backend,
 			keystore: self.keystore,
 			fetcher: self.fetcher,
 			select_chain,
@@ -256,22 +271,22 @@ impl<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPo
 	/// Defines which head-of-chain strategy to use.
 	pub fn with_select_chain<USc>(
 		self,
-		builder: impl FnOnce(&mut Configuration<TCfg, TGen>, Arc<TCl>) -> Result<USc, Error>
+		builder: impl FnOnce(&Configuration<TCfg, TGen>, &Arc<Backend>) -> Result<USc, Error>
 	) -> Result<ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, USc, TImpQu, TFprb, TFpp,
-		TNetP, TExPool, TRpc>, Error> {
-		self.with_opt_select_chain(|cfg, cl| builder(cfg, cl).map(Option::Some))
+		TNetP, TExPool, TRpc, Backend>, Error> {
+		self.with_opt_select_chain(|cfg, b| builder(cfg, b).map(Option::Some))
 	}
 
 	/// Defines which import queue to use.
 	pub fn with_import_queue<UImpQu>(
-		mut self,
-		builder: impl FnOnce(&mut Configuration<TCfg, TGen>, Arc<TCl>, Option<TSc>, Arc<TExPool>)
+		self,
+		builder: impl FnOnce(&Configuration<TCfg, TGen>, Arc<TCl>, Option<TSc>, Arc<TExPool>)
 			-> Result<UImpQu, Error>
 	) -> Result<ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, UImpQu, TFprb, TFpp,
-			TNetP, TExPool, TRpc>, Error>
+			TNetP, TExPool, TRpc, Backend>, Error>
 	where TSc: Clone {
 		let import_queue = builder(
-			&mut self.config,
+			&self.config,
 			self.client.clone(),
 			self.select_chain.clone(),
 			self.transaction_pool.clone()
@@ -280,6 +295,7 @@ impl<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPo
 		Ok(ServiceBuilder {
 			config: self.config,
 			client: self.client,
+			backend: self.backend,
 			keystore: self.keystore,
 			fetcher: self.fetcher,
 			select_chain: self.select_chain,
@@ -298,12 +314,13 @@ impl<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPo
 		self,
 		network_protocol_builder: impl FnOnce(&Configuration<TCfg, TGen>) -> Result<UNetP, Error>
 	) -> Result<ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp,
-		UNetP, TExPool, TRpc>, Error> {
+		UNetP, TExPool, TRpc, Backend>, Error> {
 		let network_protocol = network_protocol_builder(&self.config)?;
 
 		Ok(ServiceBuilder {
 			config: self.config,
 			client: self.client,
+			backend: self.backend,
 			keystore: self.keystore,
 			fetcher: self.fetcher,
 			select_chain: self.select_chain,
@@ -320,7 +337,7 @@ impl<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPo
 	/// Defines which strategy to use for providing finality proofs.
 	pub fn with_opt_finality_proof_provider(
 		self,
-		builder: impl FnOnce(Arc<TCl>) -> Result<Option<Arc<FinalityProofProvider<TBl>>>, Error>
+		builder: impl FnOnce(Arc<TCl>, Arc<Backend>) -> Result<Option<Arc<dyn FinalityProofProvider<TBl>>>, Error>
 	) -> Result<ServiceBuilder<
 		TBl,
 		TRtApi,
@@ -331,16 +348,18 @@ impl<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPo
 		TSc,
 		TImpQu,
 		TFprb,
-		Arc<FinalityProofProvider<TBl>>,
+		Arc<dyn FinalityProofProvider<TBl>>,
 		TNetP,
 		TExPool,
-		TRpc
+		TRpc,
+		Backend,
 	>, Error> {
-		let finality_proof_provider = builder(self.client.clone())?;
+		let finality_proof_provider = builder(self.client.clone(), self.backend.clone())?;
 
 		Ok(ServiceBuilder {
 			config: self.config,
 			client: self.client,
+			backend: self.backend,
 			keystore: self.keystore,
 			fetcher: self.fetcher,
 			select_chain: self.select_chain,
@@ -357,7 +376,7 @@ impl<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPo
 	/// Defines which strategy to use for providing finality proofs.
 	pub fn with_finality_proof_provider(
 		self,
-		build: impl FnOnce(Arc<TCl>) -> Result<Arc<FinalityProofProvider<TBl>>, Error>
+		build: impl FnOnce(Arc<TCl>, Arc<Backend>) -> Result<Arc<dyn FinalityProofProvider<TBl>>, Error>
 	) -> Result<ServiceBuilder<
 		TBl,
 		TRtApi,
@@ -368,25 +387,27 @@ impl<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPo
 		TSc,
 		TImpQu,
 		TFprb,
-		Arc<FinalityProofProvider<TBl>>,
+		Arc<dyn FinalityProofProvider<TBl>>,
 		TNetP,
 		TExPool,
-		TRpc
+		TRpc,
+		Backend,
 	>, Error> {
-		self.with_opt_finality_proof_provider(|client| build(client).map(Option::Some))
+		self.with_opt_finality_proof_provider(|client, backend| build(client, backend).map(Option::Some))
 	}
 
 	/// Defines which import queue to use.
 	pub fn with_import_queue_and_opt_fprb<UImpQu, UFprb>(
-		mut self,
-		builder: impl FnOnce(&mut Configuration<TCfg, TGen>, Arc<TCl>, Option<TSc>, Arc<TExPool>)
+		self,
+		builder: impl FnOnce(&Configuration<TCfg, TGen>, Arc<TCl>, Arc<Backend>, Option<TSc>, Arc<TExPool>)
 			-> Result<(UImpQu, Option<UFprb>), Error>
 	) -> Result<ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, UImpQu, UFprb, TFpp,
-		TNetP, TExPool, TRpc>, Error>
+		TNetP, TExPool, TRpc, Backend>, Error>
 	where TSc: Clone {
 		let (import_queue, fprb) = builder(
-			&mut self.config,
+			&self.config,
 			self.client.clone(),
+			self.backend.clone(),
 			self.select_chain.clone(),
 			self.transaction_pool.clone()
 		)?;
@@ -394,6 +415,7 @@ impl<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPo
 		Ok(ServiceBuilder {
 			config: self.config,
 			client: self.client,
+			backend: self.backend,
 			keystore: self.keystore,
 			fetcher: self.fetcher,
 			select_chain: self.select_chain,
@@ -410,12 +432,12 @@ impl<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPo
 	/// Defines which import queue to use.
 	pub fn with_import_queue_and_fprb<UImpQu, UFprb>(
 		self,
-		builder: impl FnOnce(&mut Configuration<TCfg, TGen>, Arc<TCl>, Option<TSc>, Arc<TExPool>)
+		builder: impl FnOnce(&Configuration<TCfg, TGen>, Arc<TCl>, Arc<Backend>, Option<TSc>, Arc<TExPool>)
 			-> Result<(UImpQu, UFprb), Error>
 	) -> Result<ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, UImpQu, UFprb, TFpp,
-			TNetP, TExPool, TRpc>, Error>
+			TNetP, TExPool, TRpc, Backend>, Error>
 	where TSc: Clone {
-		self.with_import_queue_and_opt_fprb(|cfg, cl, sc, tx| builder(cfg, cl, sc, tx).map(|(q, f)| (q, Some(f))))
+		self.with_import_queue_and_opt_fprb(|cfg, cl, b, sc, tx| builder(cfg, cl, b, sc, tx).map(|(q, f)| (q, Some(f))))
 	}
 
 	/// Defines which transaction pool to use.
@@ -423,12 +445,13 @@ impl<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPo
 		self,
 		transaction_pool_builder: impl FnOnce(transaction_pool::txpool::Options, Arc<TCl>) -> Result<UExPool, Error>
 	) -> Result<ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp,
-		TNetP, UExPool, TRpc>, Error> {
+		TNetP, UExPool, TRpc, Backend>, Error> {
 		let transaction_pool = transaction_pool_builder(self.config.transaction_pool.clone(), self.client.clone())?;
 
 		Ok(ServiceBuilder {
 			config: self.config,
 			client: self.client,
+			backend: self.backend,
 			keystore: self.keystore,
 			fetcher: self.fetcher,
 			select_chain: self.select_chain,
@@ -447,12 +470,13 @@ impl<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPo
 		self,
 		rpc_ext_builder: impl FnOnce(Arc<TCl>, Arc<TExPool>) -> URpc
 	) -> Result<ServiceBuilder<TBl, TRtApi, TCfg, TGen, TCl, TFchr, TSc, TImpQu, TFprb, TFpp,
-		TNetP, TExPool, URpc>, Error> {
+		TNetP, TExPool, URpc, Backend>, Error> {
 		let rpc_extensions = rpc_ext_builder(self.client.clone(), self.transaction_pool.clone());
 
 		Ok(ServiceBuilder {
 			config: self.config,
 			client: self.client,
+			backend: self.backend,
 			keystore: self.keystore,
 			fetcher: self.fetcher,
 			select_chain: self.select_chain,
@@ -508,9 +532,9 @@ pub trait ServiceBuilderRevert {
 	) -> Result<(), Error>;
 }
 
-impl<TBl, TRtApi, TCfg, TGen, TBackend, TExec, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc>
+impl<TBl, TRtApi, TCfg, TGen, TBackend, TExec, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc, Backend>
 	ServiceBuilderImport for ServiceBuilder<TBl, TRtApi, TCfg, TGen, Client<TBackend, TExec, TBl, TRtApi>,
-		TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc>
+		TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc, Backend>
 where
 	TBl: BlockT<Hash = <Blake2Hasher as Hasher>::Out>,
 	TBackend: 'static + client::backend::Backend<TBl, Blake2Hasher> + Send,
@@ -532,7 +556,7 @@ where
 
 impl<TBl, TRtApi, TCfg, TGen, TBackend, TExec, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc>
 	ServiceBuilderExport for ServiceBuilder<TBl, TRtApi, TCfg, TGen, Client<TBackend, TExec, TBl, TRtApi>,
-		TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc>
+		TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc, TBackend>
 where
 	TBl: BlockT<Hash = <Blake2Hasher as Hasher>::Out>,
 	TBackend: 'static + client::backend::Backend<TBl, Blake2Hasher> + Send,
@@ -555,7 +579,7 @@ where
 
 impl<TBl, TRtApi, TCfg, TGen, TBackend, TExec, TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc>
 	ServiceBuilderRevert for ServiceBuilder<TBl, TRtApi, TCfg, TGen, Client<TBackend, TExec, TBl, TRtApi>,
-		TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc>
+		TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc, TBackend>
 where
 	TBl: BlockT<Hash = <Blake2Hasher as Hasher>::Out>,
 	TBackend: 'static + client::backend::Backend<TBl, Blake2Hasher> + Send,
@@ -583,10 +607,11 @@ ServiceBuilder<
 	TSc,
 	TImpQu,
 	BoxFinalityProofRequestBuilder<TBl>,
-	Arc<FinalityProofProvider<TBl>>,
+	Arc<dyn FinalityProofProvider<TBl>>,
 	TNetP,
 	TransactionPool<TExPoolApi>,
-	TRpc
+	TRpc,
+	TBackend
 > where
 	Client<TBackend, TExec, TBl, TRtApi>: ProvideRuntimeApi,
 	<Client<TBackend, TExec, TBl, TRtApi> as ProvideRuntimeApi>::Api:
@@ -608,7 +633,6 @@ ServiceBuilder<
 {
 	/// Builds the service.
 	pub fn build(self) -> Result<NewService<
-		Configuration<TCfg, TGen>,
 		TBl,
 		Client<TBackend, TExec, TBl, TRtApi>,
 		TSc,
@@ -629,6 +653,7 @@ ServiceBuilder<
 		let (
 			client,
 			fetcher,
+			backend,
 			keystore,
 			select_chain,
 			import_queue,
@@ -640,6 +665,7 @@ ServiceBuilder<
 		) = (
 			self.client,
 			self.fetcher,
+			self.backend,
 			self.keystore,
 			self.select_chain,
 			self.import_queue,
@@ -657,6 +683,7 @@ ServiceBuilder<
 				Ok((
 					client,
 					fetcher,
+					backend,
 					keystore,
 					select_chain,
 					import_queue,
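Editorial sketch: since the builder now carries the backend as an explicit type parameter, the `with_*` callbacks receive it directly instead of reaching through `client.backend()`. A hedged sketch of the call sites this enables, mirroring the node-template changes further below:

	let builder = builder
		.with_select_chain(|_config, backend|
			Ok(substrate_client::LongestChain::new(backend.clone()))
		)?
		.with_finality_proof_provider(|client, backend|
			// `backend` is the `Arc<Backend>` cloned out of the builder.
			Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, client)) as _)
		)?;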
diff --git a/substrate/core/service/src/lib.rs b/substrate/core/service/src/lib.rs
index a19121188a1..ad359767b29 100644
--- a/substrate/core/service/src/lib.rs
+++ b/substrate/core/service/src/lib.rs
@@ -66,7 +66,7 @@ pub use futures::future::Executor;
 const DEFAULT_PROTOCOL_ID: &str = "sup";
 
 /// Substrate service.
-pub struct NewService<TCfg, TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc> {
+pub struct NewService<TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc> {
 	client: Arc<TCl>,
 	select_chain: Option<TSc>,
 	network: Arc<TNet>,
@@ -91,8 +91,6 @@ pub struct NewService<TCfg, TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc> {
 	/// If spawning a background task is not possible, we instead push the task into this `Vec`.
 	/// The elements must then be polled manually.
 	to_poll: Vec<Box<dyn Future<Item = (), Error = ()> + Send>>,
-	/// Configuration of this Service
-	config: TCfg,
 	rpc_handlers: rpc_servers::RpcHandler<rpc::Metadata>,
 	_rpc: Box<dyn std::any::Any + Send + Sync>,
 	_telemetry: Option<tel::Telemetry>,
@@ -148,6 +146,7 @@ macro_rules! new_impl {
 		let (
 			client,
 			on_demand,
+			backend,
 			keystore,
 			select_chain,
 			import_queue,
@@ -156,7 +155,7 @@ macro_rules! new_impl {
 			network_protocol,
 			transaction_pool,
 			rpc_extensions
-		) = $build_components(&mut $config)?;
+		) = $build_components(&$config)?;
 		let import_queue = Box::new(import_queue);
 		let chain_info = client.info().chain;
 
@@ -206,8 +205,7 @@ macro_rules! new_impl {
 		let network = network_mut.service().clone();
 		let network_status_sinks = Arc::new(Mutex::new(Vec::new()));
 
-		#[allow(deprecated)]
-		let offchain_storage = client.backend().offchain_storage();
+		let offchain_storage = backend.offchain_storage();
 		let offchain_workers = match ($config.offchain_worker, offchain_storage) {
 			(true, Some(db)) => {
 				Some(Arc::new(offchain::OffchainWorkers::new(client.clone(), db)))
@@ -301,9 +299,7 @@ macro_rules! new_impl {
 			let bandwidth_download = net_status.average_download_per_sec;
 			let bandwidth_upload = net_status.average_upload_per_sec;
 
-			#[allow(deprecated)]
-			let backend = (*client_).backend();
-			let used_state_cache_size = match backend.used_state_cache_size(){
+			let used_state_cache_size = match info.used_state_cache_size {
 				Some(size) => size,
 				None => 0,
 			};
@@ -426,7 +422,6 @@ macro_rules! new_impl {
 			to_spawn_tx,
 			to_spawn_rx,
 			to_poll: Vec::new(),
-			$config,
 			rpc_handlers,
 			_rpc: rpc,
 			_telemetry: telemetry,
@@ -451,8 +446,6 @@ pub trait AbstractService: 'static + Future<Item = (), Error = Error> +
 	type CallExecutor: 'static + client::CallExecutor<Self::Block, Blake2Hasher> + Send + Sync + Clone;
 	/// API that the runtime provides.
 	type RuntimeApi: Send + Sync;
-	/// Configuration struct of the service.
-	type Config;
 	/// Chain selection algorithm.
 	type SelectChain: consensus_common::SelectChain<Self::Block>;
 	/// API of the transaction pool.
@@ -463,12 +456,6 @@ pub trait AbstractService: 'static + Future<Item = (), Error = Error> +
 	/// Get event stream for telemetry connection established events.
 	fn telemetry_on_connect_stream(&self) -> mpsc::UnboundedReceiver<()>;
 
-	/// Returns the configuration passed on construction.
-	fn config(&self) -> &Self::Config;
-
-	/// Returns the configuration passed on construction.
-	fn config_mut(&mut self) -> &mut Self::Config;
-
 	/// return a shared instance of Telemetry (if enabled)
 	fn telemetry(&self) -> Option<tel::Telemetry>;
 
@@ -516,10 +503,10 @@ pub trait AbstractService: 'static + Future<Item = (), Error = Error> +
 	fn on_exit(&self) -> ::exit_future::Exit;
 }
 
-impl<TCfg, TBl, TBackend, TExec, TRtApi, TSc, TNetSpec, TExPoolApi, TOc> AbstractService for
-	NewService<TCfg, TBl, Client<TBackend, TExec, TBl, TRtApi>, TSc, NetworkStatus<TBl>,
+impl<TBl, TBackend, TExec, TRtApi, TSc, TNetSpec, TExPoolApi, TOc> AbstractService for
+	NewService<TBl, Client<TBackend, TExec, TBl, TRtApi>, TSc, NetworkStatus<TBl>,
 		NetworkService<TBl, TNetSpec, H256>, TransactionPool<TExPoolApi>, TOc>
-where TCfg: 'static + Send,
+where
 	TBl: BlockT<Hash = H256>,
 	TBackend: 'static + client::backend::Backend<TBl, Blake2Hasher>,
 	TExec: 'static + client::CallExecutor<TBl, Blake2Hasher> + Send + Sync + Clone,
@@ -533,19 +520,10 @@ where TCfg: 'static + Send,
 	type Backend = TBackend;
 	type CallExecutor = TExec;
 	type RuntimeApi = TRtApi;
-	type Config = TCfg;
 	type SelectChain = TSc;
 	type TransactionPoolApi = TExPoolApi;
 	type NetworkSpecialization = TNetSpec;
 
-	fn config(&self) -> &Self::Config {
-		&self.config
-	}
-
-	fn config_mut(&mut self) -> &mut Self::Config {
-		&mut self.config
-	}
-
 	fn telemetry_on_connect_stream(&self) -> mpsc::UnboundedReceiver<()> {
 		let (sink, stream) = mpsc::unbounded();
 		self._telemetry_on_connect_sinks.lock().push(sink);
@@ -611,8 +589,9 @@ where TCfg: 'static + Send,
 	}
 }
 
-impl<TCfg, TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc> Future for
-NewService<TCfg, TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc> {
+impl<TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc> Future for
+	NewService<TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc>
+{
 	type Item = ();
 	type Error = Error;
 
@@ -643,8 +622,9 @@ NewService<TCfg, TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc> {
 	}
 }
 
-impl<TCfg, TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc> Executor<Box<dyn Future<Item = (), Error = ()> + Send>> for
-NewService<TCfg, TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc> {
+impl<TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc> Executor<Box<dyn Future<Item = (), Error = ()> + Send>> for
+	NewService<TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc>
+{
 	fn execute(
 		&self,
 		future: Box<dyn Future<Item = (), Error = ()> + Send>
@@ -787,8 +767,9 @@ pub struct NetworkStatus<B: BlockT> {
 	pub average_upload_per_sec: u64,
 }
 
-impl<TCfg, TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc> Drop for
-NewService<TCfg, TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc> {
+impl<TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc> Drop for
+	NewService<TBl, TCl, TSc, TNetStatus, TNet, TTxPool, TOc>
+{
 	fn drop(&mut self) {
 		debug!(target: "service", "Substrate service shutdown");
 		if let Some(signal) = self.signal.take() {
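Editorial sketch: with `used_state_cache_size` moved into the client info and `offchain_storage` read from the backend kept by the builder, the service macro no longer needs `client.backend()` at all. A minimal sketch of the two replacements, assuming `client` and `backend` are the handles destructured from the builder:

	let offchain_storage = backend.offchain_storage();
	let used_state_cache_size = client.info().used_state_cache_size.unwrap_or(0);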
diff --git a/substrate/core/test-client/src/client_ext.rs b/substrate/core/test-client/src/client_ext.rs
index b29a7db471e..7d3d7301c55 100644
--- a/substrate/core/test-client/src/client_ext.rs
+++ b/substrate/core/test-client/src/client_ext.rs
@@ -16,7 +16,7 @@
 
 //! Client extension for tests.
 
-use client::{self, Client};
+use client::{self, Client, backend::Finalizer};
 use consensus::{
 	BlockImportParams, BlockImport, BlockOrigin, Error as ConsensusError,
 	ForkChoiceStrategy,
@@ -126,7 +126,7 @@ impl<B, E, RA, Block> ClientExt<Block> for Client<B, E, Block, RA>
 		id: BlockId<Block>,
 		justification: Option<Justification>,
 	) -> client::error::Result<()> {
-		self.finalize_block(id, justification, true)
+		Finalizer::finalize_block(self, id, justification, true)
 	}
 
 	fn genesis_hash(&self) -> <Block as BlockT>::Hash {
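Editorial sketch: because `Client` now also implements the `Finalizer` trait, the test extension disambiguates the call with a fully qualified path while keeping the two-argument form for test code. A hedged usage sketch:

	// In a test, finalize block #1 without an extra justification; the extension
	// forwards to `Finalizer::finalize_block(client, id, justification, /* notify */ true)`.
	client.finalize_block(BlockId::number(1), None).unwrap();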
diff --git a/substrate/core/test-client/src/lib.rs b/substrate/core/test-client/src/lib.rs
index 79e1027b954..17e26708468 100644
--- a/substrate/core/test-client/src/lib.rs
+++ b/substrate/core/test-client/src/lib.rs
@@ -100,6 +100,11 @@ impl<Block, Executor, G: GenesisInit> TestClientBuilder<
 		let backend = Arc::new(Backend::new_test(std::u32::MAX, std::u64::MAX));
 		Self::with_backend(backend)
 	}
+
+	/// Gives access to the underlying backend of the clients created by this builder.
+	pub fn backend(&self) -> Arc<Backend<Block>> {
+		self.backend.clone()
+	}
 }
 
 impl<Executor, Backend, G: GenesisInit> TestClientBuilder<Executor, Backend, G> {
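Editorial sketch: the new `backend()` accessor lets tests grab the backend before the builder is consumed, which is how the network test harness constructs full peers. A short sketch of the intended flow, mirroring `add_full_peer` above:

	let builder = TestClientBuilder::with_default_backend();
	let backend = builder.backend();
	let (client, longest_chain) = builder.build_with_longest_chain();
	let client = Arc::new(client);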
diff --git a/substrate/core/test-runtime/client/src/lib.rs b/substrate/core/test-runtime/client/src/lib.rs
index ad5badf8bf3..767c862083f 100644
--- a/substrate/core/test-runtime/client/src/lib.rs
+++ b/substrate/core/test-runtime/client/src/lib.rs
@@ -22,6 +22,7 @@ pub mod trait_tests;
 
 mod block_builder_ext;
 
+use std::sync::Arc;
 pub use block_builder_ext::BlockBuilderExt;
 pub use generic_test_client::*;
 pub use runtime;
@@ -228,8 +229,10 @@ pub fn new() -> Client<Backend> {
 }
 
 /// Creates new light client instance used for tests.
-pub fn new_light() -> client::Client<LightBackend, LightExecutor, runtime::Block, runtime::RuntimeApi> {
-	use std::sync::Arc;
+pub fn new_light() -> (
+	client::Client<LightBackend, LightExecutor, runtime::Block, runtime::RuntimeApi>,
+	Arc<LightBackend>,
+) {
 
 	let storage = client_db::light::LightStorage::new_test();
 	let blockchain = Arc::new(client::light::blockchain::Blockchain::new(storage));
@@ -247,7 +250,10 @@ pub fn new_light() -> client::Client<LightBackend, LightExecutor, runtime::Block
 		local_call_executor,
 	);
 
-	TestClientBuilder::with_backend(backend)
+	(TestClientBuilder::with_backend(backend.clone())
 		.build_with_executor(call_executor)
-		.0
+		.0,
+	backend,
+	)
+
 }
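Editorial sketch: since `new_light()` now returns the backend alongside the client, light-client tests pass it explicitly to whatever used to call `client.backend()`. A hedged usage sketch mirroring the GRANDPA light-import test above; `authorities_provider` and `api` stand in for the authority-set provider and runtime-API handles of that test:

	let (client, backend) = test_client::new_light();
	let client = Arc::new(client);
	// Wire the backend into the light block import instead of `client.backend()`.
	let import = light_block_import_without_justifications(
		client.clone(),
		backend.clone(),
		authorities_provider,
		api,
	).expect("light import can be created");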
diff --git a/substrate/node-template/src/service.rs b/substrate/node-template/src/service.rs
index 7bb4cc8d52d..5b53a646d3e 100644
--- a/substrate/node-template/src/service.rs
+++ b/substrate/node-template/src/service.rs
@@ -39,9 +39,8 @@ macro_rules! new_full_start {
 		let builder = substrate_service::ServiceBuilder::new_full::<
 			node_template_runtime::opaque::Block, node_template_runtime::RuntimeApi, crate::service::Executor
 		>($config)?
-			.with_select_chain(|_config, client| {
-				#[allow(deprecated)]
-				Ok(substrate_client::LongestChain::new(client.backend().clone()))
+			.with_select_chain(|_config, backend| {
+				Ok(substrate_client::LongestChain::new(backend.clone()))
 			})?
 			.with_transaction_pool(|config, client|
 				Ok(transaction_pool::txpool::Pool::new(config, transaction_pool::ChainApi::new(client)))
@@ -81,11 +80,16 @@ pub fn new_full<C: Send + Default + 'static>(config: Configuration<C, GenesisCon
 	-> Result<impl AbstractService, ServiceError>
 {
 
+	let is_authority = config.roles.is_authority();
+	let name = config.name.clone();
+	let disable_grandpa = config.disable_grandpa;
+	let force_authoring = config.force_authoring;
+
 	let (builder, mut import_setup, inherent_data_providers, mut tasks_to_spawn) = new_full_start!(config);
 
 	let service = builder.with_network_protocol(|_| Ok(NodeProtocol::new()))?
-		.with_finality_proof_provider(|client|
-			Ok(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _)
+		.with_finality_proof_provider(|client, backend|
+			Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, client)) as _)
 		)?
 		.build()?;
 
@@ -104,7 +108,7 @@ pub fn new_full<C: Send + Default + 'static>(config: Configuration<C, GenesisCon
 		}
 	}
 
-	if service.config().roles.is_authority() {
+	if is_authority {
 		let proposer = basic_authorship::ProposerFactory {
 			client: service.client(),
 			transaction_pool: service.transaction_pool(),
@@ -123,7 +127,7 @@ pub fn new_full<C: Send + Default + 'static>(config: Configuration<C, GenesisCon
 			env: proposer,
 			sync_oracle: service.network(),
 			inherent_data_providers: inherent_data_providers.clone(),
-			force_authoring: service.config().force_authoring,
+			force_authoring,
 			time_source: babe_link,
 		};
 
@@ -135,19 +139,19 @@ pub fn new_full<C: Send + Default + 'static>(config: Configuration<C, GenesisCon
 		service.spawn_essential_task(select);
 	}
 
-	let config = grandpa::Config {
+	let grandpa_config = grandpa::Config {
 		// FIXME #1578 make this available through chainspec
 		gossip_duration: Duration::from_millis(333),
 		justification_period: 4096,
-		name: Some(service.config().name.clone()),
+		name: Some(name),
 		keystore: Some(service.keystore()),
 	};
 
-	match (service.config().roles.is_authority(), service.config().disable_grandpa) {
+	match (is_authority, disable_grandpa) {
 		(false, false) => {
 			// start the lightweight GRANDPA observer
 			service.spawn_task(Box::new(grandpa::run_grandpa_observer(
-				config,
+				grandpa_config,
 				link_half,
 				service.network(),
 				service.on_exit(),
@@ -155,8 +159,8 @@ pub fn new_full<C: Send + Default + 'static>(config: Configuration<C, GenesisCon
 		},
 		(true, false) => {
 			// start the full GRANDPA voter
-			let grandpa_config = grandpa::GrandpaParams {
-				config: config,
+			let voter_config = grandpa::GrandpaParams {
+				config: grandpa_config,
 				link: link_half,
 				network: service.network(),
 				inherent_data_providers: inherent_data_providers.clone(),
@@ -166,7 +170,7 @@ pub fn new_full<C: Send + Default + 'static>(config: Configuration<C, GenesisCon
 
 			// the GRANDPA voter task is considered infallible, i.e.
 			// if it fails we take down the service with it.
-			service.spawn_essential_task(grandpa::run_grandpa_voter(grandpa_config)?);
+			service.spawn_essential_task(grandpa::run_grandpa_voter(voter_config)?);
 		},
 		(_, true) => {
 			grandpa::setup_disabled_grandpa(
@@ -187,21 +191,19 @@ pub fn new_light<C: Send + Default + 'static>(config: Configuration<C, GenesisCo
 	let inherent_data_providers = InherentDataProviders::new();
 
 	ServiceBuilder::new_light::<Block, RuntimeApi, Executor>(config)?
-		.with_select_chain(|_config, client| {
-			#[allow(deprecated)]
-			Ok(LongestChain::new(client.backend().clone()))
+		.with_select_chain(|_config, backend| {
+			Ok(LongestChain::new(backend.clone()))
 		})?
 		.with_transaction_pool(|config, client|
 			Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client)))
 		)?
-		.with_import_queue_and_fprb(|_config, client, _select_chain, transaction_pool| {
-			#[allow(deprecated)]
-			let fetch_checker = client.backend().blockchain().fetcher()
+		.with_import_queue_and_fprb(|_config, client, backend, _select_chain, transaction_pool| {
+			let fetch_checker = backend.blockchain().fetcher()
 				.upgrade()
 				.map(|fetcher| fetcher.checker().clone())
 				.ok_or_else(|| "Trying to start light import queue without active fetch checker")?;
 			let block_import = grandpa::light_block_import::<_, _, _, RuntimeApi, _>(
-				client.clone(), Arc::new(fetch_checker), client.clone()
+				client.clone(), backend, Arc::new(fetch_checker), client.clone()
 			)?;
 
 			let finality_proof_import = block_import.clone();
@@ -223,8 +225,8 @@ pub fn new_light<C: Send + Default + 'static>(config: Configuration<C, GenesisCo
 			Ok((import_queue, finality_proof_request_builder))
 		})?
 		.with_network_protocol(|_| Ok(NodeProtocol::new()))?
-		.with_finality_proof_provider(|client|
-			Ok(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _)
+		.with_finality_proof_provider(|client, backend|
+			Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, client)) as _)
 		)?
 		.build()
 }
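
The hunks above also drop every remaining `service.config()` call: `new_full_start!` takes ownership of `config`, so the fields that are still needed afterwards are read out up front. A self-contained illustration of that pattern (hypothetical `Config` struct, not the real `Configuration` type):

struct Config {
	is_authority: bool,
	name: String,
	disable_grandpa: bool,
	force_authoring: bool,
}

fn build(config: Config) {
	// Copy out everything that is still needed after `config` is moved;
	// there is no `service.config()` to fall back on any more.
	let is_authority = config.is_authority;
	let name = config.name.clone();
	let disable_grandpa = config.disable_grandpa;
	let force_authoring = config.force_authoring;

	consume(config); // stands in for `new_full_start!(config)`

	// Only the cached copies are used from here on, mirroring the changes above.
	if is_authority && !disable_grandpa {
		println!("voter `{}` starting (force_authoring = {})", name, force_authoring);
	}
}

fn consume(_config: Config) {
	// In the real service this is where the builder consumes `config`.
}
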
diff --git a/substrate/node/cli/src/service.rs b/substrate/node/cli/src/service.rs
index 01cad95c467..da78d74a873 100644
--- a/substrate/node/cli/src/service.rs
+++ b/substrate/node/cli/src/service.rs
@@ -51,9 +51,8 @@ macro_rules! new_full_start {
 		let builder = substrate_service::ServiceBuilder::new_full::<
 			node_primitives::Block, node_runtime::RuntimeApi, node_executor::Executor
 		>($config)?
-			.with_select_chain(|_config, client| {
-				#[allow(deprecated)]
-				Ok(client::LongestChain::new(client.backend().clone()))
+			.with_select_chain(|_config, backend| {
+				Ok(client::LongestChain::new(backend.clone()))
 			})?
 			.with_transaction_pool(|config, client|
 				Ok(transaction_pool::txpool::Pool::new(config, transaction_pool::ChainApi::new(client)))
@@ -105,11 +104,23 @@ macro_rules! new_full {
 	($config:expr) => {{
 		use futures::Future;
 
+		let (
+			is_authority,
+			force_authoring,
+			name,
+			disable_grandpa
+		) = (
+			$config.roles.is_authority(),
+			$config.force_authoring,
+			$config.name.clone(),
+			$config.disable_grandpa
+		);
+
 		let (builder, mut import_setup, inherent_data_providers, mut tasks_to_spawn) = new_full_start!($config);
 
 		let service = builder.with_network_protocol(|_| Ok(crate::service::NodeProtocol::new()))?
-			.with_finality_proof_provider(|client|
-				Ok(Arc::new(grandpa::FinalityProofProvider::new(client.clone(), client)) as _)
+			.with_finality_proof_provider(|client, backend|
+				Ok(Arc::new(grandpa::FinalityProofProvider::new(backend, client)) as _)
 			)?
 			.build()?;
 
@@ -125,7 +136,7 @@ macro_rules! new_full {
 			);
 		}
 
-		if service.config().roles.is_authority() {
+		if is_authority {
 			let proposer = substrate_basic_authorship::ProposerFactory {
 				client: service.client(),
 				transaction_pool: service.transaction_pool(),
@@ -144,7 +155,7 @@ macro_rules! new_full {
 				env: proposer,
 				sync_oracle: service.network(),
 				inherent_data_providers: inherent_data_providers.clone(),
-				force_authoring: service.config().force_authoring,
+				force_authoring,
 				time_source: babe_link,
 			};
 
@@ -157,11 +168,11 @@ macro_rules! new_full {
 			// FIXME #1578 make this available through chainspec
 			gossip_duration: std::time::Duration::from_millis(333),
 			justification_period: 4096,
-			name: Some(service.config().name.clone()),
+			name: Some(name),
 			keystore: Some(service.keystore()),
 		};
 
-		match (service.config().roles.is_authority(), service.config().disable_grandpa) {
+		match (is_authority, disable_grandpa) {
 			(false, false) => {
 				// start the lightweight GRANDPA observer
 				service.spawn_task(Box::new(grandpa::run_grandpa_observer(
@@ -211,21 +222,19 @@ pub fn new_light<C: Send + Default + 'static>(config: Configuration<C, GenesisCo
 	let mut tasks_to_spawn = Vec::new();
 
 	let service = ServiceBuilder::new_light::<Block, RuntimeApi, node_executor::Executor>(config)?
-		.with_select_chain(|_config, client| {
-			#[allow(deprecated)]
-			Ok(LongestChain::new(client.backend().clone()))
+		.with_select_chain(|_config, backend| {
+			Ok(LongestChain::new(backend.clone()))
 		})?
 		.with_transaction_pool(|config, client|
 			Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client)))
 		)?
-		.with_import_queue_and_fprb(|_config, client, _select_chain, transaction_pool| {
-			#[allow(deprecated)]
-			let fetch_checker = client.backend().blockchain().fetcher()
+		.with_import_queue_and_fprb(|_config, client, backend, _select_chain, transaction_pool| {
+			let fetch_checker = backend.blockchain().fetcher()
 				.upgrade()
 				.map(|fetcher| fetcher.checker().clone())
 				.ok_or_else(|| "Trying to start light import queue without active fetch checker")?;
 			let block_import = grandpa::light_block_import::<_, _, _, RuntimeApi, _>(
-				client.clone(), Arc::new(fetch_checker), client.clone()
+				client.clone(), backend, Arc::new(fetch_checker), client.clone()
 			)?;
 
 			let finality_proof_import = block_import.clone();
@@ -248,8 +257,8 @@ pub fn new_light<C: Send + Default + 'static>(config: Configuration<C, GenesisCo
 			Ok((import_queue, finality_proof_request_builder))
 		})?
 		.with_network_protocol(|_| Ok(NodeProtocol::new()))?
-		.with_finality_proof_provider(|client|
-			Ok(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _)
+		.with_finality_proof_provider(|client, backend|
+			Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, client)) as _)
 		)?
 		.with_rpc_extensions(|client, pool| {
 			use node_rpc::accounts::{Accounts, AccountsApi};
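
Across both service files the underlying builder change is the same: every closure that used to call the deprecated `client.backend()` now receives the backend as its own argument (select chain, light import queue, and finality proof provider, whose first argument becomes the backend). A minimal generic sketch of that closure shape (hypothetical helper names, not the actual `ServiceBuilder` API):

use std::sync::Arc;

// Stand-in for any component that used to be built from `client.backend()`.
struct SelectChain<B>(Arc<B>);

impl<B> SelectChain<B> {
	// Mirrors `LongestChain::new(backend.clone())` in the hunks above.
	fn new(backend: Arc<B>) -> Self {
		SelectChain(backend)
	}
}

// The builder now hands the backend to the closure directly, so nothing here
// needs to reach through the client (or `#[allow(deprecated)]`) any more.
fn with_select_chain<B, S>(
	backend: &Arc<B>,
	build: impl FnOnce(Arc<B>) -> Result<S, String>,
) -> Result<S, String> {
	build(backend.clone())
}

fn example<B>(backend: Arc<B>) -> Result<SelectChain<B>, String> {
	with_select_chain(&backend, |backend| Ok(SelectChain::new(backend)))
}
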
-- 
GitLab