diff --git a/substrate/core/client/db/src/cache.rs b/substrate/core/client/db/src/cache.rs
deleted file mode 100644
index 4538709f52bddae87730669938c9ac6f473cf2e7..0000000000000000000000000000000000000000
--- a/substrate/core/client/db/src/cache.rs
+++ /dev/null
@@ -1,457 +0,0 @@
-// Copyright 2017 Parity Technologies (UK) Ltd.
-// This file is part of Substrate.
-
-// Substrate is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Substrate is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Substrate.  If not, see <http://www.gnu.org/licenses/>.
-
-//! DB-backed cache of blockchain data.
-
-use std::sync::Arc;
-use parking_lot::RwLock;
-
-use kvdb::{KeyValueDB, DBTransaction};
-
-use client::blockchain::Cache as BlockchainCache;
-use client::error::Result as ClientResult;
-use codec::{Codec, Encode, Decode};
-use primitives::AuthorityId;
-use runtime_primitives::generic::BlockId;
-use runtime_primitives::traits::{Block as BlockT, As, NumberFor};
-use utils::{COLUMN_META, BlockLookupKey, db_err, meta_keys, lookup_key_to_number, number_to_lookup_key};
-
-/// Database-backed cache of blockchain data.
-pub struct DbCache<Block: BlockT> {
-	db: Arc<KeyValueDB>,
-	block_index_column: Option<u32>,
-	header_column: Option<u32>,
-	authorities_at: DbCacheList<Block, Vec<AuthorityId>>,
-}
-
-impl<Block> DbCache<Block>
-	where
-		Block: BlockT,
-		NumberFor<Block>: As<u64>,
-{
-	/// Create new cache.
-	pub fn new(
-		db: Arc<KeyValueDB>,
-		block_index_column: Option<u32>,
-		header_column: Option<u32>,
-		authorities_column: Option<u32>
-	) -> ClientResult<Self> {
-		Ok(DbCache {
-			db: db.clone(),
-			block_index_column,
-			header_column,
-			authorities_at: DbCacheList::new(db, meta_keys::BEST_AUTHORITIES, authorities_column)?,
-		})
-	}
-
-	/// Get authorities_cache.
-	#[allow(unused)]
-	pub fn authorities_at_cache(&self) -> &DbCacheList<Block, Vec<AuthorityId>> {
-		&self.authorities_at
-	}
-}
-
-impl<Block> BlockchainCache<Block> for DbCache<Block>
-	where
-		Block: BlockT,
-		NumberFor<Block>: As<u64>,
-{
-	fn authorities_at(&self, at: BlockId<Block>) -> Option<Vec<AuthorityId>> {
-		use runtime_primitives::traits::Header as HeaderT;
-
-		let number = match at {
-			BlockId::Number(n) => Ok(number_to_lookup_key(n)),
-			BlockId::Hash(h) => {
-				let maybe_header = ::utils::read_header::<Block>(
-					&*self.db,
-					self.block_index_column,
-					self.header_column,
-					BlockId::Hash(h),
-				);
-
-				match maybe_header {
-					Ok(Some(hdr)) => Ok(number_to_lookup_key(*hdr.number())),
-					Ok(None) => return None, // no such block.
-					Err(e) => Err(e),
-				}
-			}
-		};
-
-		let authorities_at = number.and_then(|at| self.authorities_at.value_at_key(at));
-
-		match authorities_at {
-			Ok(authorities) => authorities,
-			Err(error) => {
-				warn!("Trying to read authorities from db cache has failed with: {}", error);
-				None
-			},
-		}
-	}
-}
-
-/// Database-backed blockchain cache which holds its entries as a list.
-/// The meta column holds the pointer to the best known cache entry and
-/// every entry points to the previous entry.
-/// New entry appears when the set of authorities changes in block, so the
-/// best entry here means the entry that is valid for the best block (and
-/// probably for its ascendants).
-pub struct DbCacheList<Block: BlockT, T: Clone> {
-	db: Arc<KeyValueDB>,
-	meta_key: &'static [u8],
-	column: Option<u32>,
-	/// Best entry at the moment. None means that cache has no entries at all.
-	best_entry: RwLock<Option<Entry<NumberFor<Block>, T>>>,
-}
-
-/// Single cache entry.
-#[derive(Clone)]
-#[cfg_attr(test, derive(Debug, PartialEq))]
-pub struct Entry<N, T: Clone> {
-	/// first block, when this value became actual
-	valid_from: N,
-	/// None means that we do not know the value starting from `valid_from` block
-	value: Option<T>,
-}
-
-/// Internal representation of the single cache entry. The entry points to the
-/// previous entry in the cache, allowing us to traverse back in time in list-style.
-#[derive(Encode, Decode)]
-#[cfg_attr(test, derive(Debug, PartialEq))]
-struct StorageEntry<N, T> {
-	/// None if valid from the beginning
-	prev_valid_from: Option<N>,
-	/// None means that we do not know the value starting from `valid_from` block
-	value: Option<T>,
-}
-
-impl<Block, T> DbCacheList<Block, T>
-	where
-		Block: BlockT,
-		NumberFor<Block>: As<u64>,
-		T: Clone + PartialEq + Codec,
-{
-	/// Creates new cache list.
-	fn new(db: Arc<KeyValueDB>, meta_key: &'static [u8], column: Option<u32>) -> ClientResult<Self> {
-		let best_entry = RwLock::new(db.get(COLUMN_META, meta_key)
-			.map_err(db_err)
-			.and_then(|block| match block {
-				Some(block) => {
-					let valid_from = lookup_key_to_number(&block)?;
-					read_storage_entry::<Block, T>(&*db, column, valid_from)
-						.map(|entry| Some(Entry {
-							valid_from,
-							value: entry
-								.expect("meta entry references the entry at the block; storage entry at block exists when referenced; qed")
-								.value,
-						}))
-				},
-				None => Ok(None),
-			})?);
-
-		Ok(DbCacheList {
-			db,
-			column,
-			meta_key,
-			best_entry,
-		})
-	}
-
-	/// Gets the best known entry.
-	pub fn best_entry(&self) -> Option<Entry<NumberFor<Block>, T>> {
-		self.best_entry.read().clone()
-	}
-
-	/// Commits the new best pending value to the database. Returns Some if best entry must
-	/// be updated after transaction is committed.
-	#[allow(unused)]
-	pub fn commit_best_entry(
-		&self,
-		transaction: &mut DBTransaction,
-		valid_from: NumberFor<Block>,
-		pending_value: Option<T>
-	) -> Option<Entry<NumberFor<Block>, T>> {
-		let best_entry = self.best_entry();
-		let update_best_entry = match (
-			best_entry.as_ref().and_then(|a| a.value.as_ref()),
-			pending_value.as_ref()
-		) {
-			(Some(best_value), Some(pending_value)) => best_value != pending_value,
-			(None, Some(_)) | (Some(_), None) => true,
-			(None, None) => false,
-		};
-		if !update_best_entry {
-			return None;
-		}
-
-		let valid_from_key = number_to_lookup_key(valid_from);
-		transaction.put(COLUMN_META, self.meta_key, &valid_from_key);
-		transaction.put(self.column, &valid_from_key, &StorageEntry {
-			prev_valid_from: best_entry.map(|b| b.valid_from),
-			value: pending_value.clone(),
-		}.encode());
-
-		Some(Entry {
-			valid_from,
-			value: pending_value,
-		})
-	}
-
-	/// Updates the best in-memory cache entry. Must be called after transaction with changes
-	/// from commit_best_entry has been committed.
-	#[allow(unused)]
-	pub fn update_best_entry(&self, best_entry: Option<Entry<NumberFor<Block>, T>>) {
-		*self.best_entry.write() = best_entry;
-	}
-
-	/// Prune all entries from the beginning up to the block (including entry at the number). Returns
-	/// the number of pruned entries. Pruning never deletes the latest entry in the cache.
-	#[allow(unused)]
-	pub fn prune_entries(
-		&self,
-		transaction: &mut DBTransaction,
-		last_to_prune: NumberFor<Block>
-	) -> ClientResult<usize> {
-		// find the last entry we want to keep
-		let mut last_entry_to_keep = match self.best_entry() {
-			Some(best_entry) => best_entry.valid_from,
-			None => return Ok(0),
-		};
-		let mut first_entry_to_remove = last_entry_to_keep;
-		while first_entry_to_remove > last_to_prune {
-			last_entry_to_keep = first_entry_to_remove;
-
-			let entry = read_storage_entry::<Block, T>(&*self.db, self.column, first_entry_to_remove)?
-				.expect("entry referenced from the next entry; entry exists when referenced; qed");
-			// if we have reached the first list entry
-			// AND all list entries are for blocks that are later than last_to_prune
-			// => nothing to prune
-			first_entry_to_remove = match entry.prev_valid_from {
-				Some(prev_valid_from) => prev_valid_from,
-				None => return Ok(0),
-			}
-		}
-
-		// remove all entries, starting from entry_to_remove
-		let mut pruned = 0;
-		let mut entry_to_remove = Some(first_entry_to_remove);
-		while let Some(current_entry) = entry_to_remove {
-			let entry = read_storage_entry::<Block, T>(&*self.db, self.column, current_entry)?
-				.expect("referenced entry exists; entry_to_remove is a reference to the entry; qed");
-
-			if current_entry != last_entry_to_keep {
-				transaction.delete(self.column, &number_to_lookup_key(current_entry));
-				pruned += 1;
-			}
-			entry_to_remove = entry.prev_valid_from;
-		}
-
-		let mut entry = read_storage_entry::<Block, T>(&*self.db, self.column, last_entry_to_keep)?
-			.expect("last_entry_to_keep >= first_entry_to_remove; that means that we're leaving this entry in the db; qed");
-		entry.prev_valid_from = None;
-		transaction.put(self.column, &number_to_lookup_key(last_entry_to_keep), &entry.encode());
-
-		Ok(pruned)
-	}
-
-	/// Reads the cached value, actual at given block. Returns None if the value was not cached
-	/// or if it has been pruned.
-	fn value_at_key(&self, key: BlockLookupKey) -> ClientResult<Option<T>> {
-		let at = lookup_key_to_number::<NumberFor<Block>>(&key)?;
-		let best_valid_from = match self.best_entry() {
-			// there are entries in cache
-			Some(best_entry) => {
-				// we're looking for the best value
-				if at >= best_entry.valid_from {
-					return Ok(best_entry.value);
-				}
-
-				// we're looking for the value of older blocks
-				best_entry.valid_from
-			},
-			// there are no entries in the cache
-			None => return Ok(None),
-		};
-
-		let mut entry = read_storage_entry::<Block, T>(&*self.db, self.column, best_valid_from)?
-			.expect("self.best_entry().is_some() if there's entry for best_valid_from; qed");
-		loop {
-			let prev_valid_from = match entry.prev_valid_from {
-				Some(prev_valid_from) => prev_valid_from,
-				None => return Ok(None),
-			};
-
-			let prev_entry = read_storage_entry::<Block, T>(&*self.db, self.column, prev_valid_from)?
-				.expect("entry referenced from the next entry; entry exists when referenced; qed");
-			if at >= prev_valid_from {
-				return Ok(prev_entry.value);
-			}
-
-			entry = prev_entry;
-		}
-	}
-}
-
-/// Reads the entry at the block with given number.
-fn read_storage_entry<Block, T>(
-	db: &KeyValueDB,
-	column: Option<u32>,
-	number: NumberFor<Block>
-) -> ClientResult<Option<StorageEntry<NumberFor<Block>, T>>>
-	where
-		Block: BlockT,
-		NumberFor<Block>: As<u64>,
-		T: Codec,
-{
-	db.get(column, &number_to_lookup_key(number))
-		.and_then(|entry| match entry {
-			Some(entry) => Ok(StorageEntry::<NumberFor<Block>, T>::decode(&mut &entry[..])),
-			None => Ok(None),
-		})
-	.map_err(db_err)
-}
-
-#[cfg(test)]
-mod tests {
-	use runtime_primitives::testing::Block as RawBlock;
-	use light::{AUTHORITIES_ENTRIES_TO_KEEP, columns, LightStorage};
-	use light::tests::insert_block;
-	use super::*;
-
-	type Block = RawBlock<u64>;
-
-	#[test]
-	fn authorities_storage_entry_serialized() {
-		let test_cases: Vec<StorageEntry<u64, Vec<AuthorityId>>> = vec![
-			StorageEntry { prev_valid_from: Some(42), value: Some(vec![[1u8; 32].into()]) },
-			StorageEntry { prev_valid_from: None, value: Some(vec![[1u8; 32].into(), [2u8; 32].into()]) },
-			StorageEntry { prev_valid_from: None, value: None },
-		];
-
-		for expected in test_cases {
-			let serialized = expected.encode();
-			let deserialized = StorageEntry::decode(&mut &serialized[..]).unwrap();
-			assert_eq!(expected, deserialized);
-		}
-	}
-
-	#[test]
-	#[ignore] // TODO: unignore when cache reinstated.
-	fn best_authorities_are_updated() {
-		let db = LightStorage::new_test();
-		let authorities_at: Vec<(usize, Option<Entry<u64, Vec<AuthorityId>>>)> = vec![
-			(0, None),
-			(0, None),
-			(1, Some(Entry { valid_from: 1, value: Some(vec![[2u8; 32].into()]) })),
-			(1, Some(Entry { valid_from: 1, value: Some(vec![[2u8; 32].into()]) })),
-			(2, Some(Entry { valid_from: 3, value: Some(vec![[4u8; 32].into()]) })),
-			(2, Some(Entry { valid_from: 3, value: Some(vec![[4u8; 32].into()]) })),
-			(3, Some(Entry { valid_from: 5, value: None })),
-			(3, Some(Entry { valid_from: 5, value: None })),
-		];
-
-		// before any block, there are no entries in cache
-		assert!(db.cache().authorities_at_cache().best_entry().is_none());
-		assert_eq!(db.db().iter(columns::AUTHORITIES).count(), 0);
-
-		// insert blocks and check that best_authorities() returns correct result
-		let mut prev_hash = Default::default();
-		for number in 0..authorities_at.len() {
-			let authorities_at_number = authorities_at[number].1.clone().and_then(|e| e.value);
-			prev_hash = insert_block(&db, &prev_hash, number as u64, authorities_at_number);
-			assert_eq!(db.cache().authorities_at_cache().best_entry(), authorities_at[number].1);
-			assert_eq!(db.db().iter(columns::AUTHORITIES).count(), authorities_at[number].0);
-		}
-
-		// check that authorities_at() returns correct results for all retrospective blocks
-		for number in 1..authorities_at.len() + 1 {
-			assert_eq!(db.cache().authorities_at(BlockId::Number(number as u64)),
-				authorities_at.get(number + 1)
-					.or_else(|| authorities_at.last())
-					.unwrap().1.clone().and_then(|e| e.value));
-		}
-
-		// now check that cache entries are pruned when new blocks are inserted
-		let mut current_entries_count = authorities_at.last().unwrap().0;
-		let pruning_starts_at = AUTHORITIES_ENTRIES_TO_KEEP as usize;
-		for number in authorities_at.len()..authorities_at.len() + pruning_starts_at {
-			prev_hash = insert_block(&db, &prev_hash, number as u64, None);
-			if number > pruning_starts_at {
-				let prev_entries_count = authorities_at[number - pruning_starts_at].0;
-				let entries_count = authorities_at.get(number - pruning_starts_at + 1).map(|e| e.0)
-					.unwrap_or_else(|| authorities_at.last().unwrap().0);
-				current_entries_count -= entries_count - prev_entries_count;
-			}
-
-			// there's always at least 1 entry in the cache (after first insertion)
-			assert_eq!(db.db().iter(columns::AUTHORITIES).count(), ::std::cmp::max(current_entries_count, 1));
-		}
-	}
-
-	#[test]
-	fn best_authorities_are_pruned() {
-		let db = LightStorage::<Block>::new_test();
-		let mut transaction = DBTransaction::new();
-
-		// insert first entry at block#100
-		db.cache().authorities_at_cache().update_best_entry(
-			db.cache().authorities_at_cache().commit_best_entry(&mut transaction, 100, Some(vec![[1u8; 32].into()])));
-		db.db().write(transaction).unwrap();
-
-		// no entries are pruned, since there's only one entry in the cache
-		let mut transaction = DBTransaction::new();
-		assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 50).unwrap(), 0);
-		assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 100).unwrap(), 0);
-		assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 150).unwrap(), 0);
-
-		// insert second entry at block#200
-		let mut transaction = DBTransaction::new();
-		db.cache().authorities_at_cache().update_best_entry(
-			db.cache().authorities_at_cache().commit_best_entry(&mut transaction, 200, Some(vec![[2u8; 32].into()])));
-		db.db().write(transaction).unwrap();
-
-		let mut transaction = DBTransaction::new();
-		assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 50).unwrap(), 0);
-		assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 100).unwrap(), 1);
-		assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 150).unwrap(), 1);
-		// still only 1 entry is removed since pruning never deletes the last entry
-		assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 200).unwrap(), 1);
-		assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 250).unwrap(), 1);
-
-		// physically remove entry for block#100 from db
-		let mut transaction = DBTransaction::new();
-		assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 150).unwrap(), 1);
-		db.db().write(transaction).unwrap();
-
-		assert_eq!(db.cache().authorities_at_cache().best_entry().unwrap().value, Some(vec![[2u8; 32].into()]));
-		assert_eq!(db.cache().authorities_at(BlockId::Number(50)), None);
-		assert_eq!(db.cache().authorities_at(BlockId::Number(100)), None);
-		assert_eq!(db.cache().authorities_at(BlockId::Number(150)), None);
-		assert_eq!(db.cache().authorities_at(BlockId::Number(200)), Some(vec![[2u8; 32].into()]));
-		assert_eq!(db.cache().authorities_at(BlockId::Number(250)), Some(vec![[2u8; 32].into()]));
-
-		// try to delete last entry => failure (no entries are removed)
-		let mut transaction = DBTransaction::new();
-		assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 300).unwrap(), 0);
-		db.db().write(transaction).unwrap();
-
-		assert_eq!(db.cache().authorities_at_cache().best_entry().unwrap().value, Some(vec![[2u8; 32].into()]));
-		assert_eq!(db.cache().authorities_at(BlockId::Number(50)), None);
-		assert_eq!(db.cache().authorities_at(BlockId::Number(100)), None);
-		assert_eq!(db.cache().authorities_at(BlockId::Number(150)), None);
-		assert_eq!(db.cache().authorities_at(BlockId::Number(200)), Some(vec![[2u8; 32].into()]));
-		assert_eq!(db.cache().authorities_at(BlockId::Number(250)), Some(vec![[2u8; 32].into()]));
-	}
-}
diff --git a/substrate/core/client/db/src/cache/list_cache.rs b/substrate/core/client/db/src/cache/list_cache.rs
new file mode 100644
index 0000000000000000000000000000000000000000..50d227c165b3d41ccf9475b1c846c41b29839c07
--- /dev/null
+++ b/substrate/core/client/db/src/cache/list_cache.rs
@@ -0,0 +1,1383 @@
+// Copyright 2017 Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate.  If not, see <http://www.gnu.org/licenses/>.
+
+//! List-based cache.
+//!
+//! Maintains several lists, containing nodes that are inserted whenever
+//! cached value at new block differs from the value at previous block.
+//! Example:
+//! B1(a) <--- B2(b) <--- B3(b) <--- B4(c)
+//!            N1(b) <-------------- N2(c)
+//!
+//! There's a single list for all finalized blocks and >= 0 lists for unfinalized
+//! blocks.
+//! When new non-final block is inserted (with value that differs from the value
+//! at parent), it starts new unfinalized fork.
+//! When new final block is inserted (with value that differs from the value at
+//! parent), new entry is appended to the finalized fork.
+//! When existing non-final block is finalized (with value that differs from the
+//! value at parent), new entry is appended to the finalized fork AND unfinalized
+//! fork is dropped.
+//!
+//! Entries from abandoned unfinalized forks (forks that are forking from block B
+//! which is ascendant of the best finalized block) are deleted when block F with
+//! number B.number (i.e. 'parallel' canon block) is finalized.
+//!
+//! Finalized entry E1 is pruned when block B is finalized so that:
+//! EntryAt(B.number - prune_depth).points_to(E1)
+
+use std::collections::BTreeSet;
+
+use client::error::{ErrorKind as ClientErrorKind, Result as ClientResult};
+use runtime_primitives::traits::{Block as BlockT, NumberFor, As, Zero};
+
+use cache::{CacheItemT, ComplexBlockId};
+use cache::list_entry::{Entry, StorageEntry};
+use cache::list_storage::{Storage, StorageTransaction, Metadata};
+
+/// List-based cache.
+pub struct ListCache<Block: BlockT, T: CacheItemT, S: Storage<Block, T>> {
+	/// Cache storage.
+	storage: S,
+	/// Prune depth.
+	prune_depth: NumberFor<Block>,
+	/// Best finalized block.
+	best_finalized_block: ComplexBlockId<Block>,
+	/// Best finalized entry (if exists).
+	best_finalized_entry: Option<Entry<Block, T>>,
+	/// All unfinalized 'forks'.
+	unfinalized: Vec<Fork<Block, T>>,
+}
+
+/// All possible list cache operations that could be performed after transaction is committed.
+#[derive(Debug)]
+#[cfg_attr(test, derive(PartialEq))]
+pub enum CommitOperation<Block: BlockT, T: CacheItemT> {
+	/// New block is appended to the fork without changing the cached value.
+	AppendNewBlock(usize, ComplexBlockId<Block>),
+	/// New block is appended to the fork with the different value.
+	AppendNewEntry(usize, Entry<Block, T>),
+	/// New fork is added with the given head entry.
+	AddNewFork(Entry<Block, T>),
+	/// New block is finalized and possibly:
+	/// - new entry is finalized AND/OR
+	/// - some forks are destroyed
+	BlockFinalized(ComplexBlockId<Block>, Option<Entry<Block, T>>, BTreeSet<usize>),
+}
+
+/// Single fork of list-based cache.
+#[derive(Debug)]
+#[cfg_attr(test, derive(PartialEq))]
+pub struct Fork<Block: BlockT, T> {
+	/// The best block of this fork. We do not save this field in the database to avoid
+	/// extra updates => it could be None after restart. It will be either filled when
+	/// the block is appended to this fork, or the whole fork will be abandoned when the
+	/// block from the other fork is finalized
+	best_block: Option<ComplexBlockId<Block>>,
+	/// The head entry of this fork.
+	head: Entry<Block, T>,
+}
+
+/// Outcome of Fork::try_append_or_fork.
+#[derive(Debug)]
+#[cfg_attr(test, derive(PartialEq))]
+pub enum ForkAppendResult<Block: BlockT> {
+	/// New entry should be appended to the end of the fork.
+	Append,
+	/// New entry should be forked from the fork, starting with entry at given block.
+	Fork(ComplexBlockId<Block>),
+}
+
+impl<Block: BlockT, T: CacheItemT, S: Storage<Block, T>> ListCache<Block, T, S> {
+	/// Create new db list cache entry.
+	pub fn new(storage: S, prune_depth: NumberFor<Block>, best_finalized_block: ComplexBlockId<Block>) -> Self {
+		let (best_finalized_entry, unfinalized) = storage.read_meta()
+			.and_then(|meta| read_forks(&storage, meta))
+			.unwrap_or_else(|error| {
+				warn!(target: "db", "Unable to initialize list cache: {}. Restarting", error);
+				(None, Vec::new())
+			});
+
+		ListCache {
+			storage,
+			prune_depth,
+			best_finalized_block,
+			best_finalized_entry,
+			unfinalized,
+		}
+	}
+
+	/// Get reference to the storage.
+	pub fn storage(&self) -> &S {
+		&self.storage
+	}
+
+	/// Get value valid at block.
+	pub fn value_at_block(&self, at: &ComplexBlockId<Block>) -> ClientResult<Option<T>> {
+		let head = if at.number <= self.best_finalized_block.number {
+			// if the block is older than the best known finalized block
+			// => we should search for the finalized value
+
+			// BUT since we're not guaranteeing to provide correct values for forks
+			// behind the finalized block, check if the block is finalized first
+			if !chain::is_finalized_block(&self.storage, at, As::sa(::std::u64::MAX))? {
+				return Ok(None);
+			}
+
+			self.best_finalized_entry.as_ref()
+		} else if self.unfinalized.is_empty() {
+			// there are no unfinalized entries
+			// => we should search for the finalized value
+			self.best_finalized_entry.as_ref()
+		} else {
+			// there are unfinalized entries
+			// => find the fork containing given block and read from this fork
+			// IF there's no matching fork, ensure that this isn't a block from a fork that has forked
+			// behind the best finalized block and search at finalized fork
+
+			match self.find_unfinalized_fork(at)? {
+				Some(fork) => Some(&fork.head),
+				None => match self.best_finalized_entry.as_ref() {
+					Some(best_finalized_entry) if chain::is_connected_to_block(&self.storage, &best_finalized_entry.valid_from, at)? =>
+						Some(best_finalized_entry),
+					_ => None,
+				},
+			}
+		};
+
+		match head {
+			Some(head) => head.search_best_before(&self.storage, at.number, true)
+				.map(|e| e.and_then(|e| e.0.value)),
+			None => Ok(None),
+		}
+	}
+
+	/// When new block is inserted into database.
+	pub fn on_block_insert<Tx: StorageTransaction<Block, T>>(
+		&self,
+		tx: &mut Tx,
+		parent: ComplexBlockId<Block>,
+		block: ComplexBlockId<Block>,
+		value: Option<T>,
+		is_final: bool,
+	) -> ClientResult<Option<CommitOperation<Block, T>>> {
+		// this guarantee is currently provided by LightStorage && we're relying on it here
+		debug_assert!(!is_final || self.best_finalized_block.hash == parent.hash);
+
+		// we do not store any values behind finalized
+		if block.number != Zero::zero() && self.best_finalized_block.number >= block.number {
+			return Ok(None);
+		}
+
+		// if the block is not final, it is possibly appended to/forking from existing unfinalized fork
+		if !is_final {
+			let mut fork_and_action = None;
+
+			// first: try to find fork that is known to have the best block we're appending to
+			for (index, fork) in self.unfinalized.iter().enumerate() {
+				if fork.try_append(&parent) {
+					fork_and_action = Some((index, ForkAppendResult::Append));
+					break;
+				}
+			}
+
+			// if not found, check cases:
+			// - we're appending to the fork for the first time after restart;
+			// - we're forking existing unfinalized fork from the middle;
+			if fork_and_action.is_none() {
+				let best_finalized_entry_block = self.best_finalized_entry.as_ref().map(|f| f.valid_from.number);
+				for (index, fork) in self.unfinalized.iter().enumerate() {
+					if let Some(action) = fork.try_append_or_fork(&self.storage, &parent, best_finalized_entry_block)? {
+						fork_and_action = Some((index, action));
+						break;
+					}
+				}
+			}
+
+			// if we have found matching unfinalized fork => early exit
+			match fork_and_action {
+				// append to unfinalized fork
+				Some((index, ForkAppendResult::Append)) => {
+					let new_storage_entry = match self.unfinalized[index].head.try_update(value) {
+						Some(new_storage_entry) => new_storage_entry,
+						None => return Ok(Some(CommitOperation::AppendNewBlock(index, block))),
+					};
+
+					tx.insert_storage_entry(&block, &new_storage_entry);
+					let operation = CommitOperation::AppendNewEntry(index, new_storage_entry.into_entry(block));
+					tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation);
+					return Ok(Some(operation));
+				},
+				// fork from the middle of unfinalized fork
+				Some((_, ForkAppendResult::Fork(prev_valid_from))) => {
+					// it is possible that we're inserting extra (but still required) fork here
+					let new_storage_entry = StorageEntry {
+						prev_valid_from: Some(prev_valid_from),
+						value,
+					};
+
+					tx.insert_storage_entry(&block, &new_storage_entry);
+					let operation = CommitOperation::AddNewFork(new_storage_entry.into_entry(block));
+					tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation);
+					return Ok(Some(operation));
+				},
+				None => (),
+			}
+		}
+
+		// if we're here, then one of following is true:
+		// - either we're inserting final block => all ancestors are already finalized AND the only thing we can do
+		//   is to try to update last finalized entry
+		// - either we're inserting non-final blocks that has no ancestors in any known unfinalized forks
+
+		let new_storage_entry = match self.best_finalized_entry.as_ref() {
+			Some(best_finalized_entry) => best_finalized_entry.try_update(value),
+			None if value.is_some() => Some(StorageEntry { prev_valid_from: None, value }),
+			None => None,
+		};
+
+		if !is_final {
+			return Ok(match new_storage_entry {
+				Some(new_storage_entry) => {
+					tx.insert_storage_entry(&block, &new_storage_entry);
+					let operation = CommitOperation::AddNewFork(new_storage_entry.into_entry(block));
+					tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation);
+					Some(operation)
+				},
+				None => None,
+			});
+		}
+
+		// cleanup database from abandoned unfinalized forks and obsolete finalized entries
+		let abandoned_forks = self.destroy_abandoned_forks(tx, &block);
+		self.prune_finalized_entries(tx, &block);
+
+		match new_storage_entry {
+			Some(new_storage_entry) => {
+				tx.insert_storage_entry(&block, &new_storage_entry);
+				let operation = CommitOperation::BlockFinalized(block.clone(), Some(new_storage_entry.into_entry(block)), abandoned_forks);
+				tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation);
+				Ok(Some(operation))
+			},
+			None => Ok(Some(CommitOperation::BlockFinalized(block, None, abandoned_forks))),
+		}
+	}
+
+	/// When previously inserted block is finalized.
+	pub fn on_block_finalize<Tx: StorageTransaction<Block, T>>(
+		&self,
+		tx: &mut Tx,
+		parent: ComplexBlockId<Block>,
+		block: ComplexBlockId<Block>,
+	) -> ClientResult<Option<CommitOperation<Block, T>>> {
+		// this guarantee is currently provided by LightStorage && we're relying on it here
+		debug_assert_eq!(self.best_finalized_block.hash, parent.hash);
+
+		// there could be at most one entry that is finalizing
+		let finalizing_entry = self.storage.read_entry(&block)?
+			.map(|entry| entry.into_entry(block.clone()));
+
+		// cleanup database from abandoned unfinalized forks and obsolete finalized entries
+		let abandoned_forks = self.destroy_abandoned_forks(tx, &block);
+		self.prune_finalized_entries(tx, &block);
+
+		let update_meta = finalizing_entry.is_some();
+		let operation = CommitOperation::BlockFinalized(block, finalizing_entry, abandoned_forks);
+		if update_meta {
+			tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation);
+		}
+		Ok(Some(operation))
+	}
+
+	/// When transaction is committed.
+	pub fn on_transaction_commit(&mut self, op: CommitOperation<Block, T>) {
+		match op {
+			CommitOperation::AppendNewBlock(index, best_block) => {
+				let mut fork = self.unfinalized.get_mut(index)
+					.expect("ListCache is a crate-private type;
+						internal clients of ListCache are committing transaction while cache is locked;
+						CommitOperation holds valid references while cache is locked; qed");
+				fork.best_block = Some(best_block);
+			},
+			CommitOperation::AppendNewEntry(index, entry) => {
+				let mut fork = self.unfinalized.get_mut(index)
+					.expect("ListCache is a crate-private type;
+						internal clients of ListCache are committing transaction while cache is locked;
+						CommitOperation holds valid references while cache is locked; qed");
+				fork.best_block = Some(entry.valid_from.clone());
+				fork.head = entry;
+			},
+			CommitOperation::AddNewFork(entry) => {
+				self.unfinalized.push(Fork {
+					best_block: Some(entry.valid_from.clone()),
+					head: entry,
+				});
+			},
+			CommitOperation::BlockFinalized(block, finalizing_entry, forks) => {
+				self.best_finalized_block = block;
+				if let Some(finalizing_entry) = finalizing_entry {
+					self.best_finalized_entry = Some(finalizing_entry);
+				}
+				for fork_index in forks.iter().rev() {
+					self.unfinalized.remove(*fork_index);
+				}
+			},
+		}
+	}
+
+	/// Prune old finalized entries, keeping roughly `prune_depth` blocks of history
+	/// behind the just-finalized `block`. Pruning failures are logged, not propagated.
+	fn prune_finalized_entries<Tx: StorageTransaction<Block, T>>(
+		&self,
+		tx: &mut Tx,
+		block: &ComplexBlockId<Block>
+	) {
+		let mut do_pruning = || -> ClientResult<()> {
+			// calculate last ancient block number; if the chain is shorter than the
+			// prune depth, or the canonical hash at that number is unknown, do nothing
+			let ancient_block = match block.number.as_().checked_sub(self.prune_depth.as_()) {
+				Some(number) => match self.storage.read_id(As::sa(number))? {
+					Some(hash) => ComplexBlockId::new(hash, As::sa(number)),
+					None => return Ok(()),
+				},
+				None => return Ok(()),
+			};
+
+			// if there's an entry at this block:
+			// - remove reference from this entry to the previous entry
+			// - destroy the fork that ends at the previous entry
+			let current_entry = match self.storage.read_entry(&ancient_block)? {
+				Some(current_entry) => current_entry,
+				None => return Ok(()),
+			};
+			let first_entry_to_truncate = match current_entry.prev_valid_from {
+				Some(prev_valid_from) => prev_valid_from,
+				None => return Ok(()),
+			};
+
+			// truncate ancient entry: re-insert it with no back-reference so the
+			// list now terminates here
+			tx.insert_storage_entry(&ancient_block, &StorageEntry {
+				prev_valid_from: None,
+				value: current_entry.value,
+			});
+
+			// destroy 'fork' ending with previous entry; best_block/value are irrelevant
+			// here — destroy only follows the valid_from back-pointers
+			Fork { best_block: None, head: Entry { valid_from: first_entry_to_truncate, value: None } }
+				.destroy(&self.storage, tx, None)
+		};
+
+		if let Err(error) = do_pruning() {
+			warn!(target: "db", "Failed to prune ancient cache entries: {}", error);
+		}
+	}
+
+	/// Try to destroy abandoned forks (those forked before the best finalized block) when
+	/// a block is finalized. Returns the indices of unfinalized forks that must be removed
+	/// from the in-memory fork set.
+	fn destroy_abandoned_forks<Tx: StorageTransaction<Block, T>>(
+		&self,
+		tx: &mut Tx,
+		block: &ComplexBlockId<Block>
+	) -> BTreeSet<usize> {
+		self.unfinalized.iter().enumerate()
+			.filter(|&(_, fork)| fork.head.valid_from.number == block.number)
+			.map(|(index, fork)| {
+				// the fork whose head IS the finalized block is not abandoned: it only
+				// leaves the unfinalized set; every other same-number fork is destroyed
+				if fork.head.valid_from.hash != block.hash {
+					if let Err(error) = fork.destroy(&self.storage, tx, Some(block.number)) {
+						warn!(target: "db", "Failed to destroy abandoned unfinalized cache fork: {}", error);
+					}
+				}
+				index
+			})
+			.collect()
+	}
+
+	/// Search for the unfinalized fork that the given block belongs to.
+	fn find_unfinalized_fork(&self, block: &ComplexBlockId<Block>) -> ClientResult<Option<&Fork<Block, T>>> {
+		for fork in self.unfinalized.iter() {
+			// `matches` walks chain headers, so it may fail with a storage error
+			if fork.matches(&self.storage, block)? {
+				return Ok(Some(fork));
+			}
+		}
+		Ok(None)
+	}
+}
+
+impl<Block: BlockT, T: CacheItemT> Fork<Block, T> {
+	/// Get reference to the head entry of this fork.
+	pub fn head(&self) -> &Entry<Block, T> {
+		&self.head
+	}
+
+	/// Check if the block is the part of the fork.
+	pub fn matches<S: Storage<Block, T>>(
+		&self,
+		storage: &S,
+		block: &ComplexBlockId<Block>,
+	) -> ClientResult<bool> {
+		// find fork entries that surround the block by number, then verify that the
+		// block is actually connected (same chain) to both ends of that range
+		let range = self.head.search_best_range_before(storage, block.number)?;
+		match range {
+			None => Ok(false),
+			Some((begin, end)) => chain::is_connected_to_range(storage, block, (&begin, end.as_ref())),
+		}
+	}
+
+	/// Try to append NEW block to the fork. This method will only 'work' (return true) when block
+	/// is actually appended to the fork AND the best known block of the fork is known (i.e. some
+	/// block has been already appended to this fork after last restart).
+	pub fn try_append(&self, parent: &ComplexBlockId<Block>) -> bool {
+		// when the best block of the fork is known, the check is trivial
+		//
+		// most of calls will hopefully end here, because best_block is only unknown
+		// after restart and until new block is appended to the fork
+		self.best_block.as_ref() == Some(parent)
+	}
+
+	/// Try to append new block to the fork OR fork it.
+	pub fn try_append_or_fork<S: Storage<Block, T>>(
+		&self,
+		storage: &S,
+		parent: &ComplexBlockId<Block>,
+		best_finalized_entry_block: Option<NumberFor<Block>>,
+	) -> ClientResult<Option<ForkAppendResult<Block>>> {
+		// try to find entries that are (possibly) surrounding the parent block
+		let range = self.head.search_best_range_before(storage, parent.number)?;
+		let begin = match range {
+			Some((begin, _)) => begin,
+			None => return Ok(None),
+		};
+
+		// check if the parent is connected to the beginning of the range
+		if !chain::is_connected_to_block(storage, &parent, &begin)? {
+			return Ok(None);
+		}
+
+		// the block is connected to the begin-entry. If begin is the head entry
+		// => we need to append new block to the fork
+		if begin == self.head.valid_from {
+			return Ok(Some(ForkAppendResult::Append));
+		}
+
+		// the parent block belongs to this fork AND it is located after last finalized entry
+		// => we need to make a new fork
+		if best_finalized_entry_block.map(|f| begin.number > f).unwrap_or(true) {
+			return Ok(Some(ForkAppendResult::Fork(begin)));
+		}
+
+		Ok(None)
+	}
+
+	/// Destroy fork by deleting all unfinalized entries.
+	pub fn destroy<S: Storage<Block, T>, Tx: StorageTransaction<Block, T>>(
+		&self,
+		storage: &S,
+		tx: &mut Tx,
+		best_finalized_block: Option<NumberFor<Block>>,
+	) -> ClientResult<()> {
+		// walk backwards from the head entry, removing every visited entry
+		let mut current = self.head.valid_from.clone();
+		loop {
+			// optionally: deletion stops when we found entry at finalized block
+			if let Some(best_finalized_block) = best_finalized_block {
+				if chain::is_finalized_block(storage, &current, best_finalized_block)? {
+					return Ok(());
+				}
+			}
+
+			// read pointer to previous entry (must exist, or the list is corrupt)
+			let entry = storage.require_entry(&current)?;
+			tx.remove_storage_entry(&current);
+
+			// deletion stops when there are no more entries in the list
+			current = match entry.prev_valid_from {
+				Some(prev_valid_from) => prev_valid_from,
+				None => return Ok(()),
+			};
+		}
+	}
+}
+
+/// Blockchain related functions.
+mod chain {
+	use runtime_primitives::traits::Header as HeaderT;
+	use super::*;
+
+	/// Is the block connected to both ends of the given range?
+	pub fn is_connected_to_range<Block: BlockT, T: CacheItemT, S: Storage<Block, T>>(
+		storage: &S,
+		block: &ComplexBlockId<Block>,
+		range: (&ComplexBlockId<Block>, Option<&ComplexBlockId<Block>>),
+	) -> ClientResult<bool> {
+		// an open-ended range (end == None) only requires connection to its beginning
+		let (begin, end) = range;
+		Ok(is_connected_to_block(storage, block, begin)?
+			&& match end {
+				Some(end) => is_connected_to_block(storage, block, end)?,
+				None => true,
+			})
+	}
+
+	/// Is the block1 directly connected (i.e. part of the same fork) to block2?
+	pub fn is_connected_to_block<Block: BlockT, T: CacheItemT, S: Storage<Block, T>>(
+		storage: &S,
+		block1: &ComplexBlockId<Block>,
+		block2: &ComplexBlockId<Block>,
+	) -> ClientResult<bool> {
+		// walk down from the higher block by following parent hashes until we reach
+		// the lower block's number; the blocks are connected iff we arrive at it
+		let (begin, end) = if block1 > block2 { (block2, block1) } else { (block1, block2) };
+		let mut current = storage.read_header(&end.hash)?
+			.ok_or_else(|| ClientErrorKind::UnknownBlock(format!("{}", end.hash)))?;
+		while *current.number() > begin.number {
+			current = storage.read_header(current.parent_hash())?
+				.ok_or_else(|| ClientErrorKind::UnknownBlock(format!("{}", current.parent_hash())))?;
+		}
+
+		Ok(begin.hash == current.hash())
+	}
+
+	/// Returns true if the given block is finalized.
+	pub fn is_finalized_block<Block: BlockT, T: CacheItemT, S: Storage<Block, T>>(
+		storage: &S,
+		block: &ComplexBlockId<Block>,
+		best_finalized_block: NumberFor<Block>,
+	) -> ClientResult<bool> {
+		if block.number > best_finalized_block {
+			return Ok(false);
+		}
+
+		// at or below the finalized number, a block is finalized iff it is the
+		// canonical block at that number
+		storage.read_id(block.number)
+			.map(|hash| hash.as_ref() == Some(&block.hash))
+	}
+}
+
+/// Read list cache forks (the best finalized entry and all unfinalized fork heads)
+/// referenced by the given cache metadata.
+fn read_forks<Block: BlockT, T: CacheItemT, S: Storage<Block, T>>(
+	storage: &S,
+	meta: Metadata<Block>,
+) -> ClientResult<(Option<Entry<Block, T>>, Vec<Fork<Block, T>>)> {
+	// resolve the finalized entry, if the metadata references one
+	let finalized = match meta.finalized {
+		None => None,
+		Some(finalized_id) => {
+			let storage_entry = storage.require_entry(&finalized_id)?;
+			Some(storage_entry.into_entry(finalized_id))
+		},
+	};
+
+	// resolve every unfinalized fork head; best blocks are unknown until the
+	// first block is appended after restart
+	let mut unfinalized = Vec::with_capacity(meta.unfinalized.len());
+	for fork_head_id in meta.unfinalized {
+		let storage_entry = storage.require_entry(&fork_head_id)?;
+		unfinalized.push(Fork {
+			best_block: None,
+			head: storage_entry.into_entry(fork_head_id),
+		});
+	}
+
+	Ok((finalized, unfinalized))
+}
+
+#[cfg(test)]
+pub mod tests {
+	use runtime_primitives::testing::{Header, Block as RawBlock};
+	use runtime_primitives::traits::Header as HeaderT;
+	use cache::list_storage::tests::{DummyStorage, FaultyStorage, DummyTransaction};
+	use super::*;
+
+	type Block = RawBlock<u64>;
+
+	/// Build an id for the block at `number`, deriving the hash directly from the number.
+	pub fn test_id(number: u64) -> ComplexBlockId<Block> {
+		let hash = From::from(number);
+		ComplexBlockId::new(hash, number)
+	}
+
+	/// Build the id of the canonical-chain block at the given number.
+	fn correct_id(number: u64) -> ComplexBlockId<Block> {
+		let header = test_header(number);
+		ComplexBlockId::new(header.hash(), number)
+	}
+
+	/// Build the id of block `number` on the fork identified by `fork_nonce`,
+	/// which branches off the canonical chain at `fork_from`.
+	fn fork_id(fork_nonce: u64, fork_from: u64, number: u64) -> ComplexBlockId<Block> {
+		let header = fork_header(fork_nonce, fork_from, number);
+		ComplexBlockId::new(header.hash(), number)
+	}
+
+	/// Build the canonical-chain header at the given number
+	/// (genesis gets the default parent hash).
+	fn test_header(number: u64) -> Header {
+		let parent_hash = if number == 0 {
+			Default::default()
+		} else {
+			test_header(number - 1).hash()
+		};
+		Header {
+			parent_hash,
+			number,
+			state_root: Default::default(),
+			extrinsics_root: Default::default(),
+			digest: Default::default(),
+		}
+	}
+
+	/// Build the header of block `number` on fork `fork_nonce`, branching off the
+	/// canonical chain at `fork_from`. The nonce is mixed into the state root so that
+	/// different forks produce different hashes.
+	fn fork_header(fork_nonce: u64, fork_from: u64, number: u64) -> Header {
+		// at the branch point, the fork header IS the canonical header
+		if fork_from == number {
+			return test_header(number);
+		}
+		Header {
+			parent_hash: fork_header(fork_nonce, fork_from, number - 1).hash(),
+			number,
+			state_root: (1 + fork_nonce).into(),
+			extrinsics_root: Default::default(),
+			digest: Default::default(),
+		}
+	}
+
+	#[test]
+	fn list_value_at_block_works() {
+		// In the diagrams below, [N] is a block that has a cache entry and plain N is a
+		// block without one; blocks in square brackets match the `with_entry` setup calls.
+		// when block is earlier than best finalized block AND it is not finalized
+		// --- 50 ---
+		// ----------> [100]
+		assert_eq!(ListCache::<_, u64, _>::new(DummyStorage::new(), 1024, test_id(100))
+			.value_at_block(&test_id(50)).unwrap(), None);
+		// when block is earlier than best finalized block AND it is finalized AND value is empty
+		// [30] ---- 50 ---> [100]
+		assert_eq!(ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(test_id(100)), Vec::new())
+				.with_id(50, 50.into())
+				.with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(100) })
+				.with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: None }),
+			1024, test_id(100)
+		).value_at_block(&test_id(50)).unwrap(), None);
+		// when block is earlier than best finalized block AND it is finalized AND value is some
+		// [30] ---- 50 ---> [100]
+		assert_eq!(ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(test_id(100)), Vec::new())
+				.with_id(50, 50.into())
+				.with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(100) })
+				.with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: Some(30) }),
+			1024, test_id(100)
+		).value_at_block(&test_id(50)).unwrap(), Some(30));
+		// when block is the best finalized block AND value is some
+		// ---> [100]
+		assert_eq!(ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(test_id(100)), Vec::new())
+				.with_id(100, 100.into())
+				.with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(100) })
+				.with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: Some(30) }),
+			1024, test_id(100)
+		).value_at_block(&test_id(100)).unwrap(), Some(100));
+		// when block is parallel to the best finalized block
+		// ---- 100
+		// ---> [100]
+		assert_eq!(ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(test_id(100)), Vec::new())
+				.with_id(50, 50.into())
+				.with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(100) })
+				.with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: Some(30) }),
+			1024, test_id(100)
+		).value_at_block(&ComplexBlockId::new(2.into(), 100)).unwrap(), None);
+
+		// when block is later than last finalized block AND there are no forks AND finalized value is None
+		// ---> [100] --- 200
+		assert_eq!(ListCache::<_, u64, _>::new(
+			DummyStorage::new()
+				.with_meta(Some(test_id(100)), Vec::new())
+				.with_id(50, 50.into())
+				.with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: None }),
+			1024, test_id(100)
+		).value_at_block(&test_id(200)).unwrap(), None);
+		// when block is later than last finalized block AND there are no forks AND finalized value is Some
+		// ---> [100] --- 200
+		assert_eq!(ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(test_id(100)), Vec::new())
+				.with_id(50, 50.into())
+				.with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(100) }),
+			1024, test_id(100)
+		).value_at_block(&test_id(200)).unwrap(), Some(100));
+
+		// when block is later than last finalized block AND there are no matching forks
+		// AND block is connected to finalized block AND finalized value is None
+		//           --- 3
+		// ---> [2] /---------> [4]
+		assert_eq!(ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(correct_id(2)), vec![correct_id(4)])
+				.with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: None })
+				.with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) })
+				.with_header(test_header(2))
+				.with_header(test_header(3))
+				.with_header(test_header(4))
+				.with_header(fork_header(0, 2, 3)),
+			1024, test_id(2)
+		).value_at_block(&fork_id(0, 2, 3)).unwrap(), None);
+		// when block is later than last finalized block AND there are no matching forks
+		// AND block is connected to finalized block AND finalized value is Some
+		//           --- 3
+		// ---> [2] /---------> [4]
+		assert_eq!(ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(correct_id(2)), vec![correct_id(4)])
+				.with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+				.with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) })
+				.with_header(test_header(2))
+				.with_header(test_header(3))
+				.with_header(test_header(4))
+				.with_header(fork_header(0, 2, 3)),
+			1024, test_id(2)
+		).value_at_block(&fork_id(0, 2, 3)).unwrap(), Some(2));
+		// when block is later than last finalized block AND there are no matching forks
+		// AND block is not connected to finalized block
+		//    ---   2  --- 3
+		// 1 /---> [2] ---------> [4]
+		assert_eq!(ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(correct_id(2)), vec![correct_id(4)])
+				.with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+				.with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) })
+				.with_header(test_header(1))
+				.with_header(test_header(2))
+				.with_header(test_header(3))
+				.with_header(test_header(4))
+				.with_header(fork_header(0, 1, 3))
+				.with_header(fork_header(0, 1, 2)),
+			1024, test_id(2)
+		).value_at_block(&fork_id(0, 1, 3)).unwrap(), None);
+
+		// when block is later than last finalized block AND it appends to unfinalized fork from the end
+		// AND unfinalized value is Some
+		// ---> [2] ---> [4] ---> 5
+		assert_eq!(ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(correct_id(2)), vec![correct_id(4)])
+				.with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+				.with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) })
+				.with_header(test_header(4))
+				.with_header(test_header(5)),
+			1024, test_id(2)
+		).value_at_block(&correct_id(5)).unwrap(), Some(4));
+		// when block is later than last finalized block AND it appends to unfinalized fork from the end
+		// AND unfinalized value is None
+		// ---> [2] ---> [4] ---> 5
+		assert_eq!(ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(correct_id(2)), vec![correct_id(4)])
+				.with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+				.with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: None })
+				.with_header(test_header(4))
+				.with_header(test_header(5)),
+			1024, test_id(2)
+		).value_at_block(&correct_id(5)).unwrap(), None);
+		// when block is later than last finalized block AND it fits to the middle of unfinalized fork
+		// AND unfinalized value is Some
+		// ---> [2] ---> [4] ---> 5 ---> [6]
+		assert_eq!(ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(correct_id(2)), vec![correct_id(6)])
+				.with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+				.with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) })
+				.with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(4)), value: None })
+				.with_header(test_header(4))
+				.with_header(test_header(5))
+				.with_header(test_header(6)),
+			1024, test_id(2)
+		).value_at_block(&correct_id(5)).unwrap(), Some(4));
+		// when block is later than last finalized block AND it fits to the middle of unfinalized fork
+		// AND unfinalized value is None
+		// ---> [2] ---> [4] ---> 5 ---> [6]
+		assert_eq!(ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(correct_id(2)), vec![correct_id(6)])
+				.with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+				.with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: None })
+				.with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(4)), value: Some(4) })
+				.with_header(test_header(4))
+				.with_header(test_header(5))
+				.with_header(test_header(6)),
+			1024, test_id(2)
+		).value_at_block(&correct_id(5)).unwrap(), None);
+		// when block is later than last finalized block AND it does not fit unfinalized fork
+		// AND it is connected to the finalized block AND finalized value is Some
+		// ---> [2] ----------> [4]
+		//          \--- 3
+		assert_eq!(ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(correct_id(2)), vec![correct_id(4)])
+				.with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) })
+				.with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+				.with_header(test_header(2))
+				.with_header(test_header(3))
+				.with_header(test_header(4))
+				.with_header(fork_header(0, 2, 3)),
+			1024, test_id(2)
+		).value_at_block(&fork_id(0, 2, 3)).unwrap(), Some(2));
+		// when block is later than last finalized block AND it does not fit unfinalized fork
+		// AND it is connected to the finalized block AND finalized value is None
+		// ---> [2] ----------> [4]
+		//          \--- 3
+		assert_eq!(ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(correct_id(2)), vec![correct_id(4)])
+				.with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) })
+				.with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: None })
+				.with_header(test_header(2))
+				.with_header(test_header(3))
+				.with_header(test_header(4))
+				.with_header(fork_header(0, 2, 3)),
+			1024, test_id(2)
+		).value_at_block(&fork_id(0, 2, 3)).unwrap(), None);
+	}
+
+	#[test]
+	fn list_on_block_insert_works() {
+		// when trying to insert block < finalized number
+		assert!(ListCache::new(DummyStorage::new(), 1024, test_id(100))
+			.on_block_insert(&mut DummyTransaction::new(), test_id(49), test_id(50), Some(50), false).unwrap().is_none());
+		// when trying to insert block @ finalized number
+		assert!(ListCache::new(DummyStorage::new(), 1024, test_id(100))
+			.on_block_insert(&mut DummyTransaction::new(), test_id(99), test_id(100), Some(100), false).unwrap().is_none());
+	
+		// when trying to insert non-final block AND it appends to the best block of unfinalized fork
+		// AND new value is the same as in the fork's best block
+		let mut cache = ListCache::new(
+			DummyStorage::new()
+				.with_meta(None, vec![test_id(4)])
+				.with_entry(test_id(4), StorageEntry { prev_valid_from: None, value: Some(4) }),
+			1024, test_id(2)
+		);
+		cache.unfinalized[0].best_block = Some(test_id(4));
+		let mut tx = DummyTransaction::new();
+		assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(4), false).unwrap(),
+			Some(CommitOperation::AppendNewBlock(0, test_id(5))));
+		assert!(tx.inserted_entries().is_empty());
+		assert!(tx.removed_entries().is_empty());
+		assert!(tx.updated_meta().is_none());
+		// when trying to insert non-final block AND it appends to the best block of unfinalized fork
+		// AND new value differs from the value at the fork's best block
+		let mut tx = DummyTransaction::new();
+		assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(5), false).unwrap(),
+			Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: test_id(5), value: Some(5) })));
+		assert_eq!(*tx.inserted_entries(), vec![test_id(5).hash].into_iter().collect());
+		assert!(tx.removed_entries().is_empty());
+		assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: None, unfinalized: vec![test_id(5)] }));
+
+		// when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork
+		// AND new value is the same as in the fork's best block
+		let cache = ListCache::new(
+			DummyStorage::new()
+				.with_meta(None, vec![correct_id(4)])
+				.with_entry(correct_id(4), StorageEntry { prev_valid_from: None, value: Some(4) })
+				.with_header(test_header(4)),
+			1024, test_id(2)
+		);
+		let mut tx = DummyTransaction::new();
+		assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(4), false).unwrap(),
+			Some(CommitOperation::AppendNewBlock(0, correct_id(5))));
+		assert!(tx.inserted_entries().is_empty());
+		assert!(tx.removed_entries().is_empty());
+		assert!(tx.updated_meta().is_none());
+		// when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork
+		// AND new value differs from the value at the fork's best block
+		let mut tx = DummyTransaction::new();
+		assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(5), false).unwrap(),
+			Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(5), value: Some(5) })));
+		assert_eq!(*tx.inserted_entries(), vec![correct_id(5).hash].into_iter().collect());
+		assert!(tx.removed_entries().is_empty());
+		assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: None, unfinalized: vec![correct_id(5)] }));
+
+		// when trying to insert non-final block AND it forks unfinalized fork
+		let cache = ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(correct_id(2)), vec![correct_id(4)])
+				.with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) })
+				.with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+				.with_header(test_header(2))
+				.with_header(test_header(3))
+				.with_header(test_header(4)),
+			1024, correct_id(2)
+		);
+		let mut tx = DummyTransaction::new();
+		assert_eq!(cache.on_block_insert(&mut tx, correct_id(3), fork_id(0, 3, 4), Some(14), false).unwrap(),
+			Some(CommitOperation::AddNewFork(Entry { valid_from: fork_id(0, 3, 4), value: Some(14) })));
+		assert_eq!(*tx.inserted_entries(), vec![fork_id(0, 3, 4).hash].into_iter().collect());
+		assert!(tx.removed_entries().is_empty());
+		assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(4), fork_id(0, 3, 4)] }));
+
+		// when trying to insert non-final block AND there are no unfinalized forks
+		// AND value is the same as last finalized
+		let cache = ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(correct_id(2)), vec![])
+				.with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }),
+			1024, correct_id(2)
+		);
+		let mut tx = DummyTransaction::new();
+		assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), false).unwrap(), None);
+		assert!(tx.inserted_entries().is_empty());
+		assert!(tx.removed_entries().is_empty());
+		assert!(tx.updated_meta().is_none());
+		// when trying to insert non-final block AND there are no unfinalized forks
+		// AND value differs from last finalized
+		let cache = ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(correct_id(2)), vec![])
+				.with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }),
+			1024, correct_id(2)
+		);
+		let mut tx = DummyTransaction::new();
+		assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), false).unwrap(),
+			Some(CommitOperation::AddNewFork(Entry { valid_from: correct_id(3), value: Some(3) })));
+		assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect());
+		assert!(tx.removed_entries().is_empty());
+		assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(3)] }));
+
+		// when inserting finalized entry AND there are no previous finalized entries
+		let cache = ListCache::new(DummyStorage::new(), 1024, correct_id(2));
+		let mut tx = DummyTransaction::new();
+		assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), true).unwrap(),
+			Some(CommitOperation::BlockFinalized(correct_id(3), Some(Entry { valid_from: correct_id(3), value: Some(3) }), Default::default())));
+		assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect());
+		assert!(tx.removed_entries().is_empty());
+		assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] }));
+		// when inserting finalized entry AND value is the same as in previous finalized
+		let cache = ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(correct_id(2)), vec![])
+				.with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }),
+			1024, correct_id(2)
+		);
+		let mut tx = DummyTransaction::new();
+		assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), true).unwrap(),
+			Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default())));
+		assert!(tx.inserted_entries().is_empty());
+		assert!(tx.removed_entries().is_empty());
+		assert!(tx.updated_meta().is_none());
+		// when inserting finalized entry AND value differs from previous finalized
+		let mut tx = DummyTransaction::new();
+		assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), true).unwrap(),
+			Some(CommitOperation::BlockFinalized(correct_id(3), Some(Entry { valid_from: correct_id(3), value: Some(3) }), Default::default())));
+		assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect());
+		assert!(tx.removed_entries().is_empty());
+		assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] }));
+
+		// inserting finalized entry removes abandoned fork EVEN if new entry is not inserted
+		let cache = ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)])
+				.with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+				.with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: Some(13) }),
+			1024, correct_id(2)
+		);
+		let mut tx = DummyTransaction::new();
+		assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), true).unwrap(),
+			Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect())));
+	}
+
+	#[test]
+	fn list_on_block_finalized_works() {
+		// finalization does not finalize the entry if it does not exist
+		let cache = ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(correct_id(2)), vec![correct_id(5)])
+				.with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+				.with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) }),
+			1024, correct_id(2)
+		);
+		let mut tx = DummyTransaction::new();
+		assert_eq!(cache.on_block_finalize(&mut tx, correct_id(2), correct_id(3)).unwrap(),
+			Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default())));
+		assert!(tx.inserted_entries().is_empty());
+		assert!(tx.removed_entries().is_empty());
+		assert!(tx.updated_meta().is_none());
+		// finalization finalizes entry
+		let cache = ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(correct_id(2)), vec![correct_id(5)])
+				.with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+				.with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) }),
+			1024, correct_id(4)
+		);
+		let mut tx = DummyTransaction::new();
+		assert_eq!(cache.on_block_finalize(&mut tx, correct_id(4), correct_id(5)).unwrap(),
+			Some(CommitOperation::BlockFinalized(correct_id(5), Some(Entry { valid_from: correct_id(5), value: Some(5) }), vec![0].into_iter().collect())));
+		assert!(tx.inserted_entries().is_empty());
+		assert!(tx.removed_entries().is_empty());
+		assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(5)), unfinalized: vec![] }));
+		// finalization removes abandoned forks
+		let cache = ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)])
+				.with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+				.with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: Some(13) }),
+			1024, correct_id(2)
+		);
+		let mut tx = DummyTransaction::new();
+		assert_eq!(cache.on_block_finalize(&mut tx, correct_id(2), correct_id(3)).unwrap(),
+			Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect())));
+	}
+
+	#[test]
+	fn list_transaction_commit_works() {
+		let mut cache = ListCache::new(
+			DummyStorage::new()
+				.with_meta(Some(correct_id(2)), vec![correct_id(5), correct_id(6)])
+				.with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) })
+				.with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) })
+				.with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(5)), value: Some(6) }),
+			1024, correct_id(2)
+		);
+
+		// when new block is appended to unfinalized fork
+		cache.on_transaction_commit(CommitOperation::AppendNewBlock(0, correct_id(6)));
+		assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(6)));
+		// when new entry is appended to unfinalized fork
+		cache.on_transaction_commit(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(7), value: Some(7) }));
+		assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(7)));
+		assert_eq!(cache.unfinalized[0].head, Entry { valid_from: correct_id(7), value: Some(7) });
+		// when new fork is added
+		cache.on_transaction_commit(CommitOperation::AddNewFork(Entry { valid_from: correct_id(10), value: Some(10) }));
+		assert_eq!(cache.unfinalized[2].best_block, Some(correct_id(10)));
+		assert_eq!(cache.unfinalized[2].head, Entry { valid_from: correct_id(10), value: Some(10) });
+		// when block is finalized + entry is finalized + unfinalized forks are deleted
+		cache.on_transaction_commit(CommitOperation::BlockFinalized(correct_id(20), Some(Entry { valid_from: correct_id(20), value: Some(20) }), vec![0, 1, 2].into_iter().collect()));
+		assert_eq!(cache.best_finalized_block, correct_id(20));
+		assert_eq!(cache.best_finalized_entry, Some(Entry { valid_from: correct_id(20), value: Some(20) }));
+		assert!(cache.unfinalized.is_empty());
+	}
+
+	#[test]
+	fn list_find_unfinalized_fork_works() {
+		// in the diagrams: [N] = block with a cache entry, plain N = block without one
+		// ----------> [3]
+		// --- [2] ---------> 4 ---> [5]
+		assert_eq!(ListCache::new(
+			DummyStorage::new()
+				.with_meta(None, vec![fork_id(0, 1, 3), correct_id(5)])
+				.with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: Some(13) })
+				.with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) })
+				.with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: None })
+				.with_header(test_header(2))
+				.with_header(test_header(3))
+				.with_header(test_header(4))
+				.with_header(test_header(5)),
+			1024, correct_id(0)
+		).find_unfinalized_fork(&correct_id(4)).unwrap().unwrap().head.valid_from, correct_id(5));
+		// --- [2] ---------------> [5]
+		// ----------> [3] ---> 4
+		assert_eq!(ListCache::new(
+			DummyStorage::new()
+				.with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)])
+				.with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: Some(13) })
+				.with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) })
+				.with_entry(correct_id(2), StorageEntry { prev_valid_from: Some(correct_id(1)), value: Some(2) })
+				.with_header(test_header(2))
+				.with_header(test_header(3))
+				.with_header(test_header(4))
+				.with_header(test_header(5))
+				.with_header(fork_header(0, 1, 2))
+				.with_header(fork_header(0, 1, 3))
+				.with_header(fork_header(0, 1, 4)),
+			1024, correct_id(0)
+		).find_unfinalized_fork(&fork_id(0, 1, 4)).unwrap().unwrap().head.valid_from, fork_id(0, 1, 3));
+		// --- [2] ---------------> [5]
+		// ----------> [3]
+		// -----------------> 4
+		assert!(ListCache::new(
+			DummyStorage::new()
+				.with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)])
+				.with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: Some(13) })
+				.with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) })
+				.with_entry(correct_id(2), StorageEntry { prev_valid_from: Some(correct_id(1)), value: Some(2) })
+				.with_header(test_header(2))
+				.with_header(test_header(3))
+				.with_header(test_header(4))
+				.with_header(test_header(5))
+				.with_header(fork_header(0, 1, 3))
+				.with_header(fork_header(0, 1, 4))
+				.with_header(fork_header(1, 1, 2))
+				.with_header(fork_header(1, 1, 3))
+				.with_header(fork_header(1, 1, 4)),
+			1024, correct_id(0)
+		).find_unfinalized_fork(&fork_id(1, 1, 4)).unwrap().is_none());
+	}
+
+	#[test]
+	fn fork_matches_works() {
+		// when block is not within list range
+		let storage = DummyStorage::new()
+			.with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) })
+			.with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: Some(50) });
+		assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } }
+			.matches(&storage, &test_id(20)).unwrap(), false);
+		// when block is not connected to the begin block
+		let storage = DummyStorage::new()
+			.with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) })
+			.with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) })
+			.with_header(test_header(5))
+			.with_header(test_header(4))
+			.with_header(test_header(3))
+			.with_header(fork_header(0, 2, 4))
+			.with_header(fork_header(0, 2, 3));
+		assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } }
+			.matches(&storage, &fork_id(0, 2, 4)).unwrap(), false);
+		// when block is not connected to the end block
+		let storage = DummyStorage::new()
+			.with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) })
+			.with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) })
+			.with_header(test_header(5))
+			.with_header(test_header(4))
+			.with_header(test_header(3))
+			.with_header(fork_header(0, 3, 4));
+		assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } }
+			.matches(&storage, &fork_id(0, 3, 4)).unwrap(), false);
+		// when block is connected to the begin block AND end is open
+		let storage = DummyStorage::new()
+			.with_entry(correct_id(5), StorageEntry { prev_valid_from: None, value: Some(100) })
+			.with_header(test_header(5))
+			.with_header(test_header(6));
+		assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } }
+			.matches(&storage, &correct_id(6)).unwrap(), true);
+		// when block is connected to the begin block AND to the end block
+		let storage = DummyStorage::new()
+			.with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) })
+			.with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) })
+			.with_header(test_header(5))
+			.with_header(test_header(4))
+			.with_header(test_header(3));
+		assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } }
+			.matches(&storage, &correct_id(4)).unwrap(), true);
+	}
+
+	#[test]
+	fn fork_try_append_works() {
+		// when best block is unknown
+		assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } }
+			.try_append(&test_id(100)), false);
+		// when best block is known but different
+		assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } }
+			.try_append(&test_id(101)), false);
+		// when best block is known and the same
+		assert_eq!(Fork::<_, u64> { best_block: Some(test_id(100)), head: Entry { valid_from: test_id(100), value: None } }
+			.try_append(&test_id(100)), true);
+	}
+
+	#[test]
+	fn fork_try_append_or_fork_works() {
+		// when there's no entry before parent
+		let storage = DummyStorage::new()
+			.with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) })
+			.with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: Some(50) });
+		assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } }
+			.try_append_or_fork(&storage, &test_id(30), None).unwrap(), None);
+		// when parent does not belong to the fork
+		let storage = DummyStorage::new()
+			.with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) })
+			.with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) })
+			.with_header(test_header(5))
+			.with_header(test_header(4))
+			.with_header(test_header(3))
+			.with_header(fork_header(0, 2, 4))
+			.with_header(fork_header(0, 2, 3));
+		assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } }
+			.try_append_or_fork(&storage, &fork_id(0, 2, 4), None).unwrap(), None);
+		// when the entry before parent is the head entry
+		let storage = DummyStorage::new()
+			.with_entry(ComplexBlockId::new(test_header(5).hash(), 5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) })
+			.with_header(test_header(6))
+			.with_header(test_header(5));
+		assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } }
+			.try_append_or_fork(&storage, &correct_id(6), None).unwrap(), Some(ForkAppendResult::Append));
+		// when the parent located after last finalized entry
+		let storage = DummyStorage::new()
+			.with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) })
+			.with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) })
+			.with_header(test_header(6))
+			.with_header(test_header(5))
+			.with_header(test_header(4))
+			.with_header(test_header(3))
+			.with_header(fork_header(0, 4, 5));
+		assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(6), value: Some(100) } }
+			.try_append_or_fork(&storage, &fork_id(0, 4, 5), None).unwrap(), Some(ForkAppendResult::Fork(ComplexBlockId::new(test_header(3).hash(), 3))));
+		// when the parent located before last finalized entry
+		let storage = DummyStorage::new()
+			.with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) })
+			.with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) })
+			.with_header(test_header(6))
+			.with_header(test_header(5))
+			.with_header(test_header(4))
+			.with_header(test_header(3))
+			.with_header(fork_header(0, 4, 5));
+		assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(6), value: Some(100) } }
+			.try_append_or_fork(&storage, &fork_id(0, 4, 5), Some(3)).unwrap(), None);
+	}
+
+	#[test]
+	fn fork_destroy_works() {
+		// when we reached finalized entry without iterations
+		let storage = DummyStorage::new().with_id(100, 100.into());
+		let mut tx = DummyTransaction::new();
+		Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } }
+			.destroy(&storage, &mut tx, Some(200)).unwrap();
+		assert!(tx.removed_entries().is_empty());
+		// when we reach finalized entry with iterations
+		let storage = DummyStorage::new()
+			.with_id(10, 10.into())
+			.with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) })
+			.with_entry(test_id(50), StorageEntry { prev_valid_from: Some(test_id(20)), value: Some(50) })
+			.with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: Some(20) })
+			.with_entry(test_id(10), StorageEntry { prev_valid_from: Some(test_id(5)), value: Some(10) })
+			.with_entry(test_id(5), StorageEntry { prev_valid_from: Some(test_id(3)), value: Some(5) })
+			.with_entry(test_id(3), StorageEntry { prev_valid_from: None, value: None });
+		let mut tx = DummyTransaction::new();
+		Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } }
+			.destroy(&storage, &mut tx, Some(200)).unwrap();
+		assert_eq!(*tx.removed_entries(),
+			vec![test_id(100).hash, test_id(50).hash, test_id(20).hash].into_iter().collect());
+		// when we reach beginning of fork before finalized block
+		let storage = DummyStorage::new()
+			.with_id(10, 10.into())
+			.with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) })
+			.with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: Some(50) });
+		let mut tx = DummyTransaction::new();
+		Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } }
+			.destroy(&storage, &mut tx, Some(200)).unwrap();
+		assert_eq!(*tx.removed_entries(),
+			vec![test_id(100).hash, test_id(50).hash].into_iter().collect());
+	}
+
+	#[test]
+	fn is_connected_to_block_fails() {
+		// when storage returns error
+		assert!(chain::is_connected_to_block::<_, u64, _>(&FaultyStorage, &test_id(1), &test_id(100)).is_err());
+		// when there's no header in the storage
+		assert!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new(), &test_id(1), &test_id(100)).is_err());
+	}
+
+	#[test]
+	fn is_connected_to_block_works() {
+		// when without iterations we end up with different block
+		assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new()
+			.with_header(test_header(1)),
+			&test_id(1), &correct_id(1)).unwrap(), false);
+		// when with ASC iterations we end up with different block
+		assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new()
+			.with_header(test_header(0))
+			.with_header(test_header(1))
+			.with_header(test_header(2)),
+			&test_id(0), &correct_id(2)).unwrap(), false);
+		// when with DESC iterations we end up with different block
+		assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new()
+			.with_header(test_header(0))
+			.with_header(test_header(1))
+			.with_header(test_header(2)),
+			&correct_id(2), &test_id(0)).unwrap(), false);
+		// when without iterations we end up with the same block
+		assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new()
+			.with_header(test_header(1)),
+			&correct_id(1), &correct_id(1)).unwrap(), true);
+		// when with ASC iterations we end up with the same block
+		assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new()
+			.with_header(test_header(0))
+			.with_header(test_header(1))
+			.with_header(test_header(2)),
+			&correct_id(0), &correct_id(2)).unwrap(), true);
+		// when with DESC iterations we end up with the same block
+		assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new()
+			.with_header(test_header(0))
+			.with_header(test_header(1))
+			.with_header(test_header(2)),
+			&correct_id(2), &correct_id(0)).unwrap(), true);
+	}
+
+	#[test]
+	fn is_finalized_block_fails() {
+		// when storage returns error
+		assert!(chain::is_finalized_block::<_, u64, _>(&FaultyStorage, &test_id(1), 100).is_err());
+
+	}
+
+	#[test]
+	fn is_finalized_block_works() {
+		// when number of block is larger than last finalized block
+		assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(100), 1).unwrap(), false);
+		// when there's no hash for this block number in the database
+		assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(1), 100).unwrap(), false);
+		// when there's different hash for this block number in the database
+		assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new()
+			.with_id(1, From::from(2)), &test_id(1), 100).unwrap(), false);
+		// when there's the same hash for this block number in the database
+		assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new()
+			.with_id(1, From::from(1)), &test_id(1), 100).unwrap(), true);
+	}
+
+	#[test]
+	fn read_forks_fails() {
+		// when storage returns error during finalized entry read
+		assert!(read_forks::<Block, u64, _>(&FaultyStorage, Metadata {
+			finalized: Some(test_id(1)),
+			unfinalized: vec![],
+		}).is_err());
+		// when storage returns error during unfinalized entry read
+		assert!(read_forks::<Block, u64, _>(&FaultyStorage, Metadata {
+			finalized: None,
+			unfinalized: vec![test_id(1)],
+		}).is_err());
+		// when finalized entry is not found
+		assert!(read_forks::<Block, u64, _>(&DummyStorage::new(), Metadata {
+			finalized: Some(test_id(1)),
+			unfinalized: vec![],
+		}).is_err());
+		// when unfinalized entry is not found
+		assert!(read_forks::<Block, u64, _>(&DummyStorage::new(), Metadata {
+			finalized: None,
+			unfinalized: vec![test_id(1)],
+		}).is_err());
+	}
+
+	#[test]
+	fn read_forks_works() {
+		let storage = DummyStorage::new()
+			.with_entry(test_id(10), StorageEntry { prev_valid_from: Some(test_id(1)), value: Some(11) })
+			.with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(2)), value: None })
+			.with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: Some(33) });
+		let expected = (
+			Some(Entry { valid_from: test_id(10), value: Some(11) }),
+			vec![
+				Fork { best_block: None, head: Entry { valid_from: test_id(20), value: None } },
+				Fork { best_block: None, head: Entry { valid_from: test_id(30), value: Some(33) } },
+			],
+		);
+
+		assert_eq!(expected, read_forks(&storage, Metadata {
+			finalized: Some(test_id(10)),
+			unfinalized: vec![test_id(20), test_id(30)],
+		}).unwrap());
+	}
+
+	#[test]
+	fn ancient_entries_are_pruned() {
+		let cache = ListCache::new(DummyStorage::new()
+			.with_id(10, 10.into())
+			.with_id(20, 20.into())
+			.with_id(30, 30.into())
+			.with_entry(test_id(10), StorageEntry { prev_valid_from: None, value: Some(10) })
+			.with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: Some(20) })
+			.with_entry(test_id(30), StorageEntry { prev_valid_from: Some(test_id(20)), value: Some(30) }),
+		10, test_id(9));
+		let mut tx = DummyTransaction::new();
+
+		// when finalizing entry #10: no entries pruned
+		cache.prune_finalized_entries(&mut tx, &test_id(10));
+		assert!(tx.removed_entries().is_empty());
+		assert!(tx.inserted_entries().is_empty());
+		// when finalizing entry #19: no entries pruned
+		cache.prune_finalized_entries(&mut tx, &test_id(19));
+		assert!(tx.removed_entries().is_empty());
+		assert!(tx.inserted_entries().is_empty());
+		// when finalizing entry #20: no entries pruned
+		cache.prune_finalized_entries(&mut tx, &test_id(20));
+		assert!(tx.removed_entries().is_empty());
+		assert!(tx.inserted_entries().is_empty());
+		// when finalizing entry #30: entry 10 pruned + entry 20 is truncated
+		cache.prune_finalized_entries(&mut tx, &test_id(30));
+		assert_eq!(*tx.removed_entries(), vec![test_id(10).hash].into_iter().collect());
+		assert_eq!(*tx.inserted_entries(), vec![test_id(20).hash].into_iter().collect());
+	}
+}
diff --git a/substrate/core/client/db/src/cache/list_entry.rs b/substrate/core/client/db/src/cache/list_entry.rs
new file mode 100644
index 0000000000000000000000000000000000000000..bf29885fcffcb424b9184ed2362abb64e9c27dc5
--- /dev/null
+++ b/substrate/core/client/db/src/cache/list_entry.rs
@@ -0,0 +1,164 @@
+// Copyright 2017 Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate.  If not, see <http://www.gnu.org/licenses/>.
+
+//! List-cache storage entries.
+
+use client::error::Result as ClientResult;
+use runtime_primitives::traits::{Block as BlockT, NumberFor};
+
+use cache::{CacheItemT, ComplexBlockId};
+use cache::list_storage::{Storage};
+
+/// Single list-based cache entry.
+#[derive(Debug)]
+#[cfg_attr(test, derive(PartialEq))]
+pub struct Entry<Block: BlockT, T> {
+	/// first block, when this value became actual
+	pub valid_from: ComplexBlockId<Block>,
+	/// None means that we do not know the value starting from `valid_from` block
+	pub value: Option<T>,
+}
+
+/// Internal representation of the single list-based cache entry. The entry points to the
+/// previous entry in the cache, allowing us to traverse back in time in list-style.
+#[derive(Debug, Encode, Decode)]
+#[cfg_attr(test, derive(Clone, PartialEq))]
+pub struct StorageEntry<Block: BlockT, T: CacheItemT> {
+	/// None if valid from the beginning
+	pub prev_valid_from: Option<ComplexBlockId<Block>>,
+	/// None means that we do not know the value starting from `valid_from` block
+	pub value: Option<T>,
+}
+
+impl<Block: BlockT, T: CacheItemT> Entry<Block, T> {
+	/// Returns Some if the entry should be updated with the new value.
+	pub fn try_update(&self, value: Option<T>) -> Option<StorageEntry<Block, T>> {
+		match self.value == value {
+			true => None,
+			false => Some(StorageEntry {
+				prev_valid_from: Some(self.valid_from.clone()),
+				value,
+			}),
+		}
+	}
+
+	/// Wrapper that calls search_before to get range where the given block fits.
+	pub fn search_best_range_before<S: Storage<Block, T>>(
+		&self,
+		storage: &S,
+		block: NumberFor<Block>,
+	) -> ClientResult<Option<(ComplexBlockId<Block>, Option<ComplexBlockId<Block>>)>> {
+		Ok(self.search_best_before(storage, block, false)?
+			.map(|(entry, next)| (entry.valid_from, next)))
+	}
+
+	/// Searches the list, ending with THIS entry for the best entry preceding (or at)
+	/// given block number.
+	/// If the entry is found, result is the entry and the block id of next entry (if exists).
+	/// NOTE that this function does not check that the passed block is actually linked to
+	/// the blocks it found.
+	pub fn search_best_before<S: Storage<Block, T>>(
+		&self,
+		storage: &S,
+		block: NumberFor<Block>,
+		require_value: bool,
+	) -> ClientResult<Option<(Entry<Block, T>, Option<ComplexBlockId<Block>>)>> {
+		// we're looking for the best value
+		let mut next = None;
+		let mut current = self.valid_from.clone();
+		if block >= self.valid_from.number {
+			let value = if require_value { self.value.clone() } else { None };
+			return Ok(Some((Entry { valid_from: current, value }, next)));
+		}
+
+		// else - travel back in time
+		loop {
+			let entry = storage.require_entry(&current)?;
+			if block >= current.number {
+				return Ok(Some((Entry { valid_from: current, value: entry.value }, next)));
+			}
+
+			next = Some(current);
+			current = match entry.prev_valid_from {
+				Some(prev_valid_from) => prev_valid_from,
+				None => return Ok(None),
+			};
+		}
+	}
+}
+
+impl<Block: BlockT, T: CacheItemT> StorageEntry<Block, T> {
+	/// Converts storage entry into an entry, valid from given block.
+	pub fn into_entry(self, valid_from: ComplexBlockId<Block>) -> Entry<Block, T> {
+		Entry {
+			valid_from,
+			value: self.value,
+		}
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use cache::list_cache::tests::test_id;
+	use cache::list_storage::tests::{DummyStorage, FaultyStorage};
+	use super::*;
+
+	#[test]
+	fn entry_try_update_works() {
+		// when trying to update with the same None value
+		assert_eq!(Entry::<_, u64> { valid_from: test_id(1), value: None }.try_update(None), None);
+		// when trying to update with the same Some value
+		assert_eq!(Entry { valid_from: test_id(1), value: Some(1) }.try_update(Some(1)), None);
+		// when trying to update with different None value
+		assert_eq!(Entry { valid_from: test_id(1), value: Some(1) }.try_update(None),
+			Some(StorageEntry { prev_valid_from: Some(test_id(1)), value: None }));
+		// when trying to update with different Some value
+		assert_eq!(Entry { valid_from: test_id(1), value: Some(1) }.try_update(Some(2)),
+			Some(StorageEntry { prev_valid_from: Some(test_id(1)), value: Some(2) }));
+	}
+
+	#[test]
+	fn entry_search_best_before_fails() {
+		// when storage returns error
+		assert!(Entry::<_, u64> { valid_from: test_id(100), value: None }.search_best_before(&FaultyStorage, 50, false).is_err());
+	}
+
+	#[test]
+	fn entry_search_best_before_works() {
+		// when block is better than our best block AND value is not required
+		assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: Some(100) }
+			.search_best_before(&DummyStorage::new(), 150, false).unwrap(),
+		Some((Entry::<_, u64> { valid_from: test_id(100), value: None }, None)));
+		// when block is better than our best block AND value is required
+		assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: Some(100) }
+			.search_best_before(&DummyStorage::new(), 150, true).unwrap(),
+		Some((Entry::<_, u64> { valid_from: test_id(100), value: Some(100) }, None)));
+		// when block is found between two entries
+		assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: Some(100) }
+			.search_best_before(&DummyStorage::new()
+				.with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) })
+				.with_entry(test_id(50), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(50) }),
+			75, false).unwrap(),
+		Some((Entry::<_, u64> { valid_from: test_id(50), value: Some(50) }, Some(test_id(100)))));
+		// when block is not found
+		assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: Some(100) }
+			.search_best_before(&DummyStorage::new()
+				.with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) })
+				.with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: Some(50) }),
+			30, true).unwrap(),
+		None);
+	}
+}
diff --git a/substrate/core/client/db/src/cache/list_storage.rs b/substrate/core/client/db/src/cache/list_storage.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b67fefb301083ca939b0692756acd0993bc930fa
--- /dev/null
+++ b/substrate/core/client/db/src/cache/list_storage.rs
@@ -0,0 +1,378 @@
+// Copyright 2017 Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate.  If not, see <http://www.gnu.org/licenses/>.
+
+//! List-cache storage definition and implementation.
+
+use std::sync::Arc;
+
+use kvdb::{KeyValueDB, DBTransaction};
+
+use client::error::{Error as ClientError, ErrorKind as ClientErrorKind, Result as ClientResult};
+use codec::{Encode, Decode};
+use runtime_primitives::generic::BlockId;
+use runtime_primitives::traits::{Block as BlockT, NumberFor};
+use utils::{self, db_err, meta_keys};
+
+use cache::{CacheItemT, ComplexBlockId};
+use cache::list_cache::{CommitOperation, Fork};
+use cache::list_entry::{Entry, StorageEntry};
+
+/// Single list-cache metadata.
+#[derive(Debug)]
+#[cfg_attr(test, derive(Clone, PartialEq))]
+pub struct Metadata<Block: BlockT> {
+	/// Block at which best finalized entry is stored.
+	pub finalized: Option<ComplexBlockId<Block>>,
+	/// A set of blocks at which best unfinalized entries are stored.
+	pub unfinalized: Vec<ComplexBlockId<Block>>,
+}
+
+/// Readonly list-cache storage trait.
+pub trait Storage<Block: BlockT, T: CacheItemT> {
+	/// Reads hash of the block at given number.
+	fn read_id(&self, at: NumberFor<Block>) -> ClientResult<Option<Block::Hash>>;
+
+	/// Reads header of the block with given hash.
+	fn read_header(&self, at: &Block::Hash) -> ClientResult<Option<Block::Header>>;
+
+	/// Reads cache metadata: best finalized entry (if some) and the list.
+	fn read_meta(&self) -> ClientResult<Metadata<Block>>;
+
+	/// Reads cache entry from the storage.
+	fn read_entry(&self, at: &ComplexBlockId<Block>) -> ClientResult<Option<StorageEntry<Block, T>>>;
+
+	/// Reads referenced (and thus existing) cache entry from the storage.
+	fn require_entry(&self, at: &ComplexBlockId<Block>) -> ClientResult<StorageEntry<Block, T>> {
+		self.read_entry(at)
+			.and_then(|entry| entry
+				.ok_or_else(|| ClientError::from(
+					ClientErrorKind::Backend(format!("Referenced cache entry at {:?} is not found", at)))))
+	}
+}
+
+/// List-cache storage transaction.
+pub trait StorageTransaction<Block: BlockT, T: CacheItemT> {
+	/// Insert storage entry at given block.
+	fn insert_storage_entry(&mut self, at: &ComplexBlockId<Block>, entry: &StorageEntry<Block, T>);
+
+	/// Delete storage entry at given block.
+	fn remove_storage_entry(&mut self, at: &ComplexBlockId<Block>);
+
+	/// Update metadata of the cache.
+	fn update_meta(
+		&mut self,
+		best_finalized_entry: Option<&Entry<Block, T>>,
+		unfinalized: &[Fork<Block, T>],
+		operation: &CommitOperation<Block, T>,
+	);
+}
+
+/// A set of columns used by the DbStorage.
+#[derive(Debug)]
+pub struct DbColumns {
+	/// Column holding cache meta.
+	pub meta: Option<u32>,
+	/// Column holding the mapping of { block number => block hash } for blocks of the best chain.
+	pub hash_lookup: Option<u32>,
+	/// Column holding the mapping of { block hash => block header }.
+	pub header: Option<u32>,
+	/// Column holding cache entries.
+	pub cache: Option<u32>,
+}
+
+/// Database-backed list cache storage.
+pub struct DbStorage {
+	name: Vec<u8>,
+	meta_key: Vec<u8>,
+	db: Arc<KeyValueDB>,
+	columns: DbColumns,
+}
+
+impl DbStorage {
+	/// Create new database-backed list cache storage.
+	pub fn new(name: Vec<u8>, db: Arc<KeyValueDB>, columns: DbColumns) -> Self {
+		let meta_key = meta::key(&name);
+		DbStorage { name, meta_key, db, columns }
+	}
+
+	/// Get reference to the database.
+	pub fn db(&self) -> &Arc<KeyValueDB> { &self.db }
+
+	/// Get reference to the database columns.
+	pub fn columns(&self) -> &DbColumns { &self.columns }
+
+	/// Encode block id for storing as a key in cache column.
+	/// We append prefix to the actual encoding to allow several caches
+	/// store entries in the same column.
+	pub fn encode_block_id<Block: BlockT>(&self, block: &ComplexBlockId<Block>) -> Vec<u8> {
+		let mut encoded = self.name.clone();
+		encoded.extend(block.hash.as_ref());
+		encoded
+	}
+}
+
+impl<Block: BlockT, T: CacheItemT> Storage<Block, T> for DbStorage {
+	fn read_id(&self, at: NumberFor<Block>) -> ClientResult<Option<Block::Hash>> {
+		utils::read_id::<Block>(&*self.db, self.columns.hash_lookup, BlockId::Number(at))
+	}
+
+	fn read_header(&self, at: &Block::Hash) -> ClientResult<Option<Block::Header>> {
+		utils::read_header::<Block>(&*self.db, self.columns.hash_lookup, self.columns.header, BlockId::Hash(*at))
+	}
+
+	fn read_meta(&self) -> ClientResult<Metadata<Block>> {
+		self.db.get(self.columns.meta, &self.meta_key)
+			.map_err(db_err)
+			.and_then(|meta| match meta {
+				Some(meta) => meta::decode(&*meta),
+				None => Ok(Metadata {
+					finalized: None,
+					unfinalized: Vec::new(),
+				}),
+			})
+	}
+
+	fn read_entry(&self, at: &ComplexBlockId<Block>) -> ClientResult<Option<StorageEntry<Block, T>>> {
+		self.db.get(self.columns.cache, &self.encode_block_id(at))
+			.map_err(db_err)
+			.and_then(|entry| match entry {
+				Some(entry) => StorageEntry::<Block, T>::decode(&mut &entry[..])
+					.ok_or_else(|| ClientErrorKind::Backend("Failed to decode cache entry".into()).into())
+					.map(Some),
+				None => Ok(None),
+			})
+	}
+}
+
+/// Database-backed list cache storage transaction.
+pub struct DbStorageTransaction<'a> {
+	storage: &'a DbStorage,
+	tx: &'a mut DBTransaction,
+}
+
+impl<'a> DbStorageTransaction<'a> {
+	/// Create new database transaction.
+	pub fn new(storage: &'a DbStorage, tx: &'a mut DBTransaction) -> Self {
+		DbStorageTransaction { storage, tx }
+	}
+}
+
+impl<'a, Block: BlockT, T: CacheItemT> StorageTransaction<Block, T> for DbStorageTransaction<'a> {
+	fn insert_storage_entry(&mut self, at: &ComplexBlockId<Block>, entry: &StorageEntry<Block, T>) {
+		self.tx.put(self.storage.columns.cache, &self.storage.encode_block_id(at), &entry.encode());
+	}
+
+	fn remove_storage_entry(&mut self, at: &ComplexBlockId<Block>) {
+		self.tx.delete(self.storage.columns.cache, &self.storage.encode_block_id(at));
+	}
+
+	fn update_meta(
+		&mut self,
+		best_finalized_entry: Option<&Entry<Block, T>>,
+		unfinalized: &[Fork<Block, T>],
+		operation: &CommitOperation<Block, T>,
+	) {
+		self.tx.put(
+			self.storage.columns.meta,
+			&self.storage.meta_key,
+			&meta::encode(best_finalized_entry, unfinalized, operation));
+	}
+}
+
+/// Metadata related functions.
+mod meta {
+	use super::*;
+
+	/// Convert cache name into cache metadata key.
+	pub fn key(name: &[u8]) -> Vec<u8> {
+		let mut key_name = meta_keys::CACHE_META_PREFIX.to_vec();
+		key_name.extend_from_slice(name);
+		key_name
+	}
+
+	/// Encode cache metadata 'applying' commit operation before encoding.
+	pub fn encode<Block: BlockT, T: CacheItemT>(
+		best_finalized_entry: Option<&Entry<Block, T>>,
+		unfinalized: &[Fork<Block, T>],
+		op: &CommitOperation<Block, T>
+	) -> Vec<u8> {
+		let mut finalized = best_finalized_entry.as_ref().map(|entry| &entry.valid_from);
+		let mut unfinalized = unfinalized.iter().map(|fork| &fork.head().valid_from).collect::<Vec<_>>();
+
+		match op {
+			CommitOperation::AppendNewBlock(_, _) => (),
+			CommitOperation::AppendNewEntry(index, ref entry) => {
+				unfinalized[*index] = &entry.valid_from;
+			},
+			CommitOperation::AddNewFork(ref entry) => {
+				unfinalized.push(&entry.valid_from);
+			},
+			CommitOperation::BlockFinalized(_, ref finalizing_entry, ref forks) => {
+				finalized = finalizing_entry.as_ref().map(|entry| &entry.valid_from);
+				for fork_index in forks.iter().rev() {
+					unfinalized.remove(*fork_index);
+				}
+			},
+		}
+
+		(finalized, unfinalized).encode()
+	}
+
+	/// Decode meta information.
+	pub fn decode<Block: BlockT>(encoded: &[u8]) -> ClientResult<Metadata<Block>> {
+		let input = &mut &*encoded;
+		let finalized: Option<ComplexBlockId<Block>> = Decode::decode(input)
+			.ok_or_else(|| ClientError::from(ClientErrorKind::Backend("Error decoding cache meta".into())))?;
+		let unfinalized: Vec<ComplexBlockId<Block>> = Decode::decode(input)
+			.ok_or_else(|| ClientError::from(ClientErrorKind::Backend("Error decoding cache meta".into())))?;
+
+		Ok(Metadata { finalized, unfinalized })
+	}
+}
+
+#[cfg(test)]
+pub mod tests {
+	use std::collections::{HashMap, HashSet};
+	use runtime_primitives::traits::Header as HeaderT;
+	use super::*;
+
+	pub struct FaultyStorage;
+
+	impl<Block: BlockT, T: CacheItemT> Storage<Block, T> for FaultyStorage {
+		fn read_id(&self, _at: NumberFor<Block>) -> ClientResult<Option<Block::Hash>> {
+			Err(ClientErrorKind::Backend("TestError".into()).into())
+		}
+
+		fn read_header(&self, _at: &Block::Hash) -> ClientResult<Option<Block::Header>> {
+			Err(ClientErrorKind::Backend("TestError".into()).into())
+		}
+
+		fn read_meta(&self) -> ClientResult<Metadata<Block>> {
+			Err(ClientErrorKind::Backend("TestError".into()).into())
+		}
+
+		fn read_entry(&self, _at: &ComplexBlockId<Block>) -> ClientResult<Option<StorageEntry<Block, T>>> {
+			Err(ClientErrorKind::Backend("TestError".into()).into())
+		}
+	}
+
+	pub struct DummyStorage<Block: BlockT, T: CacheItemT> {
+		meta: Metadata<Block>,
+		ids: HashMap<NumberFor<Block>, Block::Hash>,
+		headers: HashMap<Block::Hash, Block::Header>,
+		entries: HashMap<Block::Hash, StorageEntry<Block, T>>,
+	}
+
+	impl<Block: BlockT, T: CacheItemT> DummyStorage<Block, T> {
+		pub fn new() -> Self {
+			DummyStorage {
+				meta: Metadata {
+					finalized: None,
+					unfinalized: Vec::new(),
+				},
+				ids: HashMap::new(),
+				headers: HashMap::new(),
+				entries: HashMap::new(),
+			}
+		}
+
+		pub fn with_meta(mut self, finalized: Option<ComplexBlockId<Block>>, unfinalized: Vec<ComplexBlockId<Block>>) -> Self {
+			self.meta.finalized = finalized;
+			self.meta.unfinalized = unfinalized;
+			self
+		}
+
+		pub fn with_id(mut self, at: NumberFor<Block>, id: Block::Hash) -> Self {
+			self.ids.insert(at, id);
+			self
+		}
+
+		pub fn with_header(mut self, header: Block::Header) -> Self {
+			self.headers.insert(header.hash(), header);
+			self
+		}
+
+		pub fn with_entry(mut self, at: ComplexBlockId<Block>, entry: StorageEntry<Block, T>) -> Self {
+			self.entries.insert(at.hash, entry);
+			self
+		}
+	}
+
+	impl<Block: BlockT, T: CacheItemT> Storage<Block, T> for DummyStorage<Block, T> {
+		fn read_id(&self, at: NumberFor<Block>) -> ClientResult<Option<Block::Hash>> {
+			Ok(self.ids.get(&at).cloned())
+		}
+
+		fn read_header(&self, at: &Block::Hash) -> ClientResult<Option<Block::Header>> {
+			Ok(self.headers.get(&at).cloned())
+		}
+
+		fn read_meta(&self) -> ClientResult<Metadata<Block>> {
+			Ok(self.meta.clone())
+		}
+
+		fn read_entry(&self, at: &ComplexBlockId<Block>) -> ClientResult<Option<StorageEntry<Block, T>>> {
+			Ok(self.entries.get(&at.hash).cloned())
+		}
+	}
+
+	pub struct DummyTransaction<Block: BlockT> {
+		updated_meta: Option<Metadata<Block>>,
+		inserted_entries: HashSet<Block::Hash>,
+		removed_entries: HashSet<Block::Hash>,
+	}
+
+	impl<Block: BlockT> DummyTransaction<Block> {
+		pub fn new() -> Self {
+			DummyTransaction {
+				updated_meta: None,
+				inserted_entries: HashSet::new(),
+				removed_entries: HashSet::new(),
+			}
+		}
+
+		pub fn inserted_entries(&self) -> &HashSet<Block::Hash> {
+			&self.inserted_entries
+		}
+
+		pub fn removed_entries(&self) -> &HashSet<Block::Hash> {
+			&self.removed_entries
+		}
+
+		pub fn updated_meta(&self) -> &Option<Metadata<Block>> {
+			&self.updated_meta
+		}
+	}
+
+	impl<Block: BlockT, T: CacheItemT> StorageTransaction<Block, T> for DummyTransaction<Block> {
+		fn insert_storage_entry(&mut self, at: &ComplexBlockId<Block>, _entry: &StorageEntry<Block, T>) {
+			self.inserted_entries.insert(at.hash);
+		}
+
+		fn remove_storage_entry(&mut self, at: &ComplexBlockId<Block>) {
+			self.removed_entries.insert(at.hash);
+		}
+
+		fn update_meta(
+			&mut self,
+			best_finalized_entry: Option<&Entry<Block, T>>,
+			unfinalized: &[Fork<Block, T>],
+			operation: &CommitOperation<Block, T>,
+		) {
+			self.updated_meta = Some(meta::decode(&meta::encode(best_finalized_entry, unfinalized, operation)).unwrap());
+		}
+	}
+}
diff --git a/substrate/core/client/db/src/cache/mod.rs b/substrate/core/client/db/src/cache/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..19a167e9865c9b81e9035248ba190e94b4b57bed
--- /dev/null
+++ b/substrate/core/client/db/src/cache/mod.rs
@@ -0,0 +1,207 @@
+// Copyright 2017 Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate.  If not, see <http://www.gnu.org/licenses/>.
+
+//! DB-backed cache of blockchain data.
+
+use std::sync::Arc;
+use parking_lot::RwLock;
+
+use kvdb::{KeyValueDB, DBTransaction};
+
+use client::blockchain::Cache as BlockchainCache;
+use client::error::Result as ClientResult;
+use codec::{Encode, Decode};
+use primitives::AuthorityId;
+use runtime_primitives::generic::BlockId;
+use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor, As};
+use utils::{self, COLUMN_META};
+
+use self::list_cache::ListCache;
+
+mod list_cache;
+mod list_entry;
+mod list_storage;
+
+/// Minimal post-finalization age of finalized blocks before they are pruned.
+const PRUNE_DEPTH: u64 = 1024;
+
+/// Block identifier that holds both hash and number.
+#[derive(Clone, Debug, Encode, Decode, PartialEq)]
+pub struct ComplexBlockId<Block: BlockT> {
+	hash: Block::Hash,
+	number: NumberFor<Block>,
+}
+
+impl<Block: BlockT> ComplexBlockId<Block> {
+	/// Create new complex block id.
+	pub fn new(hash: Block::Hash, number: NumberFor<Block>) -> Self {
+		ComplexBlockId { hash, number }
+	}
+}
+
+impl<Block: BlockT> ::std::cmp::PartialOrd for ComplexBlockId<Block> {
+	fn partial_cmp(&self, other: &ComplexBlockId<Block>) -> Option<::std::cmp::Ordering> {
+		self.number.partial_cmp(&other.number)
+	}
+}
+
+/// All cache items must implement this trait.
+pub trait CacheItemT: Clone + Decode + Encode + PartialEq {}
+
+impl<T> CacheItemT for T where T: Clone + Decode + Encode + PartialEq {}
+
+/// Database-backed blockchain data cache.
+pub struct DbCache<Block: BlockT> {
+	authorities_at: ListCache<Block, Vec<AuthorityId>, self::list_storage::DbStorage>,
+}
+
+impl<Block: BlockT> DbCache<Block> {
+	/// Create new cache.
+	pub fn new(
+		db: Arc<KeyValueDB>,
+		hash_lookup_column: Option<u32>,
+		header_column: Option<u32>,
+		authorities_column: Option<u32>,
+		best_finalized_block: ComplexBlockId<Block>,
+	) -> Self {
+		DbCache {
+			authorities_at: ListCache::new(
+				self::list_storage::DbStorage::new(b"auth".to_vec(), db,
+					self::list_storage::DbColumns {
+						meta: COLUMN_META,
+						hash_lookup: hash_lookup_column,
+						header: header_column,
+						cache: authorities_column,
+					},
+				),
+				As::sa(PRUNE_DEPTH),
+				best_finalized_block,
+			),
+		}
+	}
+
+	/// Begin cache transaction.
+	pub fn transaction<'a>(&'a mut self, tx: &'a mut DBTransaction) -> DbCacheTransaction<'a, Block> {
+		DbCacheTransaction {
+			cache: self,
+			tx,
+			authorities_at_op: None,
+		}
+	}
+
+	/// Run post-commit cache operations.
+	pub fn commit(&mut self, ops: DbCacheTransactionOps<Block>) {
+		if let Some(authorities_at_op) = ops.authorities_at_op {
+			self.authorities_at.on_transaction_commit(authorities_at_op);
+		}
+	}
+}
+
+/// Cache operations that are to be committed after database transaction is committed.
+pub struct DbCacheTransactionOps<Block: BlockT> {
+	authorities_at_op: Option<self::list_cache::CommitOperation<Block, Vec<AuthorityId>>>,
+}
+
+/// Database-backed blockchain data cache transaction valid for single block import.
+pub struct DbCacheTransaction<'a, Block: BlockT> {
+	cache: &'a mut DbCache<Block>,
+	tx: &'a mut DBTransaction,
+	authorities_at_op: Option<self::list_cache::CommitOperation<Block, Vec<AuthorityId>>>,
+}
+
+impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> {
+	/// Convert transaction into post-commit operations set.
+	pub fn into_ops(self) -> DbCacheTransactionOps<Block> {
+		DbCacheTransactionOps {
+			authorities_at_op: self.authorities_at_op,
+		}
+	}
+
+	/// When new block is inserted into database.
+	pub fn on_block_insert(
+		mut self,
+		parent: ComplexBlockId<Block>,
+		block: ComplexBlockId<Block>,
+		authorities_at: Option<Vec<AuthorityId>>,
+		is_final: bool,
+	) -> ClientResult<Self> {
+		assert!(self.authorities_at_op.is_none());
+
+		self.authorities_at_op = self.cache.authorities_at.on_block_insert(
+			&mut self::list_storage::DbStorageTransaction::new(
+				self.cache.authorities_at.storage(),
+				&mut self.tx
+			),
+			parent,
+			block,
+			authorities_at,
+			is_final,
+		)?;
+
+		Ok(self)
+	}
+
+	/// When previously inserted block is finalized.
+	pub fn on_block_finalize(
+		mut self,
+		parent: ComplexBlockId<Block>,
+		block: ComplexBlockId<Block>
+	) -> ClientResult<Self> {
+		assert!(self.authorities_at_op.is_none());
+
+		self.authorities_at_op = self.cache.authorities_at.on_block_finalize(
+			&mut self::list_storage::DbStorageTransaction::new(
+				self.cache.authorities_at.storage(),
+				&mut self.tx
+			),
+			parent,
+			block,
+		)?;
+
+		Ok(self)
+	}
+}
+
+/// Synchronous implementation of database-backed blockchain data cache.
+pub struct DbCacheSync<Block: BlockT>(pub RwLock<DbCache<Block>>);
+
+impl<Block: BlockT> BlockchainCache<Block> for DbCacheSync<Block> {
+	fn authorities_at(&self, at: BlockId<Block>) -> Option<Vec<AuthorityId>> {
+		let cache = self.0.read();
+		let storage = cache.authorities_at.storage();
+		let db = storage.db();
+		let columns = storage.columns();
+		let at = match at {
+			BlockId::Hash(hash) => {
+				let header = utils::read_header::<Block>(
+					&**db,
+					columns.hash_lookup,
+					columns.header,
+					BlockId::Hash(hash.clone())).ok()??;
+				ComplexBlockId::new(hash, *header.number())
+			},
+			BlockId::Number(number) => {
+				let hash = utils::read_id::<Block>(
+					&**db,
+					columns.hash_lookup,
+					BlockId::Number(number.clone())).ok()??;
+				ComplexBlockId::new(hash, number)
+			},
+		};
+
+		cache.authorities_at.value_at_block(&at).ok()?
+	}
+}
diff --git a/substrate/core/client/db/src/light.rs b/substrate/core/client/db/src/light.rs
index 5c458efb09e41ac56dbe38a2ebb39896f1af43e3..8143645166ce9c0ea4b5533b4f9afcea746bba9d 100644
--- a/substrate/core/client/db/src/light.rs
+++ b/substrate/core/client/db/src/light.rs
@@ -32,7 +32,7 @@ use primitives::{AuthorityId, Blake2Hasher};
 use runtime_primitives::generic::BlockId;
 use runtime_primitives::traits::{Block as BlockT, Header as HeaderT,
 	Zero, One, As, NumberFor};
-use cache::DbCache;
+use cache::{DbCacheSync, DbCache, ComplexBlockId};
 use utils::{meta_keys, Meta, db_err, number_to_lookup_key, open_database,
 	read_db, read_id, read_meta};
 use DatabaseSettings;
@@ -41,20 +41,17 @@ pub(crate) mod columns {
 	pub const META: Option<u32> = ::utils::COLUMN_META;
 	pub const HASH_LOOKUP: Option<u32> = Some(1);
 	pub const HEADER: Option<u32> = Some(2);
-	pub const AUTHORITIES: Option<u32> = Some(3);
+	pub const CACHE: Option<u32> = Some(3);
 	pub const CHT: Option<u32> = Some(4);
 }
 
-/// Keep authorities for last 'AUTHORITIES_ENTRIES_TO_KEEP' blocks.
-#[allow(unused)]
-pub(crate) const AUTHORITIES_ENTRIES_TO_KEEP: u64 = cht::SIZE;
-
 /// Light blockchain storage. Stores most recent headers + CHTs for older headers.
+/// Locks order: meta, leaves, cache.
 pub struct LightStorage<Block: BlockT> {
 	db: Arc<KeyValueDB>,
 	meta: RwLock<Meta<<<Block as BlockT>::Header as HeaderT>::Number, Block::Hash>>,
 	leaves: RwLock<LeafSet<Block::Hash, NumberFor<Block>>>,
-	_cache: DbCache<Block>,
+	cache: DbCacheSync<Block>,
 }
 
 #[derive(Clone, PartialEq, Debug)]
@@ -86,31 +83,27 @@ impl<Block> LightStorage<Block>
 	}
 
 	fn from_kvdb(db: Arc<KeyValueDB>) -> ClientResult<Self> {
+		let meta = read_meta::<Block>(&*db, columns::META, columns::HEADER)?;
+		let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?;
 		let cache = DbCache::new(
 			db.clone(),
 			columns::HASH_LOOKUP,
 			columns::HEADER,
-			columns::AUTHORITIES
-		)?;
-		let meta = RwLock::new(read_meta::<Block>(&*db, columns::META, columns::HEADER)?);
-		let leaves = RwLock::new(LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?);
+			columns::CACHE,
+			ComplexBlockId::new(meta.finalized_hash, meta.finalized_number),
+		);
 
 		Ok(LightStorage {
 			db,
-			meta,
-			leaves,
-			_cache: cache,
+			meta: RwLock::new(meta),
+			cache: DbCacheSync(RwLock::new(cache)),
+			leaves: RwLock::new(leaves),
 		})
 	}
 
 	#[cfg(test)]
-	pub(crate) fn db(&self) -> &Arc<KeyValueDB> {
-		&self.db
-	}
-
-	#[cfg(test)]
-	pub(crate) fn cache(&self) -> &DbCache<Block> {
-		&self._cache
+	pub(crate) fn cache(&self) -> &DbCacheSync<Block> {
+		&self.cache
 	}
 
 	fn update_meta(
@@ -187,8 +180,12 @@ impl<Block> BlockchainHeaderBackend<Block> for LightStorage<Block>
 }
 
 impl<Block: BlockT> LightStorage<Block> {
-	// note that a block is finalized. only call with child of last finalized block.
-	fn note_finalized(&self, transaction: &mut DBTransaction, header: &Block::Header, hash: Block::Hash) -> ClientResult<()> {
+	fn note_finalized(
+		&self,
+		transaction: &mut DBTransaction,
+		header: &Block::Header,
+		hash: Block::Hash,
+	) -> ClientResult<()> {
 		let meta = self.meta.read();
 		if &meta.finalized_hash != header.parent_hash() {
 			return Err(::client::error::ErrorKind::NonSequentialFinalization(
@@ -236,7 +233,7 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
 	fn import_header(
 		&self,
 		header: Block::Header,
-		_authorities: Option<Vec<AuthorityId>>,
+		authorities: Option<Vec<AuthorityId>>,
 		leaf_state: NewBlockState,
 	) -> ClientResult<()> {
 		let mut transaction = DBTransaction::new();
@@ -246,11 +243,8 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
 		let parent_hash = *header.parent_hash();
 
 		transaction.put(columns::HEADER, hash.as_ref(), &header.encode());
-		transaction.put(columns::HASH_LOOKUP, &number_to_lookup_key(number), hash.as_ref());
 
 		if leaf_state.is_best() {
-			transaction.put(columns::META, meta_keys::BEST_BLOCK, hash.as_ref());
-
 			// handle reorg.
 			{
 				let meta = self.meta.read();
@@ -286,7 +280,8 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
 				}
 			}
 
-			// TODO: cache authorities for previous block, accounting for reorgs.
+			transaction.put(columns::META, meta_keys::BEST_BLOCK, hash.as_ref());
+			transaction.put(columns::HASH_LOOKUP, &number_to_lookup_key(number), hash.as_ref());
 		}
 
 		let finalized = match leaf_state {
@@ -302,6 +297,16 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
 			let mut leaves = self.leaves.write();
 			let displaced_leaf = leaves.import(hash, number, parent_hash);
 
+			let mut cache = self.cache.0.write();
+			let cache_ops = cache.transaction(&mut transaction)
+				.on_block_insert(
+					ComplexBlockId::new(*header.parent_hash(), if number == Zero::zero() { Zero::zero() } else { number - One::one() }),
+					ComplexBlockId::new(hash, number),
+					authorities,
+					finalized,
+				)?
+				.into_ops();
+
 			debug!("Light DB Commit {:?} ({})", hash, number);
 			let write_result = self.db.write(transaction).map_err(db_err);
 			if let Err(e) = write_result {
@@ -311,7 +316,10 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
 				}
 				return Err(e);
 			}
+
+			cache.commit(cache_ops);
 		}
+
 		self.update_meta(hash, number, leaf_state.is_best(), finalized);
 
 		Ok(())
@@ -332,9 +340,22 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
 			let mut transaction = DBTransaction::new();
 			// TODO: ensure best chain contains this block.
 			let hash = header.hash();
+			let number = *header.number();
 			self.note_finalized(&mut transaction, &header, hash.clone())?;
-			self.db.write(transaction).map_err(db_err)?;
+			{
+				let mut cache = self.cache.0.write();
+				let cache_ops = cache.transaction(&mut transaction)
+					.on_block_finalize(
+						ComplexBlockId::new(*header.parent_hash(), if number == Zero::zero() { Zero::zero() } else { number - One::one() }),
+						ComplexBlockId::new(hash, number)
+					)?
+					.into_ops();
+
+				self.db.write(transaction).map_err(db_err)?;
+				cache.commit(cache_ops);
+			}
 			self.update_meta(hash, header.number().clone(), false, true);
+
 			Ok(())
 		} else {
 			Err(ClientErrorKind::UnknownBlock(format!("Cannot finalize block {:?}", id)).into())
@@ -358,6 +379,16 @@ pub(crate) mod tests {
 
 	type Block = RawBlock<u32>;
 
+	fn prepare_header(parent: &Hash, number: u64, extrinsics_root: Hash) -> Header {
+		Header {
+			number: number.into(),
+			parent_hash: *parent,
+			state_root: Hash::random(),
+			digest: Default::default(),
+			extrinsics_root,
+		}
+	}
+
 	pub fn insert_block_with_extrinsics_root(
 		db: &LightStorage<Block>,
 		parent: &Hash,
@@ -365,14 +396,7 @@ pub(crate) mod tests {
 		authorities: Option<Vec<AuthorityId>>,
 		extrinsics_root: Hash,
 	) -> Hash {
-		let header = Header {
-			number: number.into(),
-			parent_hash: *parent,
-			state_root: Default::default(),
-			digest: Default::default(),
-			extrinsics_root,
-		};
-
+		let header = prepare_header(parent, number, extrinsics_root);
 		let hash = header.hash();
 		db.import_header(header, authorities, NewBlockState::Best).unwrap();
 		hash
@@ -384,19 +408,36 @@ pub(crate) mod tests {
 		number: u64,
 		authorities: Option<Vec<AuthorityId>>
 	) -> Hash {
-		let header = Header {
-			number: number.into(),
-			parent_hash: *parent,
-			state_root: Default::default(),
-			digest: Default::default(),
-			extrinsics_root: Default::default(),
-		};
-
+		let header = prepare_header(parent, number, Default::default());
 		let hash = header.hash();
 		db.import_header(header, authorities, NewBlockState::Best).unwrap();
 		hash
 	}
 
+	fn insert_final_block(
+		db: &LightStorage<Block>,
+		parent: &Hash,
+		number: u64,
+		authorities: Option<Vec<AuthorityId>>
+	) -> Hash {
+		let header = prepare_header(parent, number, Default::default());
+		let hash = header.hash();
+		db.import_header(header, authorities, NewBlockState::Final).unwrap();
+		hash
+	}
+
+	fn insert_non_best_block(
+		db: &LightStorage<Block>,
+		parent: &Hash,
+		number: u64,
+		authorities: Option<Vec<AuthorityId>>
+	) -> Hash {
+		let header = prepare_header(parent, number, Default::default());
+		let hash = header.hash();
+		db.import_header(header, authorities, NewBlockState::Normal).unwrap();
+		hash
+	}
+
 	#[test]
 	fn returns_known_header() {
 		let db = LightStorage::new_test();
@@ -464,7 +505,7 @@ pub(crate) mod tests {
 		let db = LightStorage::new_test();
 
 		// insert genesis block header (never pruned)
-		let mut prev_hash = insert_block(&db, &Default::default(), 0, None);
+		let mut prev_hash = insert_final_block(&db, &Default::default(), 0, None);
 
 		// insert SIZE blocks && ensure that nothing is pruned
 		for number in 0..cht::SIZE {
@@ -511,7 +552,7 @@ pub(crate) mod tests {
 		let db = LightStorage::new_test();
 
 		// insert 1 + SIZE + SIZE + 1 blocks so that CHT#0 is created
-		let mut prev_hash = insert_block(&db, &Default::default(), 0, None);
+		let mut prev_hash = insert_final_block(&db, &Default::default(), 0, None);
 		for i in 1..1 + cht::SIZE + cht::SIZE + 1 {
 			prev_hash = insert_block(&db, &prev_hash, i as u64, None);
 			db.finalize_header(BlockId::Hash(prev_hash)).unwrap();
@@ -586,4 +627,123 @@ pub(crate) mod tests {
 			assert!(tree_route.enacted().is_empty());
 		}
 	}
+
+	#[test]
+	fn authorites_are_cached() {
+		let db = LightStorage::new_test();
+
+		fn run_checks(db: &LightStorage<Block>, max: u64, checks: &[(u64, Option<Vec<AuthorityId>>)]) {
+			for (at, expected) in checks.iter().take_while(|(at, _)| *at <= max) {
+				let actual = db.cache().authorities_at(BlockId::Number(*at));
+				assert_eq!(*expected, actual);
+			}
+		}
+
+		let (hash2, hash6) = {
+			// first few blocks are instantly finalized
+			// B0(None) -> B1(None) -> B2(1) -> B3(1) -> B4(1, 2) -> B5(1, 2) -> B6(None)
+			let checks = vec![
+				(0, None),
+				(1, None),
+				(2, Some(vec![[1u8; 32].into()])),
+				(3, Some(vec![[1u8; 32].into()])),
+				(4, Some(vec![[1u8; 32].into(), [2u8; 32].into()])),
+				(5, Some(vec![[1u8; 32].into(), [2u8; 32].into()])),
+				(6, None),
+				(7, None), // the cache lookup also works for 'future' blocks
+			];
+
+			let hash0 = insert_final_block(&db, &Default::default(), 0, None);
+			run_checks(&db, 0, &checks);
+			let hash1 = insert_final_block(&db, &hash0, 1, None);
+			run_checks(&db, 1, &checks);
+			let hash2 = insert_final_block(&db, &hash1, 2, Some(vec![[1u8; 32].into()]));
+			run_checks(&db, 2, &checks);
+			let hash3 = insert_final_block(&db, &hash2, 3, Some(vec![[1u8; 32].into()]));
+			run_checks(&db, 3, &checks);
+			let hash4 = insert_final_block(&db, &hash3, 4, Some(vec![[1u8; 32].into(), [2u8; 32].into()]));
+			run_checks(&db, 4, &checks);
+			let hash5 = insert_final_block(&db, &hash4, 5, Some(vec![[1u8; 32].into(), [2u8; 32].into()]));
+			run_checks(&db, 5, &checks);
+			let hash6 = insert_final_block(&db, &hash5, 6, None);
+			run_checks(&db, 7, &checks);
+
+			(hash2, hash6)
+		};
+
+		{
+			// some older non-best blocks are inserted
+			// ... -> B2(1) -> B2_1(1) -> B2_2(2)
+			// => the cache ignores all writes before best finalized block
+			let hash2_1 = insert_non_best_block(&db, &hash2, 3, Some(vec![[1u8; 32].into()]));
+			assert_eq!(None, db.cache().authorities_at(BlockId::Hash(hash2_1)));
+			let hash2_2 = insert_non_best_block(&db, &hash2_1, 4, Some(vec![[1u8; 32].into(), [2u8; 32].into()]));
+			assert_eq!(None, db.cache().authorities_at(BlockId::Hash(hash2_2)));
+		}
+
+		let (hash7, hash8, hash6_1, hash6_2, hash6_1_1, hash6_1_2) = {
+			// inserting non-finalized blocks
+			// B6(None) -> B7(3) -> B8(3)
+			//          \> B6_1(4) -> B6_2(4)
+			//                     \> B6_1_1(5)
+			//                     \> B6_1_2(6) -> B6_1_3(7)
+
+			let hash7 = insert_block(&db, &hash6, 7, Some(vec![[3u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6)), None);
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash7)), Some(vec![[3u8; 32].into()]));
+			let hash8 = insert_block(&db, &hash7, 8, Some(vec![[3u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6)), None);
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash7)), Some(vec![[3u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash8)), Some(vec![[3u8; 32].into()]));
+			let hash6_1 = insert_block(&db, &hash6, 7, Some(vec![[4u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6)), None);
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash7)), Some(vec![[3u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash8)), Some(vec![[3u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1)), Some(vec![[4u8; 32].into()]));
+			let hash6_1_1 = insert_non_best_block(&db, &hash6_1, 8, Some(vec![[5u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6)), None);
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash7)), Some(vec![[3u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash8)), Some(vec![[3u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1)), Some(vec![[4u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1_1)), Some(vec![[5u8; 32].into()]));
+			let hash6_1_2 = insert_non_best_block(&db, &hash6_1, 8, Some(vec![[6u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6)), None);
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash7)), Some(vec![[3u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash8)), Some(vec![[3u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1)), Some(vec![[4u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1_1)), Some(vec![[5u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1_2)), Some(vec![[6u8; 32].into()]));
+			let hash6_2 = insert_block(&db, &hash6_1, 8, Some(vec![[4u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6)), None);
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash7)), Some(vec![[3u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash8)), Some(vec![[3u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1)), Some(vec![[4u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1_1)), Some(vec![[5u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1_2)), Some(vec![[6u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_2)), Some(vec![[4u8; 32].into()]));
+
+			(hash7, hash8, hash6_1, hash6_2, hash6_1_1, hash6_1_2)
+		};
+
+		{
+			// finalize block hash6_1
+			db.finalize_header(BlockId::Hash(hash6_1)).unwrap();
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6)), None);
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash7)), None);
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash8)), None);
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1)), Some(vec![[4u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1_1)), Some(vec![[5u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1_2)), Some(vec![[6u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_2)), Some(vec![[4u8; 32].into()]));
+			// finalize block hash6_2
+			db.finalize_header(BlockId::Hash(hash6_2)).unwrap();
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6)), None);
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash7)), None);
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash8)), None);
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1)), Some(vec![[4u8; 32].into()]));
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1_1)), None);
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1_2)), None);
+			assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_2)), Some(vec![[4u8; 32].into()]));
+		}
+	}
 }
diff --git a/substrate/core/client/db/src/utils.rs b/substrate/core/client/db/src/utils.rs
index e7139467a194d3e5682b62e490b9f050316d4242..e4448f4538fe09116cbf48889cee7e41610e79b8 100644
--- a/substrate/core/client/db/src/utils.rs
+++ b/substrate/core/client/db/src/utils.rs
@@ -44,8 +44,8 @@ pub mod meta_keys {
 	pub const BEST_BLOCK: &[u8; 4] = b"best";
 	/// Last finalized block key.
 	pub const FINALIZED_BLOCK: &[u8; 5] = b"final";
-	/// Best authorities block key.
-	pub const BEST_AUTHORITIES: &[u8; 4] = b"auth";
+	/// Meta information prefix for list-based caches.
+	pub const CACHE_META_PREFIX: &[u8; 5] = b"cache";
 	/// Genesis block hash.
 	pub const GENESIS_HASH: &[u8; 3] = b"gen";
 	/// Leaves prefix list key.
@@ -82,17 +82,6 @@ pub fn number_to_lookup_key<N>(n: N) -> BlockLookupKey where N: As<u64> {
 	]
 }
 
-/// Convert block lookup key into block number.
-pub fn lookup_key_to_number<N>(key: &[u8]) -> client::error::Result<N> where N: As<u64> {
-	match key.len() {
-		4 => Ok((key[0] as u64) << 24
-			| (key[1] as u64) << 16
-			| (key[2] as u64) << 8
-			| (key[3] as u64)).map(As::sa),
-		_ => Err(client::error::ErrorKind::Backend("Invalid block key".into()).into()),
-	}
-}
-
 /// Maps database error to client error
 pub fn db_err(err: io::Error) -> client::error::Error {
 	use std::error::Error;