Commit fb058ae2 authored by Svyatoslav Nikolsky, committed by Gav Wood

authorities_at cache update (#836)

* AuthoritiesAt cache update

* fix after merge
parent ceda61f1
// Copyright 2017 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! DB-backed cache of blockchain data.
use std::sync::Arc;
use parking_lot::RwLock;
use kvdb::{KeyValueDB, DBTransaction};
use client::blockchain::Cache as BlockchainCache;
use client::error::Result as ClientResult;
use codec::{Codec, Encode, Decode};
use primitives::AuthorityId;
use runtime_primitives::generic::BlockId;
use runtime_primitives::traits::{Block as BlockT, As, NumberFor};
use utils::{COLUMN_META, BlockLookupKey, db_err, meta_keys, lookup_key_to_number, number_to_lookup_key};
/// Database-backed cache of blockchain data.
pub struct DbCache<Block: BlockT> {
db: Arc<KeyValueDB>,
block_index_column: Option<u32>,
header_column: Option<u32>,
authorities_at: DbCacheList<Block, Vec<AuthorityId>>,
}
impl<Block> DbCache<Block>
where
Block: BlockT,
NumberFor<Block>: As<u64>,
{
/// Create new cache.
pub fn new(
db: Arc<KeyValueDB>,
block_index_column: Option<u32>,
header_column: Option<u32>,
authorities_column: Option<u32>
) -> ClientResult<Self> {
Ok(DbCache {
db: db.clone(),
block_index_column,
header_column,
authorities_at: DbCacheList::new(db, meta_keys::BEST_AUTHORITIES, authorities_column)?,
})
}
/// Get the authorities_at cache.
#[allow(unused)]
pub fn authorities_at_cache(&self) -> &DbCacheList<Block, Vec<AuthorityId>> {
&self.authorities_at
}
}
impl<Block> BlockchainCache<Block> for DbCache<Block>
where
Block: BlockT,
NumberFor<Block>: As<u64>,
{
fn authorities_at(&self, at: BlockId<Block>) -> Option<Vec<AuthorityId>> {
use runtime_primitives::traits::Header as HeaderT;
let number = match at {
BlockId::Number(n) => Ok(number_to_lookup_key(n)),
BlockId::Hash(h) => {
let maybe_header = ::utils::read_header::<Block>(
&*self.db,
self.block_index_column,
self.header_column,
BlockId::Hash(h),
);
match maybe_header {
Ok(Some(hdr)) => Ok(number_to_lookup_key(*hdr.number())),
Ok(None) => return None, // no such block.
Err(e) => Err(e),
}
}
};
let authorities_at = number.and_then(|at| self.authorities_at.value_at_key(at));
match authorities_at {
Ok(authorities) => authorities,
Err(error) => {
warn!("Trying to read authorities from db cache has failed with: {}", error);
None
},
}
}
}
/// Database-backed blockchain cache which holds its entries as a list.
/// The meta column holds the pointer to the best known cache entry and
/// every entry points to the previous entry.
/// A new entry is appended when the set of authorities changes at some block,
/// so the best entry here means the entry that is valid for the best block
/// (and probably for its descendants).
pub struct DbCacheList<Block: BlockT, T: Clone> {
db: Arc<KeyValueDB>,
meta_key: &'static [u8],
column: Option<u32>,
/// Best entry at the moment. None means that cache has no entries at all.
best_entry: RwLock<Option<Entry<NumberFor<Block>, T>>>,
}
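// For example, with entries created at blocks 10, 50 and 100, the storage
// conceptually forms a singly-linked list anchored in the meta column:
//
//   meta[BEST_AUTHORITIES] -> 100
//   100 -> StorageEntry { prev_valid_from: Some(50), value: ... }
//   50  -> StorageEntry { prev_valid_from: Some(10), value: ... }
//   10  -> StorageEntry { prev_valid_from: None,     value: ... }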
/// Single cache entry.
#[derive(Clone)]
#[cfg_attr(test, derive(Debug, PartialEq))]
pub struct Entry<N, T: Clone> {
/// First block at which this value became active.
valid_from: N,
/// None means that we do not know the value starting from `valid_from` block
value: Option<T>,
}
/// Internal representation of the single cache entry. The entry points to the
/// previous entry in the cache, allowing us to traverse back in time in list-style.
#[derive(Encode, Decode)]
#[cfg_attr(test, derive(Debug, PartialEq))]
struct StorageEntry<N, T> {
/// None if valid from the beginning
prev_valid_from: Option<N>,
/// None means that we do not know the value starting from `valid_from` block
value: Option<T>,
}
impl<Block, T> DbCacheList<Block, T>
where
Block: BlockT,
NumberFor<Block>: As<u64>,
T: Clone + PartialEq + Codec,
{
/// Creates new cache list.
fn new(db: Arc<KeyValueDB>, meta_key: &'static [u8], column: Option<u32>) -> ClientResult<Self> {
let best_entry = RwLock::new(db.get(COLUMN_META, meta_key)
.map_err(db_err)
.and_then(|block| match block {
Some(block) => {
let valid_from = lookup_key_to_number(&block)?;
read_storage_entry::<Block, T>(&*db, column, valid_from)
.map(|entry| Some(Entry {
valid_from,
value: entry
.expect("meta entry references the entry at the block; storage entry at block exists when referenced; qed")
.value,
}))
},
None => Ok(None),
})?);
Ok(DbCacheList {
db,
column,
meta_key,
best_entry,
})
}
/// Gets the best known entry.
pub fn best_entry(&self) -> Option<Entry<NumberFor<Block>, T>> {
self.best_entry.read().clone()
}
/// Commits the new best pending value to the database. Returns Some if the best
/// entry must be updated after the transaction is committed.
#[allow(unused)]
pub fn commit_best_entry(
&self,
transaction: &mut DBTransaction,
valid_from: NumberFor<Block>,
pending_value: Option<T>
) -> Option<Entry<NumberFor<Block>, T>> {
let best_entry = self.best_entry();
let update_best_entry = match (
best_entry.as_ref().and_then(|a| a.value.as_ref()),
pending_value.as_ref()
) {
(Some(best_value), Some(pending_value)) => best_value != pending_value,
(None, Some(_)) | (Some(_), None) => true,
(None, None) => false,
};
if !update_best_entry {
return None;
}
let valid_from_key = number_to_lookup_key(valid_from);
transaction.put(COLUMN_META, self.meta_key, &valid_from_key);
transaction.put(self.column, &valid_from_key, &StorageEntry {
prev_valid_from: best_entry.map(|b| b.valid_from),
value: pending_value.clone(),
}.encode());
Some(Entry {
valid_from,
value: pending_value,
})
}
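// Illustrative call sequence (cf. `best_authorities_are_pruned` below): stage
// the change with `commit_best_entry`, write the `DBTransaction`, and only then
// pass the returned entry to `update_best_entry`, so the in-memory view changes
// only after the write has succeeded.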
/// Updates the best in-memory cache entry. Must be called after transaction with changes
/// from commit_best_entry has been committed.
#[allow(unused)]
pub fn update_best_entry(&self, best_entry: Option<Entry<NumberFor<Block>, T>>) {
*self.best_entry.write() = best_entry;
}
/// Prunes all entries from the beginning up to the block (including the entry at
/// the given number). Returns the number of pruned entries. Pruning never
/// deletes the latest entry in the cache.
#[allow(unused)]
pub fn prune_entries(
&self,
transaction: &mut DBTransaction,
last_to_prune: NumberFor<Block>
) -> ClientResult<usize> {
// find the last entry we want to keep
let mut last_entry_to_keep = match self.best_entry() {
Some(best_entry) => best_entry.valid_from,
None => return Ok(0),
};
let mut first_entry_to_remove = last_entry_to_keep;
while first_entry_to_remove > last_to_prune {
last_entry_to_keep = first_entry_to_remove;
let entry = read_storage_entry::<Block, T>(&*self.db, self.column, first_entry_to_remove)?
.expect("entry referenced from the next entry; entry exists when referenced; qed");
// if we have reached the first list entry
// AND all list entries are for blocks that are later than last_to_prune
// => nothing to prune
first_entry_to_remove = match entry.prev_valid_from {
Some(prev_valid_from) => prev_valid_from,
None => return Ok(0),
}
}
// remove all entries, starting from entry_to_remove
let mut pruned = 0;
let mut entry_to_remove = Some(first_entry_to_remove);
while let Some(current_entry) = entry_to_remove {
let entry = read_storage_entry::<Block, T>(&*self.db, self.column, current_entry)?
.expect("referenced entry exists; entry_to_remove is a reference to the entry; qed");
if current_entry != last_entry_to_keep {
transaction.delete(self.column, &number_to_lookup_key(current_entry));
pruned += 1;
}
entry_to_remove = entry.prev_valid_from;
}
let mut entry = read_storage_entry::<Block, T>(&*self.db, self.column, last_entry_to_keep)?
.expect("last_entry_to_keep >= first_entry_to_remove; that means that we're leaving this entry in the db; qed");
entry.prev_valid_from = None;
transaction.put(self.column, &number_to_lookup_key(last_entry_to_keep), &entry.encode());
Ok(pruned)
}
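// Worked example (cf. `best_authorities_are_pruned` below): with entries at
// blocks 100 and 200, `prune_entries(tx, 150)` deletes the entry at block 100
// and rewrites the entry at block 200 with `prev_valid_from: None`; pruning at
// block 300 afterwards removes nothing, since the latest entry is always kept.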
/// Reads the cached value that was active at the given block. Returns None if
/// the value was not cached or if it has been pruned.
fn value_at_key(&self, key: BlockLookupKey) -> ClientResult<Option<T>> {
let at = lookup_key_to_number::<NumberFor<Block>>(&key)?;
let best_valid_from = match self.best_entry() {
// there are entries in cache
Some(best_entry) => {
// we're looking for the best value
if at >= best_entry.valid_from {
return Ok(best_entry.value);
}
// we're looking for the value of older blocks
best_entry.valid_from
},
// there are no entries in the cache
None => return Ok(None),
};
let mut entry = read_storage_entry::<Block, T>(&*self.db, self.column, best_valid_from)?
.expect("self.best_entry().is_some() if there's entry for best_valid_from; qed");
loop {
let prev_valid_from = match entry.prev_valid_from {
Some(prev_valid_from) => prev_valid_from,
None => return Ok(None),
};
let prev_entry = read_storage_entry::<Block, T>(&*self.db, self.column, prev_valid_from)?
.expect("entry referenced from the next entry; entry exists when referenced; qed");
if at >= prev_valid_from {
return Ok(prev_entry.value);
}
entry = prev_entry;
}
}
}
/// Reads the entry at the block with given number.
fn read_storage_entry<Block, T>(
db: &KeyValueDB,
column: Option<u32>,
number: NumberFor<Block>
) -> ClientResult<Option<StorageEntry<NumberFor<Block>, T>>>
where
Block: BlockT,
NumberFor<Block>: As<u64>,
T: Codec,
{
db.get(column, &number_to_lookup_key(number))
.and_then(|entry| match entry {
Some(entry) => Ok(StorageEntry::<NumberFor<Block>, T>::decode(&mut &entry[..])),
None => Ok(None),
})
.map_err(db_err)
}
#[cfg(test)]
mod tests {
use runtime_primitives::testing::Block as RawBlock;
use light::{AUTHORITIES_ENTRIES_TO_KEEP, columns, LightStorage};
use light::tests::insert_block;
use super::*;
type Block = RawBlock<u64>;
#[test]
fn authorities_storage_entry_serialized() {
let test_cases: Vec<StorageEntry<u64, Vec<AuthorityId>>> = vec![
StorageEntry { prev_valid_from: Some(42), value: Some(vec![[1u8; 32].into()]) },
StorageEntry { prev_valid_from: None, value: Some(vec![[1u8; 32].into(), [2u8; 32].into()]) },
StorageEntry { prev_valid_from: None, value: None },
];
for expected in test_cases {
let serialized = expected.encode();
let deserialized = StorageEntry::decode(&mut &serialized[..]).unwrap();
assert_eq!(expected, deserialized);
}
}
#[test]
#[ignore] // TODO: unignore when cache reinstated.
fn best_authorities_are_updated() {
let db = LightStorage::new_test();
let authorities_at: Vec<(usize, Option<Entry<u64, Vec<AuthorityId>>>)> = vec![
(0, None),
(0, None),
(1, Some(Entry { valid_from: 1, value: Some(vec![[2u8; 32].into()]) })),
(1, Some(Entry { valid_from: 1, value: Some(vec![[2u8; 32].into()]) })),
(2, Some(Entry { valid_from: 3, value: Some(vec![[4u8; 32].into()]) })),
(2, Some(Entry { valid_from: 3, value: Some(vec![[4u8; 32].into()]) })),
(3, Some(Entry { valid_from: 5, value: None })),
(3, Some(Entry { valid_from: 5, value: None })),
];
// before any block, there are no entries in cache
assert!(db.cache().authorities_at_cache().best_entry().is_none());
assert_eq!(db.db().iter(columns::AUTHORITIES).count(), 0);
// insert blocks and check that best_entry() returns the correct result
let mut prev_hash = Default::default();
for number in 0..authorities_at.len() {
let authorities_at_number = authorities_at[number].1.clone().and_then(|e| e.value);
prev_hash = insert_block(&db, &prev_hash, number as u64, authorities_at_number);
assert_eq!(db.cache().authorities_at_cache().best_entry(), authorities_at[number].1);
assert_eq!(db.db().iter(columns::AUTHORITIES).count(), authorities_at[number].0);
}
// check that authorities_at() returns correct results for all retrospective blocks
for number in 1..authorities_at.len() + 1 {
assert_eq!(db.cache().authorities_at(BlockId::Number(number as u64)),
authorities_at.get(number + 1)
.or_else(|| authorities_at.last())
.unwrap().1.clone().and_then(|e| e.value));
}
// now check that cache entries are pruned when new blocks are inserted
let mut current_entries_count = authorities_at.last().unwrap().0;
let pruning_starts_at = AUTHORITIES_ENTRIES_TO_KEEP as usize;
for number in authorities_at.len()..authorities_at.len() + pruning_starts_at {
prev_hash = insert_block(&db, &prev_hash, number as u64, None);
if number > pruning_starts_at {
let prev_entries_count = authorities_at[number - pruning_starts_at].0;
let entries_count = authorities_at.get(number - pruning_starts_at + 1).map(|e| e.0)
.unwrap_or_else(|| authorities_at.last().unwrap().0);
current_entries_count -= entries_count - prev_entries_count;
}
// there's always at least 1 entry in the cache (after first insertion)
assert_eq!(db.db().iter(columns::AUTHORITIES).count(), ::std::cmp::max(current_entries_count, 1));
}
}
#[test]
fn best_authorities_are_pruned() {
let db = LightStorage::<Block>::new_test();
let mut transaction = DBTransaction::new();
// insert first entry at block#100
db.cache().authorities_at_cache().update_best_entry(
db.cache().authorities_at_cache().commit_best_entry(&mut transaction, 100, Some(vec![[1u8; 32].into()])));
db.db().write(transaction).unwrap();
// no entries are pruned, since there's only one entry in the cache
let mut transaction = DBTransaction::new();
assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 50).unwrap(), 0);
assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 100).unwrap(), 0);
assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 150).unwrap(), 0);
// insert second entry at block#200
let mut transaction = DBTransaction::new();
db.cache().authorities_at_cache().update_best_entry(
db.cache().authorities_at_cache().commit_best_entry(&mut transaction, 200, Some(vec![[2u8; 32].into()])));
db.db().write(transaction).unwrap();
let mut transaction = DBTransaction::new();
assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 50).unwrap(), 0);
assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 100).unwrap(), 1);
assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 150).unwrap(), 1);
// still only 1 entry is removed since pruning never deletes the last entry
assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 200).unwrap(), 1);
assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 250).unwrap(), 1);
// physically remove entry for block#100 from db
let mut transaction = DBTransaction::new();
assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 150).unwrap(), 1);
db.db().write(transaction).unwrap();
assert_eq!(db.cache().authorities_at_cache().best_entry().unwrap().value, Some(vec![[2u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Number(50)), None);
assert_eq!(db.cache().authorities_at(BlockId::Number(100)), None);
assert_eq!(db.cache().authorities_at(BlockId::Number(150)), None);
assert_eq!(db.cache().authorities_at(BlockId::Number(200)), Some(vec![[2u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Number(250)), Some(vec![[2u8; 32].into()]));
// try to delete last entry => failure (no entries are removed)
let mut transaction = DBTransaction::new();
assert_eq!(db.cache().authorities_at_cache().prune_entries(&mut transaction, 300).unwrap(), 0);
db.db().write(transaction).unwrap();
assert_eq!(db.cache().authorities_at_cache().best_entry().unwrap().value, Some(vec![[2u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Number(50)), None);
assert_eq!(db.cache().authorities_at(BlockId::Number(100)), None);
assert_eq!(db.cache().authorities_at(BlockId::Number(150)), None);
assert_eq!(db.cache().authorities_at(BlockId::Number(200)), Some(vec![[2u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Number(250)), Some(vec![[2u8; 32].into()]));
}
}
// Copyright 2017 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! List-cache storage entries.
use client::error::Result as ClientResult;
use runtime_primitives::traits::{Block as BlockT, NumberFor};
use cache::{CacheItemT, ComplexBlockId};
use cache::list_storage::{Storage};
/// Single list-based cache entry.
#[derive(Debug)]
#[cfg_attr(test, derive(PartialEq))]
pub struct Entry<Block: BlockT, T> {
/// First block at which this value became active.
pub valid_from: ComplexBlockId<Block>,
/// None means that we do not know the value starting from `valid_from` block
pub value: Option<T>,
}
/// Internal representation of the single list-based cache entry. The entry points to the
/// previous entry in the cache, allowing us to traverse back in time in list-style.
#[derive(Debug, Encode, Decode)]
#[cfg_attr(test, derive(Clone, PartialEq))]
pub struct StorageEntry<Block: BlockT, T: CacheItemT> {
/// None if valid from the beginning
pub prev_valid_from: Option<ComplexBlockId<Block>>,
/// None means that we do not know the value starting from `valid_from` block
pub value: Option<T>,
}
impl<Block: BlockT, T: CacheItemT> Entry<Block, T> {
/// Returns Some if the entry should be updated with the new value.
pub fn try_update(&self, value: Option<T>) -> Option<StorageEntry<Block, T>> {
match self.value == value {
true => None,
false => Some(StorageEntry {
prev_valid_from: Some(self.valid_from.clone()),
value,
}),
}
}
/// Wrapper around `search_best_before` that returns the range in which the given block fits.
pub fn search_best_range_before<S: Storage<Block, T>>(
&self,
storage: &S,
block: NumberFor<Block>,
) -> ClientResult<Option<(ComplexBlockId<Block>, Option<ComplexBlockId<Block>>)>> {
Ok(self.search_best_before(storage, block, false)?
.map(|(entry, next)| (entry.valid_from, next)))
}
/// Searches the list, ending with THIS entry, for the best entry preceding (or at)
/// the given block number.
/// If the entry is found, the result is the entry and the block id of the next entry (if it exists).
/// NOTE that this function does not check that the passed block is actually linked to
/// the blocks it found.
pub fn search_best_before<S: Storage<Block, T>>(
&self,
storage: &S,
block: NumberFor<Block>,
require_value: bool,
) -> ClientResult<Option<(Entry<Block, T>, Option<ComplexBlockId<Block>>)>> {
// we're looking for the best value
let mut next = None;
let mut current = self.valid_from.clone();
if block >= self.valid_from.number {
let value = if require_value { self.value.clone() } else { None };
return Ok(Some((Entry { valid_from: current, value }, next)));
}
// else - travel back in time
loop {
let entry = storage.require_entry(&current)?;
if block >= current.number {
return Ok(Some((Entry { valid_from: current, value: entry.value }, next)));
}
next = Some(current);
current = match entry.prev_valid_from {
Some(prev_valid_from) => prev_valid_from,
None => return Ok(None),
};
}
}
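// Worked example (cf. `entry_search_best_before_works` below): for a list
// 100 -> 50 -> 30 and `block = 75`, the loop skips the entry at 100 (75 < 100),
// then matches the entry at 50 (75 >= 50) and returns it with `next = Some(100)`.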
}
impl<Block: BlockT, T: CacheItemT> StorageEntry<Block, T> {
/// Converts storage entry into an entry, valid from given block.
pub fn into_entry(self, valid_from: ComplexBlockId<Block>) -> Entry<Block, T> {
Entry {
valid_from,
value: self.value,
}
}
}
#[cfg(test)]
mod tests {
use cache::list_cache::tests::test_id;
use cache::list_storage::tests::{DummyStorage, FaultyStorage};
use super::*;
#[test]
fn entry_try_update_works() {
// when trying to update with the same None value
assert_eq!(Entry::<_, u64> { valid_from: test_id(1), value: None }.try_update(None), None);
// when trying to update with the same Some value
assert_eq!(Entry { valid_from: test_id(1), value: Some(1) }.try_update(Some(1)), None);
// when trying to update with different None value
assert_eq!(Entry { valid_from: test_id(1), value: Some(1) }.try_update(None),
Some(StorageEntry { prev_valid_from: Some(test_id(1)), value: None }));
// when trying to update with different Some value
assert_eq!(Entry { valid_from: test_id(1), value: Some(1) }.try_update(Some(2)),
Some(StorageEntry { prev_valid_from: Some(test_id(1)), value: Some(2) }));
}
#[test]
fn entry_search_best_before_fails() {
// when storage returns error
assert!(Entry::<_, u64> { valid_from: test_id(100), value: None }.search_best_before(&FaultyStorage, 50, false).is_err());
}
#[test]
fn entry_search_best_before_works() {
// when block is better than our best block AND value is not required
assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: Some(100) }
.search_best_before(&DummyStorage::new(), 150, false).unwrap(),
Some((Entry::<_, u64> { valid_from: test_id(100), value: None }, None)));
// when block is better than our best block AND value is required
assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: Some(100) }
.search_best_before(&DummyStorage::new(), 150, true).unwrap(),
Some((Entry::<_, u64> { valid_from: test_id(100), value: Some(100) }, None)));
// when block is found between two entries
assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: Some(100) }
.search_best_before(&DummyStorage::new()
.with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) })
.with_entry(test_id(50), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(50) }),
75, false).unwrap(),
Some((Entry::<_, u64> { valid_from: test_id(50), value: Some(50) }, Some(test_id(100)))));
// when block is not found
assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: Some(100) }
.search_best_before(&DummyStorage::new()
.with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) })
.with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: Some(50) }),
30, true).unwrap(),
None);
}
}
// Copyright 2017 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! List-cache storage definition and implementation.
use std::sync::Arc;
use kvdb::{KeyValueDB, DBTransaction};
use client::error::{Error as ClientError, ErrorKind as ClientErrorKind, Result as ClientResult};
use codec::{Encode, Decode};
use runtime_primitives::generic::BlockId;
use runtime_primitives::traits::{Block as BlockT, NumberFor};
use utils::{self, db_err, meta_keys};
use cache::{CacheItemT, ComplexBlockId};
use cache::list_cache::{CommitOperation, Fork};
use cache::list_entry::{Entry, StorageEntry};
/// Single list-cache metadata.
#[derive(Debug)]
#[cfg_attr(test, derive(Clone, PartialEq))]
pub struct Metadata<Block: BlockT> {
/// Block at which best finalized entry is stored.
pub finalized: Option<ComplexBlockId<Block>>,
/// A set of blocks at which best unfinalized entries are stored.
pub unfinalized: Vec<ComplexBlockId<Block>>,
}
/// Readonly list-cache storage trait.
pub trait Storage<Block: BlockT, T: CacheItemT> {
/// Reads hash of the block at given number.
fn read_id(&self, at: NumberFor<Block>) -> ClientResult<Option<Block::Hash>>;
/// Reads header of the block with given hash.
fn read_header(&self, at: &Block::Hash) -> ClientResult<Option<Block::Header>>;
/// Reads cache metadata: the best finalized entry (if any) and the list of unfinalized entries.
fn read_meta(&self) -> ClientResult<Metadata<Block>>;
/// Reads cache entry from the storage.
fn read_entry(&self, at: &ComplexBlockId<Block>) -> ClientResult<Option<StorageEntry<Block, T>>>;
/// Reads referenced (and thus existing) cache entry from the storage.
fn require_entry(&self, at: &ComplexBlockId<Block>) -> ClientResult<StorageEntry<Block, T>> {
self.read_entry(at)
.and_then(|entry| entry
.ok_or_else(|| ClientError::from(
ClientErrorKind::Backend(format!("Referenced cache entry at {:?} is not found", at)))))
}
}
/// List-cache storage transaction.
pub trait StorageTransaction<Block: BlockT, T: CacheItemT> {
/// Insert storage entry at given block.
fn insert_storage_entry(&mut self, at: &ComplexBlockId<Block>, entry: &StorageEntry<Block, T>);
/// Delete storage entry at given block.
fn remove_storage_entry(&mut self, at: &ComplexBlockId<Block>);
/// Update metadata of the cache.
fn update_meta(
&mut self,
best_finalized_entry: Option<&Entry<Block, T>>,
unfinalized: &[Fork<Block, T>],
operation: &CommitOperation<Block, T>,
);
}
/// A set of columns used by the DbStorage.
#[derive(Debug)]
pub struct DbColumns {
/// Column holding cache meta.
pub meta: Option<u32>,
/// Column holding the mapping of { block number => block hash } for blocks of the best chain.
pub hash_lookup: Option<u32>,
/// Column holding the mapping of { block hash => block header }.
pub header: Option<u32>,
/// Column holding cache entries.
pub cache: Option<u32>,
}
/// Database-backed list cache storage.
pub struct DbStorage {
name: Vec<u8>,
meta_key: Vec<u8>,
db: Arc<KeyValueDB>,
columns: DbColumns,
}
impl DbStorage {
/// Create new database-backed list cache storage.
pub fn new(name: Vec<u8>, db: Arc<KeyValueDB>, columns: DbColumns) -> Self {
let meta_key = meta::key(&name);
DbStorage { name, meta_key, db, columns }
}
/// Get reference to the database.
pub fn db(&self) -> &Arc<KeyValueDB> { &self.db }
/// Get reference to the database columns.
pub fn columns(&self) -> &DbColumns { &self.columns }
/// Encode block id for storing as a key in the cache column.
/// We prefix the block hash with the cache name so that several caches
/// can store entries in the same column.
pub fn encode_block_id<Block: BlockT>(&self, block: &ComplexBlockId<Block>) -> Vec<u8> {
let mut encoded = self.name.clone();
encoded.extend(block.hash.as_ref());
encoded
}
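// For example, a cache created with the name b"auth" stores the entry for a
// block with hash `h` under the key `b"auth" ++ h`, so differently-named caches
// never collide within the shared cache column.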
}
impl<Block: BlockT, T: CacheItemT> Storage<Block, T> for DbStorage {
fn read_id(&self, at: NumberFor<Block>) -> ClientResult<Option<Block::Hash>> {
utils::read_id::<Block>(&*self.db, self.columns.hash_lookup, BlockId::Number(at))
}
fn read_header(&self, at: &Block::Hash) -> ClientResult<Option<Block::Header>> {
utils::read_header::<Block>(&*self.db, self.columns.hash_lookup, self.columns.header, BlockId::Hash(*at))
}
fn read_meta(&self) -> ClientResult<Metadata<Block>> {
self.db.get(self.columns.meta, &self.meta_key)
.map_err(db_err)
.and_then(|meta| match meta {
Some(meta) => meta::decode(&*meta),
None => Ok(Metadata {
finalized: None,
unfinalized: Vec::new(),
}),
})
}
fn read_entry(&self, at: &ComplexBlockId<Block>) -> ClientResult<Option<StorageEntry<Block, T>>> {
self.db.get(self.columns.cache, &self.encode_block_id(at))
.map_err(db_err)
.and_then(|entry| match entry {
Some(entry) => StorageEntry::<Block, T>::decode(&mut &entry[..])
.ok_or_else(|| ClientErrorKind::Backend("Failed to decode cache entry".into()).into())
.map(Some),
None => Ok(None),
})
}
}
/// Database-backed list cache storage transaction.
pub struct DbStorageTransaction<'a> {
storage: &'a DbStorage,
tx: &'a mut DBTransaction,
}
impl<'a> DbStorageTransaction<'a> {
/// Create new database transaction.
pub fn new(storage: &'a DbStorage, tx: &'a mut DBTransaction) -> Self {
DbStorageTransaction { storage, tx }
}
}
impl<'a, Block: BlockT, T: CacheItemT> StorageTransaction<Block, T> for DbStorageTransaction<'a> {
fn insert_storage_entry(&mut self, at: &ComplexBlockId<Block>, entry: &StorageEntry<Block, T>) {
self.tx.put(self.storage.columns.cache, &self.storage.encode_block_id(at), &entry.encode());
}
fn remove_storage_entry(&mut self, at: &ComplexBlockId<Block>) {
self.tx.delete(self.storage.columns.cache, &self.storage.encode_block_id(at));
}
fn update_meta(
&mut self,
best_finalized_entry: Option<&Entry<Block, T>>,
unfinalized: &[Fork<Block, T>],
operation: &CommitOperation<Block, T>,
) {
self.tx.put(
self.storage.columns.meta,
&self.storage.meta_key,
&meta::encode(best_finalized_entry, unfinalized, operation));
}
}
/// Metadata related functions.
mod meta {
use super::*;
/// Convert cache name into cache metadata key.
pub fn key(name: &[u8]) -> Vec<u8> {
let mut key_name = meta_keys::CACHE_META_PREFIX.to_vec();
key_name.extend_from_slice(name);
key_name
}
/// Encode cache metadata, 'applying' the commit operation before encoding.
pub fn encode<Block: BlockT, T: CacheItemT>(
best_finalized_entry: Option<&Entry<Block, T>>,
unfinalized: &[Fork<Block, T>],
op: &CommitOperation<Block, T>
) -> Vec<u8> {
let mut finalized = best_finalized_entry.as_ref().map(|entry| &entry.valid_from);
let mut unfinalized = unfinalized.iter().map(|fork| &fork.head().valid_from).collect::<Vec<_>>();
match op {
CommitOperation::AppendNewBlock(_, _) => (),
CommitOperation::AppendNewEntry(index, ref entry) => {
unfinalized[*index] = &entry.valid_from;
},
CommitOperation::AddNewFork(ref entry) => {
unfinalized.push(&entry.valid_from);
},
CommitOperation::BlockFinalized(_, ref finalizing_entry, ref forks) => {
finalized = finalizing_entry.as_ref().map(|entry| &entry.valid_from);
for fork_index in forks.iter().rev() {
unfinalized.remove(*fork_index);
}
},
}
(finalized, unfinalized).encode()
}
/// Decode meta information.
pub fn decode<Block: BlockT>(encoded: &[u8]) -> ClientResult<Metadata<Block>> {
let input = &mut &*encoded;
let finalized: Option<ComplexBlockId<Block>> = Decode::decode(input)
.ok_or_else(|| ClientError::from(ClientErrorKind::Backend("Error decoding cache meta".into())))?;
let unfinalized: Vec<ComplexBlockId<Block>> = Decode::decode(input)
.ok_or_else(|| ClientError::from(ClientErrorKind::Backend("Error decoding cache meta".into())))?;
Ok(Metadata { finalized, unfinalized })
}
}
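// The encoded metadata is therefore a SCALE tuple
// `(Option<ComplexBlockId>, Vec<ComplexBlockId>)`: the best finalized entry (if
// any) followed by the heads of all unfinalized forks, which is exactly what
// `decode` above reads back.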
#[cfg(test)]
pub mod tests {
use std::collections::{HashMap, HashSet};
use runtime_primitives::traits::Header as HeaderT;
use super::*;
pub struct FaultyStorage;
impl<Block: BlockT, T: CacheItemT> Storage<Block, T> for FaultyStorage {
fn read_id(&self, _at: NumberFor<Block>) -> ClientResult<Option<Block::Hash>> {
Err(ClientErrorKind::Backend("TestError".into()).into())
}
fn read_header(&self, _at: &Block::Hash) -> ClientResult<Option<Block::Header>> {
Err(ClientErrorKind::Backend("TestError".into()).into())
}
fn read_meta(&self) -> ClientResult<Metadata<Block>> {
Err(ClientErrorKind::Backend("TestError".into()).into())
}
fn read_entry(&self, _at: &ComplexBlockId<Block>) -> ClientResult<Option<StorageEntry<Block, T>>> {
Err(ClientErrorKind::Backend("TestError".into()).into())
}
}
pub struct DummyStorage<Block: BlockT, T: CacheItemT> {
meta: Metadata<Block>,
ids: HashMap<NumberFor<Block>, Block::Hash>,
headers: HashMap<Block::Hash, Block::Header>,
entries: HashMap<Block::Hash, StorageEntry<Block, T>>,
}
impl<Block: BlockT, T: CacheItemT> DummyStorage<Block, T> {
pub fn new() -> Self {
DummyStorage {
meta: Metadata {
finalized: None,
unfinalized: Vec::new(),
},
ids: HashMap::new(),
headers: HashMap::new(),
entries: HashMap::new(),
}
}
pub fn with_meta(mut self, finalized: Option<ComplexBlockId<Block>>, unfinalized: Vec<ComplexBlockId<Block>>) -> Self {
self.meta.finalized = finalized;
self.meta.unfinalized = unfinalized;
self
}
pub fn with_id(mut self, at: NumberFor<Block>, id: Block::Hash) -> Self {
self.ids.insert(at, id);
self
}
pub fn with_header(mut self, header: Block::Header) -> Self {
self.headers.insert(header.hash(), header);
self
}
pub fn with_entry(mut self, at: ComplexBlockId<Block>, entry: StorageEntry<Block, T>) -> Self {
self.entries.insert(at.hash, entry);
self
}
}
impl<Block: BlockT, T: CacheItemT> Storage<Block, T> for DummyStorage<Block, T> {
fn read_id(&self, at: NumberFor<Block>) -> ClientResult<Option<Block::Hash>> {
Ok(self.ids.get(&at).cloned())
}
fn read_header(&self, at: &Block::Hash) -> ClientResult<Option<Block::Header>> {
Ok(self.headers.get(&at).cloned())
}
fn read_meta(&self) -> ClientResult<Metadata<Block>> {
Ok(self.meta.clone())
}
fn read_entry(&self, at: &ComplexBlockId<Block>) -> ClientResult<Option<StorageEntry<Block, T>>> {
Ok(self.entries.get(&at.hash).cloned())
}
}
pub struct DummyTransaction<Block: BlockT> {
updated_meta: Option<Metadata<Block>>,
inserted_entries: HashSet<Block::Hash>,
removed_entries: HashSet<Block::Hash>,
}
impl<Block: BlockT> DummyTransaction<Block> {
pub fn new() -> Self {
DummyTransaction {
updated_meta: None,
inserted_entries: HashSet::new(),
removed_entries: HashSet::new(),
}
}
pub fn inserted_entries(&self) -> &HashSet<Block::Hash> {
&self.inserted_entries
}
pub fn removed_entries(&self) -> &HashSet<Block::Hash> {
&self.removed_entries
}
pub fn updated_meta(&self) -> &Option<Metadata<Block>> {
&self.updated_meta
}
}
impl<Block: BlockT, T: CacheItemT> StorageTransaction<Block, T> for DummyTransaction<Block> {
fn insert_storage_entry(&mut self, at: &ComplexBlockId<Block>, _entry: &StorageEntry<Block, T>) {
self.inserted_entries.insert(at.hash);
}
fn remove_storage_entry(&mut self, at: &ComplexBlockId<Block>) {
self.removed_entries.insert(at.hash);
}
fn update_meta(
&mut self,
best_finalized_entry: Option<&Entry<Block, T>>,
unfinalized: &[Fork<Block, T>],
operation: &CommitOperation<Block, T>,
) {
self.updated_meta = Some(meta::decode(&meta::encode(best_finalized_entry, unfinalized, operation)).unwrap());
}
}
}
// Copyright 2017 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! DB-backed cache of blockchain data.
use std::sync::Arc;
use parking_lot::RwLock;
use kvdb::{KeyValueDB, DBTransaction};
use client::blockchain::Cache as BlockchainCache;
use client::error::Result as ClientResult;
use codec::{Encode, Decode};
use primitives::AuthorityId;
use runtime_primitives::generic::BlockId;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor, As};
use utils::{self, COLUMN_META};
use self::list_cache::ListCache;
mod list_cache;
mod list_entry;
mod list_storage;
/// Minimal post-finalization age of finalized blocks before they'll be pruned.
const PRUNE_DEPTH: u64 = 1024;
/// Block identifier that holds both hash and number.
#[derive(Clone, Debug, Encode, Decode, PartialEq)]
pub struct ComplexBlockId<Block: BlockT> {
hash: Block::Hash,
number: NumberFor<Block>,
}
impl<Block: BlockT> ComplexBlockId<Block> {
/// Create new complex block id.
pub fn new(hash: Block::Hash, number: NumberFor<Block>) -> Self {
ComplexBlockId { hash, number }
}
}
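// Note: the ordering below compares block numbers only, so two blocks on
// different forks at the same height compare as equal.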
impl<Block: BlockT> ::std::cmp::PartialOrd for ComplexBlockId<Block> {
fn partial_cmp(&self, other: &ComplexBlockId<Block>) -> Option<::std::cmp::Ordering> {
self.number.partial_cmp(&other.number)
}
}
/// All cache items must implement this trait.
pub trait CacheItemT: Clone + Decode + Encode + PartialEq {}
impl<T> CacheItemT for T where T: Clone + Decode + Encode + PartialEq {}
/// Database-backed blockchain data cache.
pub struct DbCache<Block: BlockT> {
authorities_at: ListCache<Block, Vec<AuthorityId>, self::list_storage::DbStorage>,
}
impl<Block: BlockT> DbCache<Block> {
/// Create new cache.
pub fn new(
db: Arc<KeyValueDB>,
hash_lookup_column: Option<u32>,
header_column: Option<u32>,
authorities_column: Option<u32>,
best_finalized_block: ComplexBlockId<Block>,
) -> Self {
DbCache {
authorities_at: ListCache::new(
self::list_storage::DbStorage::new(b"auth".to_vec(), db,
self::list_storage::DbColumns {
meta: COLUMN_META,
hash_lookup: hash_lookup_column,
header: header_column,
cache: authorities_column,
},
),
As::sa(PRUNE_DEPTH),
best_finalized_block,
),
}
}
/// Begin cache transaction.
pub fn transaction<'a>(&'a mut self, tx: &'a mut DBTransaction) -> DbCacheTransaction<'a, Block> {
DbCacheTransaction {
cache: self,
tx,
authorities_at_op: None,
}
}
/// Run post-commit cache operations.
pub fn commit(&mut self, ops: DbCacheTransactionOps<Block>) {
if let Some(authorities_at_op) = ops.authorities_at_op {
self.authorities_at.on_transaction_commit(authorities_at_op);
}
}
}
/// Cache operations that are to be committed after database transaction is committed.
pub struct DbCacheTransactionOps<Block: BlockT> {
authorities_at_op: Option<self::list_cache::CommitOperation<Block, Vec<AuthorityId>>>,
}
/// Database-backed blockchain data cache transaction valid for single block import.
pub struct DbCacheTransaction<'a, Block: BlockT> {
cache: &'a mut DbCache<Block>,
tx: &'a mut DBTransaction,
authorities_at_op: Option<self::list_cache::CommitOperation<Block, Vec<AuthorityId>>>,
}
impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> {
/// Convert transaction into post-commit operations set.
pub fn into_ops(self) -> DbCacheTransactionOps<Block> {
DbCacheTransactionOps {
authorities_at_op: self.authorities_at_op,
}
}
/// Called when a new block is inserted into the database.
pub fn on_block_insert(
mut self,
parent: ComplexBlockId<Block>,
block: ComplexBlockId<Block>,
authorities_at: Option<Vec<AuthorityId>>,
is_final: bool,
) -> ClientResult<Self> {
assert!(self.authorities_at_op.is_none());
self.authorities_at_op = self.cache.authorities_at.on_block_insert(
&mut self::list_storage::DbStorageTransaction::new(
self.cache.authorities_at.storage(),
&mut self.tx
),
parent,
block,
authorities_at,
is_final,
)?;
Ok(self)
}
/// Called when a previously inserted block is finalized.
pub fn on_block_finalize(
mut self,
parent: ComplexBlockId<Block>,
block: ComplexBlockId<Block>
) -> ClientResult<Self> {
assert!(self.authorities_at_op.is_none());
self.authorities_at_op = self.cache.authorities_at.on_block_finalize(
&mut self::list_storage::DbStorageTransaction::new(
self.cache.authorities_at.storage(),
&mut self.tx
),
parent,
block,
)?;
Ok(self)
}
}
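// Typical flow (mirrored by `LightStorage::import_header` further below): open a
// `DBTransaction`, collect the pending cache operations via
// `cache.transaction(&mut tx).on_block_insert(...)?.into_ops()`, write the
// database transaction, and only then call `cache.commit(ops)`, so the in-memory
// cache never runs ahead of what is durably stored.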
/// Synchronous implementation of database-backed blockchain data cache.
pub struct DbCacheSync<Block: BlockT>(pub RwLock<DbCache<Block>>);
impl<Block: BlockT> BlockchainCache<Block> for DbCacheSync<Block> {
fn authorities_at(&self, at: BlockId<Block>) -> Option<Vec<AuthorityId>> {
let cache = self.0.read();
let storage = cache.authorities_at.storage();
let db = storage.db();
let columns = storage.columns();
let at = match at {
BlockId::Hash(hash) => {
let header = utils::read_header::<Block>(
&**db,
columns.hash_lookup,
columns.header,
BlockId::Hash(hash.clone())).ok()??;
ComplexBlockId::new(hash, *header.number())
},
BlockId::Number(number) => {
let hash = utils::read_id::<Block>(
&**db,
columns.hash_lookup,
BlockId::Number(number.clone())).ok()??;
ComplexBlockId::new(hash, number)
},
};
cache.authorities_at.value_at_block(&at).ok()?
}
}
@@ -32,7 +32,7 @@ use primitives::{AuthorityId, Blake2Hasher};
use runtime_primitives::generic::BlockId;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT,
Zero, One, As, NumberFor};
-use cache::DbCache;
+use cache::{DbCacheSync, DbCache, ComplexBlockId};
use utils::{meta_keys, Meta, db_err, number_to_lookup_key, open_database,
read_db, read_id, read_meta};
use DatabaseSettings;
@@ -41,20 +41,17 @@ pub(crate) mod columns {
pub const META: Option<u32> = ::utils::COLUMN_META;
pub const HASH_LOOKUP: Option<u32> = Some(1);
pub const HEADER: Option<u32> = Some(2);
-pub const AUTHORITIES: Option<u32> = Some(3);
+pub const CACHE: Option<u32> = Some(3);
pub const CHT: Option<u32> = Some(4);
}
-/// Keep authorities for last 'AUTHORITIES_ENTRIES_TO_KEEP' blocks.
-#[allow(unused)]
-pub(crate) const AUTHORITIES_ENTRIES_TO_KEEP: u64 = cht::SIZE;
/// Light blockchain storage. Stores most recent headers + CHTs for older headers.
/// Locks order: meta, leaves, cache.
pub struct LightStorage<Block: BlockT> {
db: Arc<KeyValueDB>,
meta: RwLock<Meta<<<Block as BlockT>::Header as HeaderT>::Number, Block::Hash>>,
leaves: RwLock<LeafSet<Block::Hash, NumberFor<Block>>>,
-_cache: DbCache<Block>,
+cache: DbCacheSync<Block>,
}
#[derive(Clone, PartialEq, Debug)]
@@ -86,31 +83,27 @@ impl<Block> LightStorage<Block>
}
fn from_kvdb(db: Arc<KeyValueDB>) -> ClientResult<Self> {
+let meta = read_meta::<Block>(&*db, columns::META, columns::HEADER)?;
+let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?;
let cache = DbCache::new(
db.clone(),
columns::HASH_LOOKUP,
columns::HEADER,
-columns::AUTHORITIES
-)?;
-let meta = RwLock::new(read_meta::<Block>(&*db, columns::META, columns::HEADER)?);
-let leaves = RwLock::new(LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?);
+columns::CACHE,
+ComplexBlockId::new(meta.finalized_hash, meta.finalized_number),
+);
Ok(LightStorage {
db,
-meta,
-leaves,
-_cache: cache,
+meta: RwLock::new(meta),
+cache: DbCacheSync(RwLock::new(cache)),
+leaves: RwLock::new(leaves),
})
}
#[cfg(test)]
pub(crate) fn db(&self) -> &Arc<KeyValueDB> {
&self.db
}
#[cfg(test)]
-pub(crate) fn cache(&self) -> &DbCache<Block> {
-&self._cache
+pub(crate) fn cache(&self) -> &DbCacheSync<Block> {
+&self.cache
}
fn update_meta(
@@ -187,8 +180,12 @@ impl<Block> BlockchainHeaderBackend<Block> for LightStorage<Block>
}
impl<Block: BlockT> LightStorage<Block> {
// note that a block is finalized. only call with child of last finalized block.
-fn note_finalized(&self, transaction: &mut DBTransaction, header: &Block::Header, hash: Block::Hash) -> ClientResult<()> {
+fn note_finalized(
+&self,
+transaction: &mut DBTransaction,
+header: &Block::Header,
+hash: Block::Hash,
+) -> ClientResult<()> {
let meta = self.meta.read();
if &meta.finalized_hash != header.parent_hash() {
return Err(::client::error::ErrorKind::NonSequentialFinalization(
@@ -236,7 +233,7 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
fn import_header(
&self,
header: Block::Header,
-_authorities: Option<Vec<AuthorityId>>,
+authorities: Option<Vec<AuthorityId>>,
leaf_state: NewBlockState,
) -> ClientResult<()> {
let mut transaction = DBTransaction::new();
@@ -246,11 +243,8 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
let parent_hash = *header.parent_hash();
transaction.put(columns::HEADER, hash.as_ref(), &header.encode());
-transaction.put(columns::HASH_LOOKUP, &number_to_lookup_key(number), hash.as_ref());
if leaf_state.is_best() {
-transaction.put(columns::META, meta_keys::BEST_BLOCK, hash.as_ref());
// handle reorg.
{
let meta = self.meta.read();
@@ -286,7 +280,8 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
}
}
-// TODO: cache authorities for previous block, accounting for reorgs.
+transaction.put(columns::META, meta_keys::BEST_BLOCK, hash.as_ref());
+transaction.put(columns::HASH_LOOKUP, &number_to_lookup_key(number), hash.as_ref());
}
let finalized = match leaf_state {
@@ -302,6 +297,16 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
let mut leaves = self.leaves.write();
let displaced_leaf = leaves.import(hash, number, parent_hash);
+let mut cache = self.cache.0.write();
+let cache_ops = cache.transaction(&mut transaction)
+.on_block_insert(
+ComplexBlockId::new(*header.parent_hash(), if number == Zero::zero() { Zero::zero() } else { number - One::one() }),
+ComplexBlockId::new(hash, number),
+authorities,
+finalized,
+)?
+.into_ops();
debug!("Light DB Commit {:?} ({})", hash, number);
let write_result = self.db.write(transaction).map_err(db_err);
if let Err(e) = write_result {
@@ -311,7 +316,10 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
}
return Err(e);
}
+cache.commit(cache_ops);
}
self.update_meta(hash, number, leaf_state.is_best(), finalized);
Ok(())
@@ -332,9 +340,22 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
let mut transaction = DBTransaction::new();
// TODO: ensure best chain contains this block.
let hash = header.hash();
+let number = *header.number();
self.note_finalized(&mut transaction, &header, hash.clone())?;
-self.db.write(transaction).map_err(db_err)?;
+{
+let mut cache = self.cache.0.write();
+let cache_ops = cache.transaction(&mut transaction)
+.on_block_finalize(
+ComplexBlockId::new(*header.parent_hash(), if number == Zero::zero() { Zero::zero() } else { number - One::one() }),
+ComplexBlockId::new(hash, number)
+)?
+.into_ops();
+self.db.write(transaction).map_err(db_err)?;
+cache.commit(cache_ops);
+}
self.update_meta(hash, header.number().clone(), false, true);
Ok(())
} else {
Err(ClientErrorKind::UnknownBlock(format!("Cannot finalize block {:?}", id)).into())
@@ -358,6 +379,16 @@ pub(crate) mod tests {
type Block = RawBlock<u32>;
+fn prepare_header(parent: &Hash, number: u64, extrinsics_root: Hash) -> Header {
+Header {
+number: number.into(),
+parent_hash: *parent,
+state_root: Hash::random(),
+digest: Default::default(),
+extrinsics_root,
+}
+}
pub fn insert_block_with_extrinsics_root(
db: &LightStorage<Block>,
parent: &Hash,
@@ -365,14 +396,7 @@ pub(crate) mod tests {
authorities: Option<Vec<AuthorityId>>,
extrinsics_root: Hash,
) -> Hash {
-let header = Header {
-number: number.into(),
-parent_hash: *parent,
-state_root: Default::default(),
-digest: Default::default(),
-extrinsics_root,
-};
+let header = prepare_header(parent, number, extrinsics_root);
let hash = header.hash();
db.import_header(header, authorities, NewBlockState::Best).unwrap();
hash
@@ -384,19 +408,36 @@ pub(crate) mod tests {
number: u64,
authorities: Option<Vec<AuthorityId>>
) -> Hash {
-let header = Header {
-number: number.into(),
-parent_hash: *parent,
-state_root: Default::default(),
-digest: Default::default(),
-extrinsics_root: Default::default(),
-};
+let header = prepare_header(parent, number, Default::default());
let hash = header.hash();
db.import_header(header, authorities, NewBlockState::Best).unwrap();
hash
}
+fn insert_final_block(
+db: &LightStorage<Block>,
+parent: &Hash,
+number: u64,
+authorities: Option<Vec<AuthorityId>>
+) -> Hash {
+let header = prepare_header(parent, number, Default::default());
+let hash = header.hash();
+db.import_header(header, authorities, NewBlockState::Final).unwrap();
+hash
+}
+fn insert_non_best_block(
+db: &LightStorage<Block>,
+parent: &Hash,
+number: u64,
+authorities: Option<Vec<AuthorityId>>
+) -> Hash {
+let header = prepare_header(parent, number, Default::default());
+let hash = header.hash();
+db.import_header(header, authorities, NewBlockState::Normal).unwrap();
+hash
+}
#[test]
fn returns_known_header() {
let db = LightStorage::new_test();
@@ -464,7 +505,7 @@ pub(crate) mod tests {
let db = LightStorage::new_test();
// insert genesis block header (never pruned)
-let mut prev_hash = insert_block(&db, &Default::default(), 0, None);
+let mut prev_hash = insert_final_block(&db, &Default::default(), 0, None);
// insert SIZE blocks && ensure that nothing is pruned
for number in 0..cht::SIZE {
@@ -511,7 +552,7 @@ pub(crate) mod tests {
let db = LightStorage::new_test();
// insert 1 + SIZE + SIZE + 1 blocks so that CHT#0 is created
-let mut prev_hash = insert_block(&db, &Default::default(), 0, None);
+let mut prev_hash = insert_final_block(&db, &Default::default(), 0, None);
for i in 1..1 + cht::SIZE + cht::SIZE + 1 {
prev_hash = insert_block(&db, &prev_hash, i as u64, None);
db.finalize_header(BlockId::Hash(prev_hash)).unwrap();
@@ -586,4 +627,123 @@ pub(crate) mod tests {
assert!(tree_route.enacted().is_empty());
}
}
#[test]
fn authorities_are_cached() {
let db = LightStorage::new_test();
fn run_checks(db: &LightStorage<Block>, max: u64, checks: &[(u64, Option<Vec<AuthorityId>>)]) {
for (at, expected) in checks.iter().take_while(|(at, _)| *at <= max) {
let actual = db.cache().authorities_at(BlockId::Number(*at));
assert_eq!(*expected, actual);
}
}
let (hash2, hash6) = {
// first few blocks are instantly finalized
// B0(None) -> B1(None) -> B2(1) -> B3(1) -> B4(1, 2) -> B5(1, 2) -> B6(None)
let checks = vec![
(0, None),
(1, None),
(2, Some(vec![[1u8; 32].into()])),
(3, Some(vec![[1u8; 32].into()])),
(4, Some(vec![[1u8; 32].into(), [2u8; 32].into()])),
(5, Some(vec![[1u8; 32].into(), [2u8; 32].into()])),
(6, None),
(7, None), // the best block's value also applies to 'future' blocks
];
let hash0 = insert_final_block(&db, &Default::default(), 0, None);
run_checks(&db, 0, &checks);
let hash1 = insert_final_block(&db, &hash0, 1, None);
run_checks(&db, 1, &checks);
let hash2 = insert_final_block(&db, &hash1, 2, Some(vec![[1u8; 32].into()]));
run_checks(&db, 2, &checks);
let hash3 = insert_final_block(&db, &hash2, 3, Some(vec![[1u8; 32].into()]));
run_checks(&db, 3, &checks);
let hash4 = insert_final_block(&db, &hash3, 4, Some(vec![[1u8; 32].into(), [2u8; 32].into()]));
run_checks(&db, 4, &checks);
let hash5 = insert_final_block(&db, &hash4, 5, Some(vec![[1u8; 32].into(), [2u8; 32].into()]));
run_checks(&db, 5, &checks);
let hash6 = insert_final_block(&db, &hash5, 6, None);
run_checks(&db, 7, &checks);
(hash2, hash6)
};
{
// some older non-best blocks are inserted
// ... -> B2(1) -> B2_1(1) -> B2_2(2)
// => the cache ignores all writes before best finalized block
let hash2_1 = insert_non_best_block(&db, &hash2, 3, Some(vec![[1u8; 32].into()]));
assert_eq!(None, db.cache().authorities_at(BlockId::Hash(hash2_1)));
let hash2_2 = insert_non_best_block(&db, &hash2_1, 4, Some(vec![[1u8; 32].into(), [2u8; 32].into()]));
assert_eq!(None, db.cache().authorities_at(BlockId::Hash(hash2_2)));
}
let (hash7, hash8, hash6_1, hash6_2, hash6_1_1, hash6_1_2) = {
// inserting non-finalized blocks
// B6(None) -> B7(3) -> B8(3)
// \> B6_1(4) -> B6_2(4)
// \> B6_1_1(5)
// \> B6_1_2(6) -> B6_1_3(7)
let hash7 = insert_block(&db, &hash6, 7, Some(vec![[3u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6)), None);
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash7)), Some(vec![[3u8; 32].into()]));
let hash8 = insert_block(&db, &hash7, 8, Some(vec![[3u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6)), None);
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash7)), Some(vec![[3u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash8)), Some(vec![[3u8; 32].into()]));
let hash6_1 = insert_block(&db, &hash6, 7, Some(vec![[4u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6)), None);
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash7)), Some(vec![[3u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash8)), Some(vec![[3u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1)), Some(vec![[4u8; 32].into()]));
let hash6_1_1 = insert_non_best_block(&db, &hash6_1, 8, Some(vec![[5u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6)), None);
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash7)), Some(vec![[3u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash8)), Some(vec![[3u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1)), Some(vec![[4u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1_1)), Some(vec![[5u8; 32].into()]));
let hash6_1_2 = insert_non_best_block(&db, &hash6_1, 8, Some(vec![[6u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6)), None);
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash7)), Some(vec![[3u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash8)), Some(vec![[3u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1)), Some(vec![[4u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1_1)), Some(vec![[5u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1_2)), Some(vec![[6u8; 32].into()]));
let hash6_2 = insert_block(&db, &hash6_1, 8, Some(vec![[4u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6)), None);
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash7)), Some(vec![[3u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash8)), Some(vec![[3u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1)), Some(vec![[4u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1_1)), Some(vec![[5u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1_2)), Some(vec![[6u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_2)), Some(vec![[4u8; 32].into()]));
(hash7, hash8, hash6_1, hash6_2, hash6_1_1, hash6_1_2)
};
{
// finalize block hash6_1
db.finalize_header(BlockId::Hash(hash6_1)).unwrap();
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6)), None);
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash7)), None);
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash8)), None);
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1)), Some(vec![[4u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1_1)), Some(vec![[5u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1_2)), Some(vec![[6u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_2)), Some(vec![[4u8; 32].into()]));
// finalize block hash6_2
db.finalize_header(BlockId::Hash(hash6_2)).unwrap();
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6)), None);
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash7)), None);
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash8)), None);
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1)), Some(vec![[4u8; 32].into()]));
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1_1)), None);
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_1_2)), None);
assert_eq!(db.cache().authorities_at(BlockId::Hash(hash6_2)), Some(vec![[4u8; 32].into()]));
}
}
}
@@ -44,8 +44,8 @@ pub mod meta_keys {
pub const BEST_BLOCK: &[u8; 4] = b"best";
/// Last finalized block key.
pub const FINALIZED_BLOCK: &[u8; 5] = b"final";
-/// Best authorities block key.
-pub const BEST_AUTHORITIES: &[u8; 4] = b"auth";
+/// Meta information prefix for list-based caches.
+pub const CACHE_META_PREFIX: &[u8; 5] = b"cache";
/// Genesis block hash.
pub const GENESIS_HASH: &[u8; 3] = b"gen";
/// Leaves prefix list key.
@@ -82,17 +82,6 @@ pub fn number_to_lookup_key<N>(n: N) -> BlockLookupKey where N: As<u64> {
]
}
-/// Convert block lookup key into block number.
-pub fn lookup_key_to_number<N>(key: &[u8]) -> client::error::Result<N> where N: As<u64> {
-match key.len() {
-4 => Ok((key[0] as u64) << 24
-| (key[1] as u64) << 16
-| (key[2] as u64) << 8
-| (key[3] as u64)).map(As::sa),
-_ => Err(client::error::ErrorKind::Backend("Invalid block key".into()).into()),
-}
-}
/// Maps database error to client error
pub fn db_err(err: io::Error) -> client::error::Error {
use std::error::Error;