// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::*;
use receipt::Receipt;
use chainfilter::{ChainFilter, BloomIndex, FilterDataSource};
use blockchain::block_info::{BlockInfo, BlockLocation};
use blockchain::best_block::BestBlock;
use blockchain::bloom_indexer::BloomIndexer;
use blockchain::tree_route::TreeRoute;
use blockchain::CacheSize;
const BLOOM_INDEX_SIZE: usize = 16;
const BLOOM_LEVELS: u8 = 3;
/// Blockchain configuration.
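///
/// # Example
///
/// A minimal sketch of overriding the defaults (the values below are illustrative,
/// not recommendations from this crate):
///
/// ```ignore
/// let config = BlockChainConfig {
///     pref_cache_size: 1 << 16, // start collecting garbage above 64 KiB
///     max_cache_size: 1 << 22,  // hard ceiling of 4 MiB
/// };
/// ```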
#[derive(Debug)]
pub struct BlockChainConfig {
/// Preferred cache size in bytes.
pub pref_cache_size: usize,
/// Maximum cache size in bytes.
pub max_cache_size: usize,
}
impl Default for BlockChainConfig {
fn default() -> Self {
BlockChainConfig {
pref_cache_size: 1 << 14,
max_cache_size: 1 << 20,
}
}
}
/// Interface for querying blocks by hash and by number.
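///
/// # Example
///
/// A sketch of how code generic over this trait might query it (`describe_genesis`
/// is an illustrative helper, not part of this crate):
///
/// ```ignore
/// fn describe_genesis<P: BlockProvider>(provider: &P) {
///     let genesis = provider.genesis_hash();
///     assert!(provider.is_known(&genesis));
///     assert_eq!(provider.block_number(&genesis), Some(0));
/// }
/// ```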
pub trait BlockProvider {
/// Returns true if the given block is known
/// (though not necessarily a part of the canon chain).
fn is_known(&self, hash: &H256) -> bool;
/// Get raw block data
fn block(&self, hash: &H256) -> Option<Bytes>;
/// Get the familial details concerning a block.
fn block_details(&self, hash: &H256) -> Option<BlockDetails>;
/// Get the hash of the block with the given number.
fn block_hash(&self, index: BlockNumber) -> Option<H256>;
/// Get the address of transaction with given hash.
fn transaction_address(&self, hash: &H256) -> Option<TransactionAddress>;
/// Get receipts of block with given hash.
fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts>;
/// Get the partial-header of a block.
fn block_header(&self, hash: &H256) -> Option<Header> {
self.block(hash).map(|bytes| BlockView::new(&bytes).header())
}
/// Get a list of uncles for a given block.
/// Returns None if block does not exist.
fn uncles(&self, hash: &H256) -> Option<Vec<Header>> {
self.block(hash).map(|bytes| BlockView::new(&bytes).uncles())
}
/// Get a list of uncle hashes for a given block.
/// Returns None if block does not exist.
fn uncle_hashes(&self, hash: &H256) -> Option<Vec<H256>> {
self.block(hash).map(|bytes| BlockView::new(&bytes).uncle_hashes())
}
/// Get the number of the block with the given hash.
fn block_number(&self, hash: &H256) -> Option<BlockNumber> {
self.block(hash).map(|bytes| BlockView::new(&bytes).header_view().number())
}
/// Get transaction with given transaction hash.
fn transaction(&self, address: &TransactionAddress) -> Option<LocalizedTransaction> {
self.block(&address.block_hash).and_then(|bytes| BlockView::new(&bytes).localized_transaction_at(address.index))
	}

	/// Get a list of transactions for a given block.
	/// Returns None if block does not exist.
fn transactions(&self, hash: &H256) -> Option<Vec<LocalizedTransaction>> {
self.block(hash).map(|bytes| BlockView::new(&bytes).localized_transactions())
}
/// Returns reference to genesis hash.
fn genesis_hash(&self) -> H256 {
self.block_hash(0).expect("Genesis hash should always exist")
}
/// Returns the header of the genesis block.
fn genesis_header(&self) -> Header {
self.block_header(&self.genesis_hash()).unwrap()
}
/// Returns numbers of blocks containing given bloom.
	fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockNumber, to_block: BlockNumber) -> Vec<BlockNumber>;
}
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
enum CacheID {
Block(H256),
Extras(ExtrasIndex, H256),
}
struct CacheManager {
cache_usage: VecDeque<HashSet<CacheID>>,
in_use: HashSet<CacheID>,
}
/// Structure providing fast access to blockchain data.
pub struct BlockChain {
	pref_cache_size: usize,
	max_cache_size: usize,

	best_block: RwLock<BestBlock>,

	// block cache
	blocks: RwLock<HashMap<H256, Bytes>>,

	// extra caches
	block_details: RwLock<HashMap<H256, BlockDetails>>,
	block_hashes: RwLock<HashMap<BlockNumber, H256>>,
transaction_addresses: RwLock<HashMap<H256, TransactionAddress>>,
block_logs: RwLock<HashMap<H256, BlockLogBlooms>>,
blocks_blooms: RwLock<HashMap<H256, BlocksBlooms>>,
block_receipts: RwLock<HashMap<H256, BlockReceipts>>,
extras_db: Database,
blocks_db: Database,
	cache_man: RwLock<CacheManager>,

	bloom_indexer: BloomIndexer,
}
impl FilterDataSource for BlockChain {
fn bloom_at_index(&self, bloom_index: &BloomIndex) -> Option<H2048> {
let location = self.bloom_indexer.location(bloom_index);
self.blocks_blooms(&location.hash).and_then(|blooms| blooms.blooms.into_iter().nth(location.index).cloned())
	}
}
impl BlockProvider for BlockChain {
/// Returns true if the given block is known
/// (though not necessarily a part of the canon chain).
fn is_known(&self, hash: &H256) -> bool {
self.query_extras_exist(hash, &self.block_details)
}
/// Get raw block data
fn block(&self, hash: &H256) -> Option<Bytes> {
{
let read = self.blocks.read().unwrap();
if let Some(v) = read.get(hash) {
return Some(v.clone());
}
}
let opt = self.blocks_db.get(hash)
.expect("Low level database error. Some issue with disk?");
match opt {
Some(b) => {
let bytes: Bytes = b.to_vec();
let mut write = self.blocks.write().unwrap();
write.insert(hash.clone(), bytes.clone());
Some(bytes)
},
None => None
}
}
/// Get the familial details concerning a block.
fn block_details(&self, hash: &H256) -> Option<BlockDetails> {
self.query_extras(hash, &self.block_details)
}
/// Get the hash of the block with the given number.
fn block_hash(&self, index: BlockNumber) -> Option<H256> {
self.query_extras(&index, &self.block_hashes)
}
/// Get the address of transaction with given hash.
fn transaction_address(&self, hash: &H256) -> Option<TransactionAddress> {
self.query_extras(hash, &self.transaction_addresses)
}
/// Get receipts of block with given hash.
fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts> {
self.query_extras(hash, &self.block_receipts)
}
/// Returns numbers of blocks containing given bloom.
fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockNumber, to_block: BlockNumber) -> Vec<BlockNumber> {
let filter = ChainFilter::new(self, self.bloom_indexer.index_size(), self.bloom_indexer.levels());
filter.blocks_with_bloom(bloom, from_block as usize, to_block as usize).into_iter().map(|b| b as BlockNumber).collect()
	}
}
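
/// Iterator over a block's ancestry: starting from a given hash it yields that hash
/// first, then each parent hash in turn, until the zero hash (parent of genesis) is reached.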
pub struct AncestryIter<'a> {
current: H256,
chain: &'a BlockChain,
}
impl<'a> Iterator for AncestryIter<'a> {
type Item = H256;
fn next(&mut self) -> Option<H256> {
if self.current.is_zero() {
Option::None
} else {
let mut n = self.chain.block_details(&self.current).unwrap().parent;
mem::swap(&mut self.current, &mut n);
			Some(n)
		}
	}
}

impl BlockChain {
	/// Create new instance of blockchain from given Genesis
	pub fn new(config: BlockChainConfig, genesis: &[u8], path: &Path) -> BlockChain {
let mut extras_path = path.to_path_buf();
extras_path.push("extras");
let extras_db = Database::open_default(extras_path.to_str().unwrap()).unwrap();
let mut blocks_path = path.to_path_buf();
blocks_path.push("blocks");
let blocks_db = Database::open_default(blocks_path.to_str().unwrap()).unwrap();
let mut cache_man = CacheManager{cache_usage: VecDeque::new(), in_use: HashSet::new()};
(0..COLLECTION_QUEUE_SIZE).foreach(|_| cache_man.cache_usage.push_back(HashSet::new()));
		let bc = BlockChain {
			pref_cache_size: config.pref_cache_size,
max_cache_size: config.max_cache_size,
best_block: RwLock::new(BestBlock::default()),
blocks: RwLock::new(HashMap::new()),
block_details: RwLock::new(HashMap::new()),
block_hashes: RwLock::new(HashMap::new()),
transaction_addresses: RwLock::new(HashMap::new()),
block_logs: RwLock::new(HashMap::new()),
blocks_blooms: RwLock::new(HashMap::new()),
block_receipts: RwLock::new(HashMap::new()),
			bloom_indexer: BloomIndexer::new(BLOOM_INDEX_SIZE, BLOOM_LEVELS),
			extras_db: extras_db,
			blocks_db: blocks_db,
			cache_man: RwLock::new(cache_man),
		};
// load best block
let best_block_hash = match bc.extras_db.get(b"best").unwrap() {
Some(best) => H256::from_slice(&best),
None => {
// best block does not exist
// we need to insert genesis into the cache
let block = BlockView::new(genesis);
let header = block.header_view();
let hash = block.sha3();
let details = BlockDetails {
number: header.number(),
total_difficulty: header.difficulty(),
parent: header.parent_hash(),
children: vec![]
};
bc.blocks_db.put(&hash, genesis).unwrap();
				let batch = DBTransaction::new();
				batch.put_extras(&hash, &details);
batch.put_extras(&header.number(), &hash);
batch.put(b"best", &hash).unwrap();
bc.extras_db.write(batch).unwrap();
				hash
			}
		};

		{
			let mut best_block = bc.best_block.write().unwrap();
			best_block.number = bc.block_number(&best_block_hash).unwrap();
best_block.total_difficulty = bc.block_details(&best_block_hash).unwrap().total_difficulty;
best_block.hash = best_block_hash;
		}

		bc
	}
/// Set the cache configuration.
pub fn configure_cache(&mut self, pref_cache_size: usize, max_cache_size: usize) {
self.pref_cache_size = pref_cache_size;
self.max_cache_size = max_cache_size;
}
	/// Returns a tree route between `from` and `to`, which is a tuple of:
	///
	/// - a vector of hashes of all blocks, ordered from `from` to `to`,
	/// - the common ancestor of these blocks,
	/// - an index where the best common ancestor would be.
	///
	/// 1.) from newer to older:
	///
	/// - bc: `A1 -> A2 -> A3 -> A4 -> A5`
	/// - from: A5, to: A4
	///
	/// ```json
	/// { blocks: [A5], ancestor: A4, index: 1 }
	/// ```
	///
	/// 2.) from older to newer:
	///
	/// - bc: `A1 -> A2 -> A3 -> A4 -> A5`
	/// - from: A3, to: A4
	///
	/// ```json
	/// { blocks: [A4], ancestor: A3, index: 0 }
	/// ```
	///
	/// 3.) fork:
	///
	/// ```text
	/// A1 -> A2 -> A3 -> A4
	///            -> B3 -> B4
	/// ```
	///
	/// - from: B4, to: A4
	///
	/// ```json
	/// { blocks: [B4, B3, A3, A4], ancestor: A2, index: 2 }
	/// ```
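	///
	/// # Example
	///
	/// A usage sketch (illustrative only; `bc`, `from_hash` and `to_hash` are assumed to
	/// be an existing `BlockChain` and two known block hashes):
	///
	/// ```ignore
	/// let route = bc.tree_route(from_hash, to_hash);
	/// println!("common ancestor: {:?}", route.ancestor);
	/// // blocks after `route.index` lead from the ancestor towards `to_hash`
	/// for hash in &route.blocks[route.index..] {
	///     println!("descend into {:?}", hash);
	/// }
	/// ```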
pub fn tree_route(&self, from: H256, to: H256) -> TreeRoute {
let mut from_branch = vec![];
let mut to_branch = vec![];
let mut from_details = self.block_details(&from).expect(&format!("0. Expected to find details for block {:?}", from));
let mut to_details = self.block_details(&to).expect(&format!("1. Expected to find details for block {:?}", to));
let mut current_from = from;
let mut current_to = to;
		// move `from` and `to` to the same level
while from_details.number > to_details.number {
from_branch.push(current_from);
current_from = from_details.parent.clone();
from_details = self.block_details(&from_details.parent).expect(&format!("2. Expected to find details for block {:?}", from_details.parent));
}
while to_details.number > from_details.number {
to_branch.push(current_to);
current_to = to_details.parent.clone();
to_details = self.block_details(&to_details.parent).expect(&format!("3. Expected to find details for block {:?}", to_details.parent));
}
assert_eq!(from_details.number, to_details.number);
		// move to shared parent
		while current_from != current_to {
			from_branch.push(current_from);
current_from = from_details.parent.clone();
from_details = self.block_details(&from_details.parent).expect(&format!("4. Expected to find details for block {:?}", from_details.parent));
to_branch.push(current_to);
current_to = to_details.parent.clone();
			to_details = self.block_details(&to_details.parent).expect(&format!("5. Expected to find details for block {:?}", to_details.parent));
}
let index = from_branch.len();
from_branch.extend(to_branch.into_iter().rev());
TreeRoute {
			blocks: from_branch,
			ancestor: current_from,
			index: index
		}
	}
/// Inserts the block into backing cache database.
/// Expects the block to be valid and already verified.
/// If the block is already known, does nothing.
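	///
	/// # Example
	///
	/// A usage sketch (illustrative; `bc` is an open `BlockChain` and `block_rlp` holds
	/// the RLP of an already verified block with no receipts):
	///
	/// ```ignore
	/// bc.insert_block(&block_rlp, vec![]);
	/// assert!(bc.is_known(&BlockView::new(&block_rlp).header_view().sha3()));
	/// ```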
pub fn insert_block(&self, bytes: &[u8], receipts: Vec<Receipt>) {
let block = BlockView::new(bytes);
let header = block.header_view();
		let hash = header.sha3();

		if self.is_known(&hash) {
			return;
		}

		// store block in db
self.blocks_db.put(&hash, &bytes).unwrap();
let info = self.block_info(bytes);
self.apply_update(ExtrasUpdate {
block_hashes: self.prepare_block_hashes_update(bytes, &info),
block_details: self.prepare_block_details_update(bytes, &info),
block_receipts: self.prepare_block_receipts_update(receipts, &info),
transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
info: info
		});
	}
/// Applies extras update.
fn apply_update(&self, update: ExtrasUpdate) {
let batch = DBTransaction::new();
batch.put(b"best", &update.info.hash).unwrap();
		// update best block
		let mut best_block = self.best_block.write().unwrap();
		match update.info.location {
BlockLocation::Branch => (),
_ => {
*best_block = BestBlock {
hash: update.info.hash,
number: update.info.number,
total_difficulty: update.info.total_difficulty
};
}
}
let mut write_hashes = self.block_hashes.write().unwrap();
for (number, hash) in &update.block_hashes {
batch.put_extras(number, hash);
			write_hashes.remove(number);
		}
let mut write_details = self.block_details.write().unwrap();
for (hash, details) in update.block_details.into_iter() {
			batch.put_extras(&hash, &details);
			write_details.insert(hash, details);
}
let mut write_receipts = self.block_receipts.write().unwrap();
for (hash, receipt) in &update.block_receipts {
batch.put_extras(hash, receipt);
write_receipts.remove(hash);
}
let mut write_txs = self.transaction_addresses.write().unwrap();
for (hash, tx_address) in &update.transactions_addresses {
batch.put_extras(hash, tx_address);
write_txs.remove(hash);
}
let mut write_blocks_blooms = self.blocks_blooms.write().unwrap();
for (bloom_hash, blocks_bloom) in &update.blocks_blooms {
batch.put_extras(bloom_hash, blocks_bloom);
write_blocks_blooms.remove(bloom_hash);
}
// update extras database
		self.extras_db.write(batch).unwrap();
	}
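
	/// Returns an iterator over the ancestry of `first`, yielding `first` itself and
	/// then each parent hash back towards the genesis block.
	///
	/// # Example
	///
	/// A sketch mirroring the `check_ancestry_iter` test below (`bc` and `some_hash`
	/// are assumed to exist):
	///
	/// ```ignore
	/// let ancestors: Vec<H256> = bc.ancestry_iter(some_hash.clone()).collect();
	/// assert_eq!(ancestors.first(), Some(&some_hash));
	/// ```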
pub fn ancestry_iter(&self, first: H256) -> AncestryIter {
AncestryIter {
current: first,
chain: &self,
}
}
/// Given a block's `parent`, find every block header which represents a valid uncle.
pub fn find_uncle_headers(&self, parent: &H256) -> Vec<Header> {
let uncle_generations = 6usize;
/*
{
// Find great-uncles (or second-cousins or whatever they are) - children of great-grandparents, great-great-grandparents... that were not already uncles in previous generations.
clog(StateDetail) << "Checking " << m_previousBlock.hash() << ", parent=" << m_previousBlock.parentHash();
h256Hash excluded = _bc.allKinFrom(m_currentBlock.parentHash(), 6);
auto p = m_previousBlock.parentHash();
for (unsigned gen = 0; gen < 6 && p != _bc.genesisHash() && unclesCount < 2; ++gen, p = _bc.details(p).parent)
{
auto us = _bc.details(p).children;
assert(us.size() >= 1); // must be at least 1 child of our grandparent - it's our own parent!
for (auto const& u: us)
if (!excluded.count(u)) // ignore any uncles/mainline blocks that we know about.
{
uncleBlockHeaders.push_back(_bc.info(u));
unclesData.appendRaw(_bc.headerData(u));
++unclesCount;
if (unclesCount == 2)
break;
}
}
}
*/
		let _excluded = self.ancestry_iter(parent.clone()).take(uncle_generations).collect::<HashSet<_>>();

		// NOTE: uncle selection is unfinished in this snapshot; the exclusion set computed
		// above is not yet used and no candidate headers are returned.
		Vec::new()
	}
	/// Get the info about an inserted block, needed to prepare the extras updates.
fn block_info(&self, block_bytes: &[u8]) -> BlockInfo {
let block = BlockView::new(block_bytes);
		let header = block.header_view();
		let hash = block.sha3();
		let number = header.number();
let parent_hash = header.parent_hash();
let parent_details = self.block_details(&parent_hash).expect(format!("Invalid parent hash: {:?}", parent_hash).as_ref());
let total_difficulty = parent_details.total_difficulty + header.difficulty();
let is_new_best = total_difficulty > self.best_block_total_difficulty();
BlockInfo {
hash: hash,
number: number,
total_difficulty: total_difficulty,
location: if is_new_best {
// on new best block we need to make sure that all ancestors
// are moved to "canon chain"
// find the route between old best block and the new one
let best_hash = self.best_block_hash();
let route = self.tree_route(best_hash, parent_hash);
assert_eq!(number, parent_details.number + 1);
match route.blocks.len() {
0 => BlockLocation::CanonChain,
_ => BlockLocation::BranchBecomingCanonChain {
ancestor: route.ancestor,
route: route.blocks.into_iter().skip(route.index).collect()
}
}
} else {
BlockLocation::Branch
}
}
}
/// This function returns modified block hashes.
fn prepare_block_hashes_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<BlockNumber, H256> {
let mut block_hashes = HashMap::new();
let block = BlockView::new(block_bytes);
let header = block.header_view();
let number = header.number();
match info.location {
BlockLocation::Branch => (),
BlockLocation::CanonChain => {
block_hashes.insert(number, info.hash.clone());
},
BlockLocation::BranchBecomingCanonChain { ref ancestor, ref route } => {
let ancestor_number = self.block_number(ancestor).unwrap();
let start_number = ancestor_number + 1;
for (index, hash) in route.iter().cloned().enumerate() {
block_hashes.insert(start_number + index as BlockNumber, hash);
}
block_hashes.insert(number, info.hash.clone());
}
}
block_hashes
}
/// This function returns modified block details.
fn prepare_block_details_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<H256, BlockDetails> {
let block = BlockView::new(block_bytes);
let header = block.header_view();
let parent_hash = header.parent_hash();
// update parent
let mut parent_details = self.block_details(&parent_hash).expect(format!("Invalid parent hash: {:?}", parent_hash).as_ref());
parent_details.children.push(info.hash.clone());
// create current block details
let details = BlockDetails {
number: header.number(),
			total_difficulty: info.total_difficulty,
			parent: parent_hash.clone(),
			children: vec![]
		};

// write to batch
let mut block_details = HashMap::new();
block_details.insert(parent_hash, parent_details);
block_details.insert(info.hash.clone(), details);
block_details
}
/// This function returns modified block receipts.
fn prepare_block_receipts_update(&self, receipts: Vec<Receipt>, info: &BlockInfo) -> HashMap<H256, BlockReceipts> {
let mut block_receipts = HashMap::new();
block_receipts.insert(info.hash.clone(), BlockReceipts::new(receipts));
block_receipts
}
/// This function returns modified transaction addresses.
fn prepare_transaction_addresses_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<H256, TransactionAddress> {
let block = BlockView::new(block_bytes);
let transaction_hashes = block.transaction_hashes();
transaction_hashes.into_iter()
.enumerate()
			.fold(HashMap::new(), |mut acc, (i, tx_hash)| {
acc.insert(tx_hash, TransactionAddress {
block_hash: info.hash.clone(),
index: i
});
acc
})
}
	/// This function returns modified block blooms.
	///
	/// To accelerate bloom lookups, blooms are stored in multiple
	/// layers (BLOOM_LEVELS, currently 3).
	/// ChainFilter is responsible for building and rebuilding these layers.
	/// It returns them in a HashMap, where values are Blooms and
	/// keys are BloomIndexes. A BloomIndex represents a bloom's location on one
	/// of these layers.
	///
	/// To reduce the number of queries to the database, block blooms are stored
	/// in the BlocksBlooms structure, which contains info about several
	/// (BLOOM_INDEX_SIZE, currently 16) consecutive block blooms.
	///
	/// Later, BloomIndexer is used to map a bloom's location on a filter layer (BloomIndex)
	/// to its location in the database (BlocksBloomLocation).
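	///
	/// As an illustration of the indexing (inferred from the constants above, with
	/// BLOOM_LEVELS = 3 and BLOOM_INDEX_SIZE = 16): the bloom of block 300 occupies
	/// index 300 on level 0, feeds the aggregated bloom at index 18 (300 / 16) on
	/// level 1, and the one at index 1 (300 / 256) on level 2. BloomIndexer then maps
	/// each of these BloomIndexes to the BlocksBlooms entry that stores its group of
	/// 16 neighbouring blooms under a single database key.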
fn prepare_block_blooms_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<H256, BlocksBlooms> {
let block = BlockView::new(block_bytes);
let header = block.header_view();
let modified_blooms = match info.location {
BlockLocation::Branch => HashMap::new(),
BlockLocation::CanonChain => {
ChainFilter::new(self, self.bloom_indexer.index_size(), self.bloom_indexer.levels())
					.add_bloom(&header.log_bloom(), header.number() as usize)
			},
BlockLocation::BranchBecomingCanonChain { ref ancestor, ref route } => {
				let ancestor_number = self.block_number(ancestor).unwrap();
				let start_number = ancestor_number + 1;
let mut blooms: Vec<H2048> = route.iter()
.map(|hash| self.block(hash).unwrap())
.map(|bytes| BlockView::new(&bytes).header_view().log_bloom())
.collect();
blooms.push(header.log_bloom());
ChainFilter::new(self, self.bloom_indexer.index_size(), self.bloom_indexer.levels())
.reset_chain_head(&blooms, start_number as usize, self.best_block_number() as usize)
}
};
modified_blooms.into_iter()
.fold(HashMap::new(), | mut acc, (bloom_index, bloom) | {
{
let location = self.bloom_indexer.location(&bloom_index);
let mut blocks_blooms = acc
.entry(location.hash.clone())
.or_insert_with(|| self.blocks_blooms(&location.hash).unwrap_or_else(BlocksBlooms::new));
assert_eq!(self.bloom_indexer.index_size(), blocks_blooms.blooms.len());
blocks_blooms.blooms[location.index] = bloom;
}
acc
			})
	}
	/// Get best block hash.
	pub fn best_block_hash(&self) -> H256 {
		self.best_block.read().unwrap().hash.clone()
	}

	/// Get best block number.
	pub fn best_block_number(&self) -> BlockNumber {
		self.best_block.read().unwrap().number
	}

	/// Get best block total difficulty.
	pub fn best_block_total_difficulty(&self) -> U256 {
		self.best_block.read().unwrap().total_difficulty
	}
/// Get block blooms.
fn blocks_blooms(&self, hash: &H256) -> Option<BlocksBlooms> {
self.query_extras(hash, &self.blocks_blooms)
}
	fn query_extras<K, T>(&self, hash: &K, cache: &RwLock<HashMap<K, T>>) -> Option<T> where
		T: Clone + Decodable + ExtrasIndexable,
		K: ExtrasSliceConvertable + Eq + Hash + Clone {
		{
			let read = cache.read().unwrap();
			if let Some(v) = read.get(hash) {
				return Some(v.clone());
			}
		}

		if let Some(h) = hash.as_h256() {
			self.note_used(CacheID::Extras(T::extras_index(), h.clone()));
		}

		self.extras_db.get_extras(hash).map(|t: T| {
			let mut write = cache.write().unwrap();
			write.insert(hash.clone(), t.clone());
			t
		})
	}
	fn query_extras_exist<K, T>(&self, hash: &K, cache: &RwLock<HashMap<K, T>>) -> bool where
		K: ExtrasSliceConvertable + Eq + Hash + Clone,
		T: ExtrasIndexable {
		{
			let read = cache.read().unwrap();
			if let Some(_) = read.get(hash) {
				return true;
			}
		}

		self.extras_db.extras_exists::<_, T>(hash)
	}
	/// Get the current cache size.
	pub fn cache_size(&self) -> CacheSize {
CacheSize {
blocks: self.blocks.read().unwrap().heap_size_of_children(),
block_details: self.block_details.read().unwrap().heap_size_of_children(),
transaction_addresses: self.transaction_addresses.read().unwrap().heap_size_of_children(),
block_logs: self.block_logs.read().unwrap().heap_size_of_children(),
blocks_blooms: self.blocks_blooms.read().unwrap().heap_size_of_children(),
			block_receipts: self.block_receipts.read().unwrap().heap_size_of_children(),
		}
	}
/// Let the cache system know that a cacheable item has been used.
fn note_used(&self, id: CacheID) {
let mut cache_man = self.cache_man.write().unwrap();
if !cache_man.cache_usage[0].contains(&id) {
cache_man.cache_usage[0].insert(id.clone());
if cache_man.in_use.contains(&id) {
if let Some(c) = cache_man.cache_usage.iter_mut().skip(1).find(|e|e.contains(&id)) {
c.remove(&id);
}
} else {
cache_man.in_use.insert(id);
}
}
}
/// Ticks our cache system and throws out any old data.
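	///
	/// Cache entries are tracked in generations (one HashSet per slot of the collection
	/// queue): each pass drops the oldest generation, evicts its entries from the
	/// individual caches, and rotates a fresh generation to the front, repeating until
	/// the total cache size falls below `max_cache_size`.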
pub fn collect_garbage(&self) {
if self.cache_size().total() < self.pref_cache_size { return; }
for _ in 0..COLLECTION_QUEUE_SIZE {
{
let mut cache_man = self.cache_man.write().unwrap();
let mut blocks = self.blocks.write().unwrap();
let mut block_details = self.block_details.write().unwrap();
let mut block_hashes = self.block_hashes.write().unwrap();
let mut transaction_addresses = self.transaction_addresses.write().unwrap();
let mut block_logs = self.block_logs.write().unwrap();
let mut blocks_blooms = self.blocks_blooms.write().unwrap();
let mut block_receipts = self.block_receipts.write().unwrap();
for id in cache_man.cache_usage.pop_back().unwrap().into_iter() {
cache_man.in_use.remove(&id);
match id {
CacheID::Block(h) => { blocks.remove(&h); },
CacheID::Extras(ExtrasIndex::BlockDetails, h) => { block_details.remove(&h); },
CacheID::Extras(ExtrasIndex::TransactionAddress, h) => { transaction_addresses.remove(&h); },
CacheID::Extras(ExtrasIndex::BlockLogBlooms, h) => { block_logs.remove(&h); },
CacheID::Extras(ExtrasIndex::BlocksBlooms, h) => { blocks_blooms.remove(&h); },
CacheID::Extras(ExtrasIndex::BlockReceipts, h) => { block_receipts.remove(&h); },
_ => panic!(),
}
}
cache_man.cache_usage.push_front(HashSet::new());
// TODO: handle block_hashes properly.
block_hashes.clear();
			}
			if self.cache_size().total() < self.max_cache_size { break; }
		}
	}
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use rustc_serialize::hex::FromHex;
use util::hash::*;
use blockchain::{BlockProvider, BlockChain, BlockChainConfig};
use tests::helpers::*;
use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer};
	#[test]
	fn basic_blockchain_insert() {
let mut canon_chain = ChainGenerator::default();
let mut finalizer = BlockFinalizer::default();
let genesis = canon_chain.generate(&mut finalizer).unwrap();
let first = canon_chain.generate(&mut finalizer).unwrap();
let genesis_hash = BlockView::new(&genesis).header_view().sha3();
let first_hash = BlockView::new(&first).header_view().sha3();
let temp = RandomTempPath::new();
let bc = BlockChain::new(BlockChainConfig::default(), &genesis, temp.as_path());
assert_eq!(bc.genesis_hash(), genesis_hash.clone());
assert_eq!(bc.best_block_hash(), genesis_hash.clone());
assert_eq!(bc.block_hash(0), Some(genesis_hash.clone()));
assert_eq!(bc.block_hash(1), None);
assert_eq!(bc.block_details(&genesis_hash).unwrap().children, vec![]);
bc.insert_block(&first, vec![]);
assert_eq!(bc.block_hash(0), Some(genesis_hash.clone()));
assert_eq!(bc.best_block_number(), 1);
assert_eq!(bc.best_block_hash(), first_hash.clone());
assert_eq!(bc.block_hash(1), Some(first_hash.clone()));
assert_eq!(bc.block_details(&first_hash).unwrap().parent, genesis_hash.clone());
		assert_eq!(bc.block_details(&genesis_hash).unwrap().children, vec![first_hash.clone()]);
	}
#[test]
fn check_ancestry_iter() {
let mut canon_chain = ChainGenerator::default();
let mut finalizer = BlockFinalizer::default();
let genesis = canon_chain.generate(&mut finalizer).unwrap();
let genesis_hash = BlockView::new(&genesis).header_view().sha3();
let temp = RandomTempPath::new();
let bc = BlockChain::new(BlockChainConfig::default(), &genesis, temp.as_path());
let mut block_hashes = vec![genesis_hash.clone()];
for _ in 0..10 {
let block = canon_chain.generate(&mut finalizer).unwrap();
block_hashes.push(BlockView::new(&block).header_view().sha3());
bc.insert_block(&block, vec![]);
}
block_hashes.reverse();
assert_eq!(bc.ancestry_iter(block_hashes[0].clone()).collect::<Vec<_>>(), block_hashes)
}
	#[test]
	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
	fn test_small_fork() {
		let mut canon_chain = ChainGenerator::default();
let mut finalizer = BlockFinalizer::default();
let genesis = canon_chain.generate(&mut finalizer).unwrap();
let b1 = canon_chain.generate(&mut finalizer).unwrap();
let b2 = canon_chain.generate(&mut finalizer).unwrap();
let b3b = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap();
let b3a = canon_chain.generate(&mut finalizer).unwrap();
let genesis_hash = BlockView::new(&genesis).header_view().sha3();
let b1_hash= BlockView::new(&b1).header_view().sha3();
let b2_hash= BlockView::new(&b2).header_view().sha3();
let b3a_hash= BlockView::new(&b3a).header_view().sha3();
let b3b_hash= BlockView::new(&b3b).header_view().sha3();
// b3a is a part of canon chain, whereas b3b is part of sidechain
let temp = RandomTempPath::new();
let bc = BlockChain::new(BlockChainConfig::default(), &genesis, temp.as_path());
bc.insert_block(&b1, vec![]);
bc.insert_block(&b2, vec![]);
bc.insert_block(&b3a, vec![]);
bc.insert_block(&b3b, vec![]);
		assert_eq!(bc.best_block_hash(), b3a_hash);
assert_eq!(bc.block_number(&genesis_hash).unwrap(), 0);
assert_eq!(bc.block_number(&b1_hash).unwrap(), 1);
assert_eq!(bc.block_number(&b2_hash).unwrap(), 2);
assert_eq!(bc.block_number(&b3a_hash).unwrap(), 3);
assert_eq!(bc.block_number(&b3b_hash).unwrap(), 3);
assert_eq!(bc.block_hash(0).unwrap(), genesis_hash);
assert_eq!(bc.block_hash(1).unwrap(), b1_hash);
assert_eq!(bc.block_hash(2).unwrap(), b2_hash);
assert_eq!(bc.block_hash(3).unwrap(), b3a_hash);
let r0_1 = bc.tree_route(genesis_hash.clone(), b1_hash.clone());
assert_eq!(r0_1.ancestor, genesis_hash);
assert_eq!(r0_1.blocks, [b1_hash.clone()]);
assert_eq!(r0_1.index, 0);
let r0_2 = bc.tree_route(genesis_hash.clone(), b2_hash.clone());
assert_eq!(r0_2.ancestor, genesis_hash);
assert_eq!(r0_2.blocks, [b1_hash.clone(), b2_hash.clone()]);
assert_eq!(r0_2.index, 0);
let r1_3a = bc.tree_route(b1_hash.clone(), b3a_hash.clone());
assert_eq!(r1_3a.ancestor, b1_hash);
assert_eq!(r1_3a.blocks, [b2_hash.clone(), b3a_hash.clone()]);
assert_eq!(r1_3a.index, 0);
let r1_3b = bc.tree_route(b1_hash.clone(), b3b_hash.clone());
assert_eq!(r1_3b.ancestor, b1_hash);
assert_eq!(r1_3b.blocks, [b2_hash.clone(), b3b_hash.clone()]);
assert_eq!(r1_3b.index, 0);
let r3a_3b = bc.tree_route(b3a_hash.clone(), b3b_hash.clone());
assert_eq!(r3a_3b.ancestor, b2_hash);
assert_eq!(r3a_3b.blocks, [b3a_hash.clone(), b3b_hash.clone()]);
assert_eq!(r3a_3b.index, 1);
let r1_0 = bc.tree_route(b1_hash.clone(), genesis_hash.clone());
assert_eq!(r1_0.ancestor, genesis_hash);
assert_eq!(r1_0.blocks, [b1_hash.clone()]);
assert_eq!(r1_0.index, 1);
let r2_0 = bc.tree_route(b2_hash.clone(), genesis_hash.clone());
assert_eq!(r2_0.ancestor, genesis_hash);
assert_eq!(r2_0.blocks, [b2_hash.clone(), b1_hash.clone()]);
assert_eq!(r2_0.index, 2);
let r3a_1 = bc.tree_route(b3a_hash.clone(), b1_hash.clone());
assert_eq!(r3a_1.ancestor, b1_hash);
assert_eq!(r3a_1.blocks, [b3a_hash.clone(), b2_hash.clone()]);
assert_eq!(r3a_1.index, 2);
let r3b_1 = bc.tree_route(b3b_hash.clone(), b1_hash.clone());
assert_eq!(r3b_1.ancestor, b1_hash);
assert_eq!(r3b_1.blocks, [b3b_hash.clone(), b2_hash.clone()]);
assert_eq!(r3b_1.index, 2);
let r3b_3a = bc.tree_route(b3b_hash.clone(), b3a_hash.clone());
assert_eq!(r3b_3a.ancestor, b2_hash);
assert_eq!(r3b_3a.blocks, [b3b_hash.clone(), b3a_hash.clone()]);
assert_eq!(r3b_3a.index, 1);
}
#[test]
fn test_reopen_blockchain_db() {
let mut canon_chain = ChainGenerator::default();
let mut finalizer = BlockFinalizer::default();
let genesis = canon_chain.generate(&mut finalizer).unwrap();
let first = canon_chain.generate(&mut finalizer).unwrap();
let genesis_hash = BlockView::new(&genesis).header_view().sha3();
let first_hash = BlockView::new(&first).header_view().sha3();
let temp = RandomTempPath::new();
		{
			let bc = BlockChain::new(BlockChainConfig::default(), &genesis, temp.as_path());
assert_eq!(bc.best_block_hash(), genesis_hash);
bc.insert_block(&first, vec![]);
assert_eq!(bc.best_block_hash(), first_hash);
		}

		{
			let bc = BlockChain::new(BlockChainConfig::default(), &genesis, temp.as_path());
			assert_eq!(bc.best_block_hash(), first_hash);
		}
	}
	#[test]
	fn can_contain_arbitrary_block_sequence() {
let bc_result = generate_dummy_blockchain(50);
let bc = bc_result.reference();