use std::collections::{HashSet, VecDeque};
use std::fmt;
use linked_hash_map::LinkedHashMap;
use chain::{IndexedBlockHeader, IndexedBlock, IndexedTransaction, OutPoint, TransactionOutput};
use storage;
use network::ConsensusParams;
use miner::{MemoryPoolOrderingStrategy, MemoryPoolInformation, FeeCalculator};
use primitives::bytes::Bytes;
use primitives::hash::H256;
use utils::{BestHeadersChain, BestHeadersChainInformation, HashQueueChain, HashPosition};
use types::{BlockHeight, StorageRef, MemoryPoolRef};
/// Index of 'verifying' queue within `HashQueueChain`
const VERIFYING_QUEUE: usize = 0;
/// Index of 'requested' queue within `HashQueueChain`
const REQUESTED_QUEUE: usize = 1;
/// Index of 'scheduled' queue within `HashQueueChain`
const SCHEDULED_QUEUE: usize = 2;
/// Total number of hash queues maintained by the chain
const NUMBER_OF_QUEUES: usize = 3;
/// Block insertion result
pub struct BlockInsertionResult {
/// Hashes of blocks, which were canonized during this insertion procedure. Order matters
pub canonized_blocks_hashes: Vec<H256>,
/// Transaction to 'reverify'. Order matters
impl fmt::Debug for BlockInsertionResult {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		// Show block hashes in reversed byte order (the user-facing/RPC convention).
		let reversed_hashes: Vec<_> = self.canonized_blocks_hashes
			.iter()
			.map(H256::reversed)
			.collect();
		f.debug_struct("BlockInsertionResult")
			.field("canonized_blocks_hashes", &reversed_hashes)
			.field("transactions_to_reverify", &self.transactions_to_reverify)
			.finish()
	}
}
impl BlockInsertionResult {
	/// Test helper: build a result carrying the given canonized block hashes
	/// and no transactions to reverify.
	#[cfg(test)]
	pub fn with_canonized_blocks(canonized_blocks_hashes: Vec<H256>) -> Self {
		BlockInsertionResult {
			canonized_blocks_hashes,
			transactions_to_reverify: Vec::new(),
		}
	}
}
/// Block synchronization state
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum BlockState {
/// Block is unknown
Unknown,
/// Scheduled for requesting
Scheduled,
/// Requested from peers
Requested,
/// Currently verifying
Verifying,
/// In storage
Stored,
/// This block has been marked as dead-end block
DeadEnd,
/// Transaction synchronization state
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum TransactionState {
	/// Transaction is unknown
	Unknown,
	/// Currently verifying
	Verifying,
	/// In memory pool
	InMemory,
	/// In storage
	Stored,
}
/// Synchronization chain information
pub struct Information {
/// Number of blocks hashes currently scheduled for requesting
/// Number of blocks hashes currently requested from peers
/// Number of blocks currently verifying
/// Number of blocks in the storage
/// Information on memory pool
pub transactions: MemoryPoolInformation,
}
/// Blockchain from synchroniation point of view, consisting of:
/// 1) all blocks from the `storage` [oldest blocks]
/// 2) all blocks currently verifying by `verification_queue`
/// 3) all blocks currently requested from peers
/// 4) all blocks currently scheduled for requesting [newest blocks]
pub struct Chain {
/// Genesis block hash (stored for optimizations)
genesis_block_hash: H256,
/// Best storage block (stored for optimizations)
best_storage_block: storage::BestBlock,
/// Local blocks storage
/// In-memory queue of blocks hashes
hash_chain: HashQueueChain,
/// In-memory queue of blocks headers
headers_chain: BestHeadersChain,
/// Currently verifying transactions
verifying_transactions: LinkedHashMap<H256, IndexedTransaction>,
/// Blocks that have been marked as dead-ends
dead_end_blocks: HashSet<H256>,
/// Is SegWit is possible on this chain? SegWit inventory types are used when block/tx-es are
/// requested and this flag is true.
is_segwit_possible: bool,
}
impl BlockState {
	/// Map a hash-queue index back to the block state that owns that queue.
	///
	/// Panics if `queue_index` has no associated queue.
	pub fn from_queue_index(queue_index: usize) -> BlockState {
		match queue_index {
			VERIFYING_QUEUE => BlockState::Verifying,
			REQUESTED_QUEUE => BlockState::Requested,
			SCHEDULED_QUEUE => BlockState::Scheduled,
			_ => panic!("Unsupported queue_index: {}", queue_index),
		}
	}

	/// Map a queue-backed block state to its hash-queue index.
	///
	/// Panics for states that are not backed by a queue
	/// (`Unknown`, `Stored`, `DeadEnd`).
	pub fn to_queue_index(&self) -> usize {
		match *self {
			BlockState::Verifying => VERIFYING_QUEUE,
			BlockState::Requested => REQUESTED_QUEUE,
			BlockState::Scheduled => SCHEDULED_QUEUE,
			_ => panic!("Unsupported queue: {:?}", self),
		}
	}
}
impl Chain {
/// Create new `Chain` with given storage
pub fn new(storage: StorageRef, consensus: ConsensusParams, memory_pool: MemoryPoolRef) -> Self {
// we only work with storages with genesis block
let genesis_block_hash = storage.block_hash(0)
.expect("storage with genesis block is required");
let best_storage_block_hash = best_storage_block.hash.clone();
let is_segwit_possible = consensus.is_segwit_possible();
genesis_block_hash: genesis_block_hash,
best_storage_block: best_storage_block,
storage: storage,
hash_chain: HashQueueChain::with_number_of_queues(NUMBER_OF_QUEUES),
headers_chain: BestHeadersChain::new(best_storage_block_hash),
verifying_transactions: LinkedHashMap::new(),
dead_end_blocks: HashSet::new(),
is_segwit_possible,
}
}
/// Get information on current blockchain state
pub fn information(&self) -> Information {
Information {
scheduled: self.hash_chain.len_of(SCHEDULED_QUEUE),
requested: self.hash_chain.len_of(REQUESTED_QUEUE),
verifying: self.hash_chain.len_of(VERIFYING_QUEUE),
stored: self.best_storage_block.number + 1,
/// Get memory pool
pub fn memory_pool(&self) -> MemoryPoolRef {
self.memory_pool.clone()
Svyatoslav Nikolsky
committed
}
pub fn is_segwit_possible(&self) -> bool {
self.is_segwit_possible
/// Get number of blocks in given state
pub fn length_of_blocks_state(&self, state: BlockState) -> BlockHeight {
match state {
BlockState::Stored => self.best_storage_block.number + 1,
_ => self.hash_chain.len_of(state.to_queue_index()),
}