// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::collections::{HashSet, HashMap, BTreeMap, VecDeque};
use std::str::FromStr;
use std::sync::{Arc, Weak};
use std::path::Path;
use std::fmt;
use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
use std::time::Instant;
use time::precise_time_ns;
use util::{Bytes, PerfTimer, Itertools, Mutex, RwLock, MutexGuard, Hashable};
use util::{journaldb, TrieFactory, Trie};
use util::{U256, H256, Address, H2048, Uint, FixedHash};
use util::trie::TrieSpec;
use util::kvdb::*;
use util::panics::*;
use error::{ImportError, ExecutionError, CallError, BlockError, ImportResult, Error as EthcoreError};
use service::ClientIoMessage;
use verification;
use verification::{PreverifiedBlock, Verifier};
use transaction::{LocalizedTransaction, UnverifiedTransaction, SignedTransaction, Transaction, PendingTransaction, Action};
use log_entry::LocalizedLogEntry;
use verification::queue::BlockQueue;
use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute};
use client::{
	BlockId, TransactionId, UncleId, TraceId, ClientConfig, BlockChainClient,
	MiningBlockChainClient, EngineClient, TraceFilter, CallAnalytics, BlockImportError, Mode,
	ChainNotify,
};
use client::Error as ClientError;
use client::registry::Registry;
use env_info::{EnvInfo, LastHashes};
use engines::Engine;
use state::{State, CleanupMode};
use state_db::StateDB;
use spec::Spec;
use block::*;
use blockchain::extras::TransactionAddress;
use views::{BlockView, BodyView};
use io::*;
use rand::OsRng;
use trace::FlatTransactionTraces;
use executive::{Executive, Executed, TransactOptions, contract_address};
use receipt::{Receipt, LocalizedReceipt};
use trace::{TraceDB, ImportRequest as TraceImportRequest, LocalizedTrace, Database as TraceDatabase};
use evm::{Factory as EvmFactory, Schedule};
use miner::{Miner, MinerService};
use snapshot::{self, io as snapshot_io};
use factory::Factories;
use rlp::{View, UntrustedRlp};
// re-export
pub use types::blockchain_info::BlockChainInfo;
pub use types::block_status::BlockStatus;
pub use blockchain::CacheSize as BlockChainCacheSize;
pub use verification::queue::QueueInfo as BlockQueueInfo;
/// Queue length threshold above which the client refuses to go to sleep.
const MAX_QUEUE_SIZE_TO_SLEEP_ON: usize = 2;
/// Minimum number of recent states kept, regardless of the configured pruning history.
const MIN_HISTORY_SIZE: u64 = 8;
impl fmt::Display for BlockChainInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "#{}.{}", self.best_block_number, self.best_block_hash)
}
}
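// Illustrative output of the Display impl above: a client whose best block is
// #1500000 formats as "#1500000.<best block hash>".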
/// Report on the status of a client.
#[derive(Default, Clone, Debug, Eq, PartialEq)]
pub struct ClientReport {
	/// How many blocks have been imported so far.
	pub blocks_imported: usize,
	/// How many transactions have been applied so far.
	pub transactions_applied: usize,
	/// How much gas has been processed so far.
	pub gas_processed: U256,
	/// Memory used by state DB
	pub state_db_mem: usize,
}

impl ClientReport {
	/// Alter internal reporting to reflect the additional `block` has been processed.
	pub fn accrue_block(&mut self, block: &PreverifiedBlock) {
		self.blocks_imported += 1;
		self.transactions_applied += block.transactions.len();
		self.gas_processed = self.gas_processed + block.header.gas_used().clone();
	}
}
struct SleepState {
last_activity: Option<Instant>,
last_autosleep: Option<Instant>,
}
impl SleepState {
fn new(awake: bool) -> Self {
SleepState {
last_activity: match awake { false => None, true => Some(Instant::now()) },
last_autosleep: match awake { false => Some(Instant::now()), true => None },
}
}
}
/// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue.
/// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue.
pub struct Client {
	enabled: AtomicBool,
	mode: Mutex<Mode>,
	chain: RwLock<Arc<BlockChain>>,
	tracedb: RwLock<TraceDB<BlockChain>>,
	engine: Arc<Engine>,
	config: ClientConfig,
	pruning: journaldb::Algorithm,
	db: RwLock<Arc<Database>>,
	state_db: Mutex<StateDB>,
	block_queue: BlockQueue,
	report: RwLock<ClientReport>,
	import_lock: Mutex<()>,
	panic_handler: Arc<PanicHandler>,
	verifier: Box<Verifier>,
	miner: Arc<Miner>,
	sleep_state: Mutex<SleepState>,
	liveness: AtomicBool,
	io_channel: Mutex<IoChannel<ClientIoMessage>>,
	notify: RwLock<Vec<Weak<ChainNotify>>>,
	queue_transactions: AtomicUsize,
	last_hashes: RwLock<VecDeque<H256>>,
	factories: Factories,
	history: u64,
	rng: Mutex<OsRng>,
	on_mode_change: Mutex<Option<Box<FnMut(&Mode) + 'static + Send>>>,
	registrar: Mutex<Option<Registry>>,
}
impl Client {
	/// Create a new client with given spec and DB path and custom verifier.
	pub fn new(
		config: ClientConfig,
		spec: &Spec,
		path: &Path,
		miner: Arc<Miner>,
		message_channel: IoChannel<ClientIoMessage>,
		db_config: &DatabaseConfig,
	) -> Result<Arc<Client>, ClientError> {
let db = Arc::new(Database::open(&db_config, &path.to_str().expect("DB path could not be converted to string.")).map_err(ClientError::Database)?);
let trie_spec = match config.fat_db {
true => TrieSpec::Fat,
false => TrieSpec::Secure,
};
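		// Note (summary, not normative): a "fat" trie additionally stores key preimages so
		// state entries can be enumerated later (fat-db queries); the secure trie keeps
		// only keccak-hashed keys.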
		let trie_factory = TrieFactory::new(trie_spec);
		let factories = Factories {
vm: EvmFactory::new(config.vm_type.clone(), config.jump_table_size),
trie: trie_factory,
accountdb: Default::default(),
};
let journal_db = journaldb::new(db.clone(), config.pruning, ::db::COL_STATE);
let mut state_db = StateDB::new(journal_db, config.state_cache_size);
if state_db.journal_db().is_empty() {
// Sets the correct state root.
state_db = spec.ensure_db_good(state_db, &factories)?;
let mut batch = DBTransaction::new(&db);
state_db.journal_under(&mut batch, 0, &spec.genesis_header().hash())?;
db.write(batch).map_err(ClientError::Database)?;
		}
let gb = spec.genesis_block();
let chain = Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone()));
let tracedb = RwLock::new(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone()));
trace!("Cleanup journal: DB Earliest = {:?}, Latest = {:?}", state_db.journal_db().earliest_era(), state_db.journal_db().latest_era());
let history = if config.history < MIN_HISTORY_SIZE {
info!(target: "client", "Ignoring pruning history parameter of {}\
, falling back to minimum of {}",
config.history, MIN_HISTORY_SIZE);
MIN_HISTORY_SIZE
} else {
config.history
};
if !chain.block_header(&chain.best_block_hash()).map_or(true, |h| state_db.journal_db().contains(h.state_root())) {
warn!("State root not found for block #{} ({})", chain.best_block_number(), chain.best_block_hash().hex());
}
let engine = spec.engine.clone();
let block_queue = BlockQueue::new(config.queue.clone(), engine.clone(), message_channel.clone(), config.verifier_type.verifying_seal());
let panic_handler = PanicHandler::new_in_arc();
panic_handler.forward_from(&block_queue);
let awake = match config.mode { Mode::Dark(..) | Mode::Off => false, _ => true };
		let client = Arc::new(Client {
			enabled: AtomicBool::new(true),
sleep_state: Mutex::new(SleepState::new(awake)),
liveness: AtomicBool::new(awake),
			mode: Mutex::new(config.mode.clone()),
			chain: RwLock::new(chain),
			tracedb: tracedb,
engine: engine,
pruning: config.pruning.clone(),
verifier: verification::new(config.verifier_type.clone()),
config: config,
db: RwLock::new(db),
			state_db: Mutex::new(state_db),
			block_queue: block_queue,
			report: RwLock::new(Default::default()),
			import_lock: Mutex::new(()),
			panic_handler: panic_handler,
			miner: miner,
			io_channel: Mutex::new(message_channel),
notify: RwLock::new(Vec::new()),
			queue_transactions: AtomicUsize::new(0),
			last_hashes: RwLock::new(VecDeque::new()),
			factories: factories,
			history: history,
			rng: Mutex::new(OsRng::new().map_err(::util::UtilError::StdIo)?),
			on_mode_change: Mutex::new(None),
			registrar: Mutex::new(None),
		});

		{
let state_db = client.state_db.lock().boxed_clone();
let chain = client.chain.read();
client.prune_ancient(state_db, &chain)?;
}
if let Some(reg_addr) = client.additional_params().get("registrar").and_then(|s| Address::from_str(s).ok()) {
let weak = Arc::downgrade(&client);
let registrar = Registry::new(reg_addr, move |a, d| weak.upgrade().ok_or("No client!".into()).and_then(|c| c.call_contract(a, d)));
*client.registrar.lock() = Some(registrar);
}
		Ok(client)
	}
/// Adds an actor to be notified on certain events
pub fn add_notify(&self, target: Arc<ChainNotify>) {
		self.notify.write().push(Arc::downgrade(&target));
	}
/// Returns engine reference.
pub fn engine(&self) -> &Engine {
&*self.engine
}
fn notify<F>(&self, f: F) where F: Fn(&ChainNotify) {
for np in self.notify.read().iter() {
if let Some(n) = np.upgrade() {
f(&*n);
}
		}
	}
/// Get the Registry object - useful for looking up names.
pub fn registrar(&self) -> MutexGuard<Option<Registry>> {
self.registrar.lock()
}
/// Register an action to be done if a mode change happens.
pub fn on_mode_change<F>(&self, f: F) where F: 'static + FnMut(&Mode) + Send {
*self.on_mode_change.lock() = Some(Box::new(f));
}
/// Flush the block import queue.
	pub fn flush_queue(&self) {
		self.block_queue.flush();
		while !self.block_queue.queue_info().is_empty() {
			self.import_verified_blocks();
		}
	}
/// The env info as of the best block.
fn latest_env_info(&self) -> EnvInfo {
		let header = self.best_block_header();
		EnvInfo {
			number: header.number(),
			author: header.author(),
			timestamp: header.timestamp(),
			difficulty: header.difficulty(),
			last_hashes: self.build_last_hashes(header.hash()),
			gas_used: U256::default(),
			gas_limit: header.gas_limit(),
		}
	}
	fn build_last_hashes(&self, parent_hash: H256) -> Arc<LastHashes> {
		{
			let hashes = self.last_hashes.read();
			if hashes.front().map_or(false, |h| h == &parent_hash) {
				let mut res = Vec::from(hashes.clone());
				res.resize(256, H256::default());
				return Arc::new(res);
			}
		}
		let mut last_hashes = LastHashes::new();
		last_hashes.resize(256, H256::default());
		last_hashes[0] = parent_hash;
		for i in 0..255 {
			match self.chain.read().block_details(&last_hashes[i]) {
				Some(details) => {
					last_hashes[i + 1] = details.parent.clone();
				},
				None => break,
			}
		}
		let mut cached_hashes = self.last_hashes.write();
		*cached_hashes = VecDeque::from(last_hashes.clone());
		Arc::new(last_hashes)
	}
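	// In the vector built above, `last_hashes[0]` is the parent block's hash and
	// `last_hashes[i]` its i-th ancestor, zero-padded past genesis; the EVM's BLOCKHASH
	// opcode is served out of this 256-entry window.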
fn check_and_close_block(&self, block: &PreverifiedBlock) -> Result<LockedBlock, ()> {
		let engine = &*self.engine;
		let header = &block.header;

		let chain = self.chain.read();
		// Check the block isn't so old we won't be able to enact it.
		let best_block_number = chain.best_block_number();
if best_block_number >= self.history && header.number() <= best_block_number - self.history {
warn!(target: "client", "Block import failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number);
return Err(());
}
let verify_family_result = self.verifier.verify_block_family(header, &block.bytes, engine, &**chain);
if let Err(e) = verify_family_result {
warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
return Err(());
};
// Check if Parent is in chain
let chain_has_parent = chain.block_header(header.parent_hash());
if let Some(parent) = chain_has_parent {
// Enact Verified Block
let last_hashes = self.build_last_hashes(header.parent_hash().clone());
let db = self.state_db.lock().boxed_clone_canon(header.parent_hash());
let enact_result = enact_verified(block, engine, self.tracedb.read().tracing_enabled(), db, &parent, last_hashes, self.factories.clone());
let locked_block = enact_result.map_err(|e| {
warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
// Final Verification
if let Err(e) = self.verifier.verify_block_final(header, locked_block.block().header()) {
warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
return Err(());
}
Ok(locked_block)
} else {
warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash());
			Err(())
		}
	}
fn calculate_enacted_retracted(&self, import_results: &[ImportRoute]) -> (Vec<H256>, Vec<H256>) {
fn map_to_vec(map: Vec<(H256, bool)>) -> Vec<H256> {
map.into_iter().map(|(k, _v)| k).collect()
}
		// In ImportRoute we get all the blocks that have been enacted and retracted by single insert.
		// Because we are doing multiple inserts, some of the blocks that were enacted in import `k`
		// could be retracted in import `k+1`. To determine whether a block ends up enacted or
		// retracted after all inserts, we fold over every route; the final state for each hash
		// is whatever remains in the map.
let map = import_results.iter().fold(HashMap::new(), |mut map, route| {
for hash in &route.enacted {
				map.insert(hash.clone(), true);
			}
for hash in &route.retracted {
map.insert(hash.clone(), false);
}
map
});
// Split to enacted retracted (using hashmap value)
let (enacted, retracted) = map.into_iter().partition(|&(_k, v)| v);
// And convert tuples to keys
(map_to_vec(enacted), map_to_vec(retracted))
}
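	// Illustrative fold (hypothetical hashes A, B, C): if one insert yields
	// `enacted: [A, B]` and a later insert reorgs with `retracted: [B], enacted: [C]`,
	// the map ends up as {A: true, B: false, C: true}, so the final result is
	// (enacted: [A, C], retracted: [B]).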
/// This is triggered by a message coming from a block queue when the block is ready for insertion
pub fn import_verified_blocks(&self) -> usize {
// Shortcut out if we know we're incapable of syncing the chain.
if !self.enabled.load(AtomicOrdering::Relaxed) {
return 0;
		}

		let max_blocks_to_import = 4;
		let (imported_blocks, import_results, invalid_blocks, imported, proposed_blocks, duration, is_empty) = {
let mut imported_blocks = Vec::with_capacity(max_blocks_to_import);
let mut invalid_blocks = HashSet::new();
let mut proposed_blocks = Vec::with_capacity(max_blocks_to_import);
let mut import_results = Vec::with_capacity(max_blocks_to_import);
let _import_lock = self.import_lock.lock();
let blocks = self.block_queue.drain(max_blocks_to_import);
if blocks.is_empty() {
return 0;
}
let _timer = PerfTimer::new("import_verified_blocks");
let start = precise_time_ns();
for block in blocks {
let header = &block.header;
				let is_invalid = invalid_blocks.contains(header.parent_hash());
				if is_invalid {
					invalid_blocks.insert(header.hash());
					continue;
				}
				if let Ok(closed_block) = self.check_and_close_block(&block) {
					if self.engine.is_proposal(&block.header) {
						self.block_queue.mark_as_good(&[header.hash()]);
						proposed_blocks.push(block.bytes);
					} else {
						imported_blocks.push(header.hash());

						let route = self.commit_block(closed_block, &header.hash(), &block.bytes);
						import_results.push(route);

						self.report.write().accrue_block(&block);
					}
				} else {
					invalid_blocks.insert(header.hash());
				}
			}
let imported = imported_blocks.len();
let invalid_blocks = invalid_blocks.into_iter().collect::<Vec<H256>>();
			if !invalid_blocks.is_empty() {
				self.block_queue.mark_as_bad(&invalid_blocks);
			}
			let is_empty = self.block_queue.mark_as_good(&imported_blocks);
let duration_ns = precise_time_ns() - start;
			(imported_blocks, import_results, invalid_blocks, imported, proposed_blocks, duration_ns, is_empty)
		};

		if !imported_blocks.is_empty() && is_empty {
			let (enacted, retracted) = self.calculate_enacted_retracted(&import_results);
			self.miner.chain_new_blocks(self, &imported_blocks, &invalid_blocks, &enacted, &retracted);

			self.notify(|notify| {
				notify.new_blocks(
imported_blocks.clone(),
invalid_blocks.clone(),
enacted.clone(),
					retracted.clone(),
					vec![],
					proposed_blocks.clone(),
					duration,
				);
			});
		}

		self.db.read().flush().expect("DB flush failed.");
		imported
	}
/// Import a block with transaction receipts.
	/// The block is guaranteed to be the next best block in the first block sequence.
/// Does no sealing or transaction validation.
fn import_old_block(&self, block_bytes: Bytes, receipts_bytes: Bytes) -> Result<H256, ::error::Error> {
let block = BlockView::new(&block_bytes);
let header = block.header();
let hash = header.hash();
let _import_lock = self.import_lock.lock();
{
let _timer = PerfTimer::new("import_old_block");
			let chain = self.chain.read();
			let mut rng = self.rng.lock();
::snapshot::verify_old_block(
&mut *rng,
&header,
&*self.engine,
&*chain,
Some(&block_bytes),
				false,
			)?;
// Commit results
let receipts = ::rlp::decode(&receipts_bytes);
let mut batch = DBTransaction::new(&self.db.read());
chain.insert_unordered_block(&mut batch, &block_bytes, receipts, None, false, true);
// Final commit to the DB
self.db.read().write_buffered(batch);
chain.commit();
}
		self.db.read().flush().expect("DB flush failed.");
		Ok(hash)
	}
fn commit_block<B>(&self, block: B, hash: &H256, block_data: &[u8]) -> ImportRoute where B: IsBlock + Drain {
let number = block.header().number();
		let parent = block.header().parent_hash().clone();
		let chain = self.chain.read();
		let receipts = block.receipts().to_owned();
let traces = block.traces().clone().unwrap_or_else(Vec::new);
let traces: Vec<FlatTransactionTraces> = traces.into_iter()
.map(Into::into)
.collect();
//let traces = From::from(block.traces().clone().unwrap_or_else(Vec::new));
let mut batch = DBTransaction::new(&self.db.read());
// CHECK! I *think* this is fine, even if the state_root is equal to another
// already-imported block of the same number.
// TODO: Prove it with a test.
let mut state = block.drain();
state.journal_under(&mut batch, number, hash).expect("DB commit failed");
let route = chain.insert_block(&mut batch, block_data, receipts);
		self.tracedb.read().import(&mut batch, TraceImportRequest {
			traces: traces.into(),
			block_hash: hash.clone(),
block_number: number,
enacted: route.enacted.clone(),
retracted: route.retracted.len()
});
let is_canon = route.enacted.last().map_or(false, |h| h == hash);
state.sync_cache(&route.enacted, &route.retracted, is_canon);
self.db.read().write_buffered(batch);
chain.commit();
self.update_last_hashes(&parent, hash);
if let Err(e) = self.prune_ancient(state, &chain) {
warn!("Failed to prune ancient state data: {}", e);
}
		route
	}
// prune ancient states until below the memory limit or only the minimum amount remain.
fn prune_ancient(&self, mut state_db: StateDB, chain: &BlockChain) -> Result<(), ClientError> {
let number = match state_db.journal_db().latest_era() {
Some(n) => n,
None => return Ok(()),
};
// prune all ancient eras until we're below the memory target,
// but have at least the minimum number of states.
loop {
let needs_pruning = state_db.journal_db().is_pruned() &&
state_db.journal_db().journal_size() >= self.config.history_mem;
if !needs_pruning { break }
match state_db.journal_db().earliest_era() {
Some(era) if era + self.history <= number => {
trace!(target: "client", "Pruning state for ancient era {}", era);
match chain.block_hash(era) {
Some(ancient_hash) => {
let mut batch = DBTransaction::new(&self.db.read());
state_db.mark_canonical(&mut batch, era, &ancient_hash)?;
self.db.read().write_buffered(batch);
state_db.journal_db().flush();
}
None =>
debug!(target: "client", "Missing expected hash for block {}", era),
}
}
_ => break, // means that every era is kept, no pruning necessary.
}
}
Ok(())
}
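	// Illustrative arithmetic for the pruning loop above: with `history` = 64 and a
	// latest era of 1_000, only eras satisfying era + 64 <= 1_000 (i.e. up to era 936)
	// are candidates, and pruning stops once the journal drops below `history_mem`.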
fn update_last_hashes(&self, parent: &H256, hash: &H256) {
let mut hashes = self.last_hashes.write();
if hashes.front().map_or(false, |h| h == parent) {
if hashes.len() > 255 {
hashes.pop_back();
}
hashes.push_front(hash.clone());
}
}
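	// Note: the deque above is only extended when the new block's parent is already at
	// the front, i.e. on a canonical append; after a reorg it no longer matches and is
	// rebuilt lazily by `build_last_hashes`.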
	/// Import transactions from the IO queue.
	pub fn import_queued_transactions(&self, transactions: &[Bytes], peer_id: usize) -> usize {
let _timer = PerfTimer::new("import_queued_transactions");
self.queue_transactions.fetch_sub(transactions.len(), AtomicOrdering::SeqCst);
let txs: Vec<UnverifiedTransaction> = transactions.iter().filter_map(|bytes| UntrustedRlp::new(bytes).as_val().ok()).collect();
let hashes: Vec<_> = txs.iter().map(|tx| tx.hash()).collect();
		self.notify(|notify| {
			notify.transactions_received(hashes.clone(), peer_id);
		});
		let results = self.miner.import_external_transactions(self, txs);
		results.len()
	}
/// Get shared miner reference.
pub fn miner(&self) -> Arc<Miner> {
self.miner.clone()
}
/// Replace io channel. Useful for testing.
pub fn set_io_channel(&self, io_channel: IoChannel<ClientIoMessage>) {
		*self.io_channel.lock() = io_channel;
	}
/// Attempt to get a copy of a specific block's final state.
/// This will not fail if given BlockId::Latest.
/// Otherwise, this can fail (but may not) if the DB prunes state or the block
/// is unknown.
pub fn state_at(&self, id: BlockId) -> Option<State> {
		// fast path for latest state.
		match id.clone() {
			BlockId::Pending => return self.miner.pending_state().or_else(|| Some(self.state())),
			BlockId::Latest => return Some(self.state()),
			_ => {},
		}
let block_number = match self.block_number(id.clone()) {
Some(num) => num,
None => return None,
};
self.block_header(id).and_then(|header| {
let db = self.state_db.lock().boxed_clone();
			// early exit for pruned blocks
			if db.is_pruned() && self.chain.read().best_block_number() >= block_number + self.history {
				return None;
			}

			let root = header.state_root();
			State::from_existing(db, root, self.engine.account_start_nonce(), self.factories.clone()).ok()
		})
	}
/// Attempt to get a copy of a specific block's beginning state.
///
/// This will not fail if given BlockId::Latest.
/// Otherwise, this can fail (but may not) if the DB prunes state.
pub fn state_at_beginning(&self, id: BlockId) -> Option<State> {
// fast path for latest state.
match id {
BlockId::Pending => self.state_at(BlockId::Latest),
id => match self.block_number(id) {
None | Some(0) => None,
				Some(n) => self.state_at(BlockId::Number(n - 1)),
			}
		}
	}
/// Get a copy of the best block's state.
pub fn state(&self) -> State {
let header = self.best_block_header();
		State::from_existing(
			self.state_db.lock().boxed_clone_canon(&header.hash()),
			header.state_root(),
			self.engine.account_start_nonce(),
			self.factories.clone())
		.expect("State root of best block header always valid.")
	}
	/// Get info on the cache.
	pub fn blockchain_cache_info(&self) -> BlockChainCacheSize {
		self.chain.read().cache_size()
	}
/// Get the report.
pub fn report(&self) -> ClientReport {
let mut report = self.report.read().clone();
		report.state_db_mem = self.state_db.lock().mem_used();
		report
	}

	/// Tick the client.
	// TODO: manage by real events.
	pub fn tick(&self) {
		self.check_garbage();
self.check_snooze();
}
	fn check_garbage(&self) {
		self.chain.read().collect_garbage();
		self.block_queue.collect_garbage();
		self.tracedb.read().collect_garbage();
	}

	fn check_snooze(&self) {
		let mode = self.mode.lock().clone();
		match mode {
			Mode::Dark(timeout) => {
				let mut ss = self.sleep_state.lock();
				if let Some(t) = ss.last_activity {
					if Instant::now() > t + timeout {
						self.sleep();
						ss.last_activity = None;
					}
				}
			}
Mode::Passive(timeout, wakeup_after) => {
let mut ss = self.sleep_state.lock();
let now = Instant::now();
				if let Some(t) = ss.last_activity {
					if now > t + timeout {
						self.sleep();
						ss.last_activity = None;
						ss.last_autosleep = Some(now);
					}
				}
if let Some(t) = ss.last_autosleep {
if now > t + wakeup_after {
self.wake_up();
ss.last_activity = Some(now);
ss.last_autosleep = None;
}
}
}
_ => {}
		}
	}
/// Take a snapshot at the given block.
/// If the ID given is "latest", this will default to 1000 blocks behind.
pub fn take_snapshot<W: snapshot_io::SnapshotWriter + Send>(&self, writer: W, at: BlockId, p: &snapshot::Progress) -> Result<(), EthcoreError> {
let db = self.state_db.lock().journal_db().boxed_clone();
let best_block_number = self.chain_info().best_block_number;
let block_number = self.block_number(at).ok_or(snapshot::Error::InvalidStartingBlock(at))?;
if best_block_number > self.history + block_number && db.is_pruned() {
return Err(snapshot::Error::OldBlockPrunedDB.into());
}
let history = ::std::cmp::min(self.history, 1000);
		let start_hash = match at {
			BlockId::Latest => {
				let start_num = match db.earliest_era() {
Some(era) => ::std::cmp::max(era, best_block_number - history),
None => best_block_number - history,
};
match self.block_hash(BlockId::Number(start_num)) {
Some(h) => h,
None => return Err(snapshot::Error::InvalidStartingBlock(at).into()),
}
}
_ => match self.block_hash(at) {
Some(hash) => hash,
None => return Err(snapshot::Error::InvalidStartingBlock(at).into()),
},
		};

		snapshot::take_snapshot(&self.chain.read(), start_hash, db.as_hashdb(), writer, p)?;

		Ok(())
	}
/// Ask the client what the history parameter is.
pub fn pruning_history(&self) -> u64 {
self.history
}
fn block_hash(chain: &BlockChain, id: BlockId) -> Option<H256> {
match id {
BlockId::Hash(hash) => Some(hash),
BlockId::Number(number) => chain.block_hash(number),
BlockId::Earliest => chain.block_hash(0),
			BlockId::Latest | BlockId::Pending => Some(chain.best_block_hash()),
		}
	}
	fn transaction_address(&self, id: TransactionId) -> Option<TransactionAddress> {
		match id {
			TransactionId::Hash(ref hash) => self.chain.read().transaction_address(hash),
			TransactionId::Location(id, index) => Self::block_hash(&self.chain.read(), id).map(|hash| TransactionAddress {
				block_hash: hash,
				index: index,
			})
		}
	}
fn wake_up(&self) {
if !self.liveness.load(AtomicOrdering::Relaxed) {
self.liveness.store(true, AtomicOrdering::Relaxed);
self.notify(|n| n.start());
trace!(target: "mode", "wake_up: Waking.");
}
}
fn sleep(&self) {
if self.liveness.load(AtomicOrdering::Relaxed) {
// only sleep if the import queue is mostly empty.
if self.queue_info().total_queue_size() <= MAX_QUEUE_SIZE_TO_SLEEP_ON {
self.liveness.store(false, AtomicOrdering::Relaxed);
self.notify(|n| n.stop());
trace!(target: "mode", "sleep: Sleeping.");
} else {
trace!(target: "mode", "sleep: Cannot sleep - syncing ongoing.");
// TODO: Consider uncommenting.
				//*self.last_activity.lock() = Some(Instant::now());
			}
		}
	}
}
impl snapshot::DatabaseRestore for Client {
/// Restart the client with a new backend
fn restore_db(&self, new_db: &str) -> Result<(), EthcoreError> {
trace!(target: "snapshot", "Replacing client database with {:?}", new_db);
		let _import_lock = self.import_lock.lock();
		let mut state_db = self.state_db.lock();
let mut chain = self.chain.write();
let mut tracedb = self.tracedb.write();
self.miner.clear();
		let db = self.db.write();
		db.restore(new_db)?;
let cache_size = state_db.cache_size();
*state_db = StateDB::new(journaldb::new(db.clone(), self.pruning, ::db::COL_STATE), cache_size);
*chain = Arc::new(BlockChain::new(self.config.blockchain.clone(), &[], db.clone()));
		*tracedb = TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone());
		Ok(())
	}
}
impl BlockChainClient for Client {
fn call(&self, t: &SignedTransaction, block: BlockId, analytics: CallAnalytics) -> Result<Executed, CallError> {
let header = self.block_header(block).ok_or(CallError::StatePruned)?;
		let last_hashes = self.build_last_hashes(header.parent_hash());
		let env_info = EnvInfo {
			number: header.number(),
			author: header.author(),
			timestamp: header.timestamp(),
			difficulty: header.difficulty(),
			last_hashes: last_hashes,
			gas_used: U256::zero(),
			gas_limit: U256::max_value(),
		};
		// that's just a copy of the state.
		let mut state = self.state_at(block).ok_or(CallError::StatePruned)?;
		let original_state = if analytics.state_diffing { Some(state.clone()) } else { None };

		let sender = t.sender();
		let balance = state.balance(&sender);
		let needed_balance = t.value + t.gas * t.gas_price;
		if balance < needed_balance {
			// give the sender a sufficient balance
			state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty);
		}
let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false };
let mut ret = Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(t, options)?;
// TODO gav move this into Executive.
ret.state_diff = original_state.map(|original| state.diff_from(original));
		Ok(ret)
	}
fn estimate_gas(&self, t: &SignedTransaction, block: BlockId) -> Result<U256, CallError> {
const UPPER_CEILING: u64 = 1_000_000_000_000u64;
let header = self.block_header(block).ok_or(CallError::StatePruned)?;
let last_hashes = self.build_last_hashes(header.parent_hash());
let env_info = EnvInfo {
number: header.number(),
author: header.author(),
timestamp: header.timestamp(),
difficulty: header.difficulty(),
last_hashes: last_hashes,
			gas_used: U256::zero(),
			gas_limit: UPPER_CEILING.into(),
		};
// that's just a copy of the state.
		let original_state = self.state_at(block).ok_or(CallError::StatePruned)?;
		let sender = t.sender();
		let balance = original_state.balance(&sender);
let options = TransactOptions { tracing: true, vm_tracing: false, check_nonce: false };
let cond = |gas| {
			let mut tx = t.as_unsigned().clone();
			tx.gas = gas;
			let tx = tx.fake_sign(sender);
			let mut state = original_state.clone();
let needed_balance = tx.value + tx.gas * tx.gas_price;
if balance < needed_balance {
// give the sender a sufficient balance
state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty);
}
Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm)
.transact(&tx, options.clone())
.map(|r| r.exception.is_none())
.unwrap_or(false)
};
		let mut upper = header.gas_limit();
if !cond(upper) {
// impossible at block gas limit - try `UPPER_CEILING` instead.
// TODO: consider raising limit by powers of two.
upper = UPPER_CEILING.into();
if !cond(upper) {
trace!(target: "estimate_gas", "estimate_gas failed with {}", upper);
return Err(CallError::Execution(ExecutionError::Internal))
}
}
let lower = t.gas_required(&self.engine.schedule(&env_info)).into();
if cond(lower) {
trace!(target: "estimate_gas", "estimate_gas succeeded with {}", lower);
return Ok(lower)
}
/// Find transition point between `lower` and `upper` where `cond` changes from `false` to `true`.
/// Returns the lowest value between `lower` and `upper` for which `cond` returns true.
/// We assert: `cond(lower) = false`, `cond(upper) = true`
fn binary_chop<F>(mut lower: U256, mut upper: U256, mut cond: F) -> U256 where F: FnMut(U256) -> bool {
while upper - lower > 1.into() {
let mid = (lower + upper) / 2.into();
trace!(target: "estimate_gas", "{} .. {} .. {}", lower, mid, upper);
let c = cond(mid);
match c {
true => upper = mid,
false => lower = mid,
};
trace!(target: "estimate_gas", "{} => {} .. {}", c, lower, upper);
}
upper
}
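		// Illustrative trace (hypothetical numbers): with lower = 21_000, upper = 29_000
		// and the true minimum viable gas at 26_000, the chop tests 25_000 (false),
		// 27_000 (true), 26_000 (true), then keeps narrowing until upper - lower <= 1
		// and returns `upper` == 26_000 -- roughly log2(upper - lower) calls to `cond`.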
// binary chop to non-excepting call with gas somewhere between 21000 and block gas limit
trace!(target: "estimate_gas", "estimate_gas chopping {} .. {}", lower, upper);
Ok(binary_chop(lower, upper, cond))
}
fn replay(&self, id: TransactionId, analytics: CallAnalytics) -> Result<Executed, CallError> {
let address = self.transaction_address(id).ok_or(CallError::TransactionNotFound)?;
let header = self.block_header(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)?;
let body = self.block_body(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)?;
		let mut state = self.state_at_beginning(BlockId::Hash(address.block_hash)).ok_or(CallError::StatePruned)?;
		let mut txs = BodyView::new(&body).transactions();

		if address.index >= txs.len() {
return Err(CallError::TransactionNotFound);
}
let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false };
let last_hashes = self.build_last_hashes(header.hash());
let mut env_info = EnvInfo {
number: header.number(),
author: header.author(),
timestamp: header.timestamp(),
difficulty: header.difficulty(),
last_hashes: last_hashes,
			gas_limit: header.gas_limit(),
			gas_used: U256::default(),
		};

		const PROOF: &'static str = "Transactions fetched from blockchain; blockchain transactions are valid; qed";
let rest = txs.split_off(address.index);
for t in txs {
let t = SignedTransaction::new(t).expect(PROOF);
match Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(&t, Default::default()) {