Newer
Older
fn ensure_sequential_finalization(
&self,
header: &Block::Header,
last_finalized: Option<Block::Hash>,
let last_finalized = last_finalized.unwrap_or_else(|| self.blockchain.meta.read().finalized_hash);
if *header.parent_hash() != last_finalized {
return Err(::client::error::Error::NonSequentialFinalization(
format!("Last finalized {:?} not parent of {:?}", last_finalized, header.hash()),
).into());
}
Ok(())
}
fn finalize_block_with_transaction(
&self,
transaction: &mut DBTransaction,
hash: &Block::Hash,
header: &Block::Header,
last_finalized: Option<Block::Hash>,
justification: Option<Justification>,
finalization_displaced: &mut Option<FinalizationDisplaced<Block::Hash, NumberFor<Block>>>,
) -> ClientResult<(Block::Hash, <Block::Header as HeaderT>::Number, bool, bool)> {
// TODO: ensure best chain contains this block.
let number = *header.number();
self.ensure_sequential_finalization(header, last_finalized)?;
self.note_finalized(
transaction,
header,
*hash,
finalization_displaced,
)?;
if let Some(justification) = justification {
transaction.put(
columns::JUSTIFICATION,
&utils::number_and_hash_to_lookup_key(number, hash)?,
&justification.encode(),
);
Ok((*hash, number, false, true))
// performs forced canonicaliziation with a delay after importing a non-finalized block.
fn force_delayed_canonicalize(
&self,
transaction: &mut DBTransaction,
hash: Block::Hash,
number: NumberFor<Block>,
)
if number_u64 > self.canonicalization_delay {
let new_canonical = number_u64 - self.canonicalization_delay;
if new_canonical <= self.storage.state_db.best_canonical().unwrap_or(0) {
return Ok(())
}
let hash = if new_canonical == number_u64 {
::client::blockchain::HeaderBackend::hash(&self.blockchain, new_canonical.saturated_into())?
.expect("existence of block with number `new_canonical` \
implies existence of blocks with all numbers before it; qed")
trace!(target: "db", "Canonicalize block #{} ({:?})", new_canonical, hash);
let commit = self.storage.state_db.canonicalize_block(&hash)
.map_err(|e: state_db::Error<io::Error>| client::error::Error::from(format!("State database error: {:?}", e)))?;
apply_state_commit(transaction, commit);
};
Ok(())
}
fn try_commit_operation(&self, mut operation: BlockImportOperation<Block, Blake2Hasher>)
let mut finalization_displaced_leaves = None;
operation.apply_aux(&mut transaction);
let mut meta_updates = Vec::with_capacity(operation.finalized_blocks.len());
let mut last_finalized_hash = self.blockchain.meta.read().finalized_hash;
for (block, justification) in operation.finalized_blocks {
let block_hash = self.blockchain.expect_block_hash_from_id(&block)?;
let block_header = self.blockchain.expect_header(BlockId::Hash(block_hash))?;
meta_updates.push(self.finalize_block_with_transaction(
&mut transaction,
&block_hash,
&block_header,
Some(last_finalized_hash),
justification,
&mut finalization_displaced_leaves,
)?);
last_finalized_hash = block_hash;
let imported = if let Some(pending_block) = operation.pending_block {
let parent_hash = *pending_block.header.parent_hash();
let number = pending_block.header.number().clone();
// blocks are keyed by number + hash.
let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?;
let (enacted, retracted) = if pending_block.leaf_state.is_best() {
self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))?
} else {
(Default::default(), Default::default())
};
Stanislav Tkach
committed
utils::insert_hash_to_key_mapping(
&mut transaction,
columns::KEY_LOOKUP,
number,
hash,
let header_metadata = CachedHeaderMetadata::from(&pending_block.header);
self.blockchain.insert_header_metadata(
header_metadata.hash,
header_metadata,
);
transaction.put(columns::HEADER, &lookup_key, &pending_block.header.encode());
if let Some(body) = &pending_block.body {
transaction.put(columns::BODY, &lookup_key, &body.encode());
}
if let Some(justification) = pending_block.justification {
transaction.put(columns::JUSTIFICATION, &lookup_key, &justification.encode());
}
transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, &lookup_key);
transaction.put(columns::META, meta_keys::GENESIS_HASH, hash.as_ref());
let finalized = if operation.commit_state {
let mut changeset: state_db::ChangeSet<Vec<u8>> = state_db::ChangeSet::default();
for (key, (val, rc)) in operation.db_updates.drain() {
if rc > 0 {
changeset.inserted.push((key, val.to_vec()));
} else if rc < 0 {
changeset.deleted.push(key);
}
let number_u64 = number.saturated_into::<u64>();
let commit = self.storage.state_db.insert_block(&hash, number_u64, &pending_block.header.parent_hash(), changeset)
.map_err(|e: state_db::Error<io::Error>| client::error::Error::from(format!("State database error: {:?}", e)))?;
apply_state_commit(&mut transaction, commit);
// Check if need to finalize. Genesis is always finalized instantly.
let finalized = number_u64 == 0 || pending_block.leaf_state.is_final();
finalized
} else {
false
};
let header = &pending_block.header;
let is_best = pending_block.leaf_state.is_best();
let changes_trie_updates = operation.changes_trie_updates;
self.changes_tries_storage.commit(&mut transaction, changes_trie_updates);
Arkadiy Paronyan
committed
let cache = operation.old_state.release(); // release state reference so that it can be finalized
if finalized {
// TODO: ensure best chain contains this block.
self.ensure_sequential_finalization(header, Some(last_finalized_hash))?;
self.note_finalized(
&mut transaction,
header,
hash,
&mut finalization_displaced_leaves,
)?;
} else {
// canonicalize blocks which are old enough, regardless of finality.
self.force_delayed_canonicalize(&mut transaction, hash, *header.number())?
debug!(target: "db", "DB Commit {:?} ({}), best = {}", hash, number, is_best);
let mut leaves = self.blockchain.leaves.write();
let displaced_leaf = leaves.import(hash, number, parent_hash);
asynchronous rob
committed
leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX);
let mut children = children::read_children(&*self.storage.db, columns::META, meta_keys::CHILDREN_PREFIX, parent_hash)?;
children.push(hash);
children::write_children(&mut transaction, columns::META, meta_keys::CHILDREN_PREFIX, parent_hash, children);
meta_updates.push((hash, number, pending_block.leaf_state.is_best(), finalized));
Arkadiy Paronyan
committed
Some((number, hash, enacted, retracted, displaced_leaf, is_best, cache))
let cache_update = if let Some(set_head) = operation.set_head {
if let Some(header) = ::client::blockchain::HeaderBackend::header(&self.blockchain, set_head)? {
let number = header.number();
let hash = header.hash();
let (enacted, retracted) = self.set_head_with_transaction(
&mut transaction,
hash.clone(),
(number.clone(), hash.clone())
)?;
meta_updates.push((hash, *number, true, false));
Some((enacted, retracted))
return Err(client::error::Error::UnknownBlock(format!("Cannot set head {:?}", set_head)))
} else {
None
};
let write_result = self.storage.db.write(transaction).map_err(db_err);
if let Some(changes_trie_cache_update) = operation.changes_trie_cache_update {
self.changes_tries_storage.commit_cache(changes_trie_cache_update);
}
Arkadiy Paronyan
committed
if let Some((number, hash, enacted, retracted, displaced_leaf, is_best, mut cache)) = imported {
let mut leaves = self.blockchain.leaves.write();
let mut undo = leaves.undo();
if let Some(displaced_leaf) = displaced_leaf {
undo.undo_import(displaced_leaf);
}
if let Some(finalization_displaced) = finalization_displaced_leaves {
undo.undo_finalization(finalization_displaced);
Arkadiy Paronyan
committed
cache.sync_cache(
&enacted,
&retracted,
operation.storage_updates,
operation.child_storage_updates,
Some(hash),
Some(number),
if let Some((enacted, retracted)) = cache_update {
self.shared_cache.lock().sync(&enacted, &retracted);
}
for (hash, number, is_best, is_finalized) in meta_updates {
self.blockchain.update_meta(hash, number, is_best, is_finalized);
}
Ok(())
}
// write stuff to a transaction after a new block is finalized.
// this canonicalizes finalized blocks. Fails if called with a block which
// was not a child of the last finalized block.
fn note_finalized(
&self,
transaction: &mut DBTransaction,
f_header: &Block::Header,
f_hash: Block::Hash,
displaced: &mut Option<FinalizationDisplaced<Block::Hash, NumberFor<Block>>>
Block: BlockT<Hash=H256>,
{
let f_num = f_header.number().clone();
if self.storage.state_db.best_canonical().map(|c| f_num.saturated_into::<u64>() > c).unwrap_or(true) {
let parent_hash = f_header.parent_hash().clone();
let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash.clone())?;
transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, &lookup_key);
let commit = self.storage.state_db.canonicalize_block(&f_hash)
.map_err(|e: state_db::Error<io::Error>| client::error::Error::from(format!("State database error: {:?}", e)))?;
apply_state_commit(transaction, commit);
let changes_trie_config = self.changes_trie_config(parent_hash)?;
if let Some(changes_trie_config) = changes_trie_config {
self.changes_tries_storage.prune(&changes_trie_config, transaction, f_hash, f_num);
}
let new_displaced = self.blockchain.leaves.write().finalize_height(f_num);
match displaced {
x @ &mut None => *x = Some(new_displaced),
&mut Some(ref mut displaced) => displaced.merge(new_displaced),
}
Ok(())
}
}
/// Stage every change from a state-db commit set into the given DB transaction.
///
/// Data entries go to the STATE column, metadata entries to STATE_META;
/// within each column, insertions are staged before deletions.
fn apply_state_commit(transaction: &mut DBTransaction, commit: state_db::CommitSet<Vec<u8>>) {
	for (key, value) in commit.data.inserted {
		transaction.put(columns::STATE, &key[..], &value);
	}
	for key in commit.data.deleted {
		transaction.delete(columns::STATE, &key[..]);
	}
	for (key, value) in commit.meta.inserted {
		transaction.put(columns::STATE_META, &key[..], &value);
	}
	for key in commit.meta.deleted {
		transaction.delete(columns::STATE_META, &key[..]);
	}
}
impl<Block> client_api::backend::AuxStore for Backend<Block> where Block: BlockT<Hash=H256> {
fn insert_aux<
'a,
'b: 'a,
'c: 'a,
I: IntoIterator<Item=&'a(&'c [u8], &'c [u8])>,
D: IntoIterator<Item=&'a &'b [u8]>,
>(&self, insert: I, delete: D) -> ClientResult<()> {
let mut transaction = DBTransaction::new();
for (k, v) in insert {
transaction.put(columns::AUX, k, v);
for k in delete {
transaction.delete(columns::AUX, k);
}
self.storage.db.write(transaction).map_err(db_err)?;
Ok(())
}
fn get_aux(&self, key: &[u8]) -> ClientResult<Option<Vec<u8>>> {
Ok(self.storage.db.get(columns::AUX, key).map(|r| r.map(|v| v.to_vec())).map_err(db_err)?)
}
}
impl<Block> client_api::backend::Backend<Block, Blake2Hasher> for Backend<Block> where Block: BlockT<Hash=H256> {
type BlockImportOperation = BlockImportOperation<Block, Blake2Hasher>;
type Blockchain = BlockchainDb<Block>;
Arkadiy Paronyan
committed
type State = CachingState<Blake2Hasher, RefTrackingState<Block>, Block>;
type ChangesTrieStorage = DbChangesTrieStorage<Block>;
type OffchainStorage = offchain::LocalStorage;
fn begin_operation(&self) -> ClientResult<Self::BlockImportOperation> {
let old_state = self.state_at(BlockId::Hash(Default::default()))?;
Ok(BlockImportOperation {
pending_block: None,
old_state,
db_updates: PrefixedMemoryDB::default(),
storage_updates: Default::default(),
child_storage_updates: Default::default(),
changes_trie_updates: MemoryDB::default(),
changes_trie_cache_update: None,
aux_ops: Vec::new(),
finalized_blocks: Vec::new(),
commit_state: false,
fn begin_state_operation(
&self,
operation: &mut Self::BlockImportOperation,
block: BlockId<Block>,
) -> ClientResult<()> {
operation.old_state = self.state_at(block)?;
operation.commit_state = true;
// Commit a prepared block-import operation to the database.
// NOTE(review): the return type appears truncated in this copy of the file
// (the body's arms return `Ok(())` / `Err(_)`) — confirm against upstream.
fn commit_operation(&self, operation: Self::BlockImportOperation)
{
	match self.try_commit_operation(operation) {
		Ok(_) => {
			// Database write succeeded: make the state-db's pending
			// changes (staged during the commit attempt) permanent.
			self.storage.state_db.apply_pending();
			Ok(())
		},
		e @ Err(_) => {
			// Commit failed: drop the pending state-db changes so the
			// in-memory state-db stays consistent with what was written.
			self.storage.state_db.revert_pending();
			e
		}
	}
}
fn finalize_block(&self, block: BlockId<Block>, justification: Option<Justification>)
let mut transaction = DBTransaction::new();
let hash = self.blockchain.expect_block_hash_from_id(&block)?;
let header = self.blockchain.expect_header(block)?;
let mut displaced = None;
let commit = |displaced| {
let (hash, number, is_best, is_finalized) = self.finalize_block_with_transaction(
&mut transaction,
&hash,
&header,
None,
justification,
)?;
self.storage.db.write(transaction).map_err(db_err)?;
self.blockchain.update_meta(hash, number, is_best, is_finalized);
Ok(())
};
Ok(()) => self.storage.state_db.apply_pending(),
e @ Err(_) => {
self.storage.state_db.revert_pending();
if let Some(displaced) = displaced {
self.blockchain.leaves.write().undo().undo_finalization(displaced);
}
return e;
}
}
Svyatoslav Nikolsky
committed
fn changes_trie_storage(&self) -> Option<&Self::ChangesTrieStorage> {
Svyatoslav Nikolsky
committed
}
/// Hand out a clone of the backend's local offchain storage.
fn offchain_storage(&self) -> Option<Self::OffchainStorage> {
	let storage = self.offchain_storage.clone();
	Some(storage)
}
fn revert(&self, n: NumberFor<Block>) -> ClientResult<NumberFor<Block>> {
let mut best = self.blockchain.info().best_number;
let finalized = self.blockchain.info().finalized_number;
let revertible = best - finalized;
let n = if revertible < n { revertible } else { n };
for c in 0 .. n.saturated_into::<u64>() {
if best.is_zero() {
return Ok(c.saturated_into::<NumberFor<Block>>())
}
let mut transaction = DBTransaction::new();
match self.storage.state_db.revert_one() {
Some(commit) => {
apply_state_commit(&mut transaction, commit);
let removed = self.blockchain.header(BlockId::Number(best))?.ok_or_else(
|| client::error::Error::UnknownBlock(
format!("Error reverting to {}. Block hash not found.", best)))?;
best -= One::one(); // prev block
|| client::error::Error::UnknownBlock(
format!("Error reverting to {}. Block hash not found.", best)))?;
let key = utils::number_and_hash_to_lookup_key(best.clone(), &hash)?;
transaction.put(columns::META, meta_keys::BEST_BLOCK, &key);
transaction.delete(columns::KEY_LOOKUP, removed.hash().as_ref());
children::remove_children(&mut transaction, columns::META, meta_keys::CHILDREN_PREFIX, hash);
self.storage.db.write(transaction).map_err(db_err)?;
self.blockchain.update_meta(hash, best, true, false);
self.blockchain.leaves.write().revert(removed.hash().clone(), removed.number().clone(), removed.parent_hash().clone());
None => return Ok(c.saturated_into::<NumberFor<Block>>())
fn blockchain(&self) -> &BlockchainDb<Block> {
/// Report how much memory the shared state cache currently uses.
fn used_state_cache_size(&self) -> Option<usize> {
	Some(self.shared_cache.lock().used_storage_cache_size())
}
fn state_at(&self, block: BlockId<Block>) -> ClientResult<Self::State> {
use client::blockchain::HeaderBackend as BcHeaderBackend;
// special case for genesis initialization
match block {
Svyatoslav Nikolsky
committed
BlockId::Hash(h) if h == Default::default() => {
let genesis_storage = DbGenesisStorage::new();
let root = genesis_storage.0.clone();
Arkadiy Paronyan
committed
let db_state = DbState::new(Arc::new(genesis_storage), root);
let state = RefTrackingState::new(db_state, self.storage.clone(), None);
return Ok(CachingState::new(state, self.shared_cache.clone(), None));
Svyatoslav Nikolsky
committed
},
match self.blockchain.header(block) {
Ok(Some(ref hdr)) => {
let hash = hdr.hash();
if !self.have_state_at(&hash, *hdr.number()) {
return Err(client::error::Error::UnknownBlock(format!("State already discarded for {:?}", block)))
}
if let Ok(()) = self.storage.state_db.pin(&hash) {
let root = H256::from_slice(hdr.state_root().as_ref());
Arkadiy Paronyan
committed
let db_state = DbState::new(self.storage.clone(), root);
let state = RefTrackingState::new(db_state, self.storage.clone(), Some(hash.clone()));
Ok(CachingState::new(state, self.shared_cache.clone(), Some(hash)))
} else {
Err(client::error::Error::UnknownBlock(format!("State already discarded for {:?}", block)))
Ok(None) => Err(client::error::Error::UnknownBlock(format!("Unknown state for block {:?}", block))),
fn have_state_at(&self, hash: &Block::Hash, number: NumberFor<Block>) -> bool {
if self.is_archive {
match self.blockchain.header(BlockId::Hash(hash.clone())) {
Ok(Some(header)) => {
state_machine::Storage::get(self.storage.as_ref(), &header.state_root(), (&[], None)).unwrap_or(None).is_some()
},
_ => false,
}
} else {
!self.storage.state_db.is_pruned(hash, number.saturated_into::<u64>())
}
/// Release a state handle, flushing its local cache changes back into the
/// shared cache before dropping it.
///
/// (Stray VCS-blame residue lines inside this body were removed; they were
/// not code and broke compilation.)
fn destroy_state(&self, state: Self::State) -> ClientResult<()> {
	if let Some(hash) = state.cache.parent_hash.clone() {
		// Only mark the sync as "best" if this state's parent is still
		// the current best block.
		let is_best = self.blockchain.meta.read().best_hash == hash;
		state.release().sync_cache(&[], &[], vec![], vec![], None, None, is_best);
	}
	Ok(())
}
fn get_import_lock(&self) -> &RwLock<()> {
impl<Block> client_api::backend::LocalBackend<Block, Blake2Hasher> for Backend<Block>
where Block: BlockT<Hash=H256> {}
/// TODO: remove me in #3201
// NOTE(review): looks like a temporary sink that keeps the cache-transaction
// revert path referenced (see TODO above); it calls `on_block_revert` on a
// default block id and then hits `unimplemented!()` — it is never meant to
// actually run. Confirm intent against issue #3201 before removal.
pub fn unused_sink<Block: BlockT>(cache_tx: crate::cache::DbCacheTransaction<Block>) {
	cache_tx.on_block_revert(&crate::cache::ComplexBlockId::new(Default::default(), 0.into())).unwrap();
	unimplemented!()
}
#[cfg(test)]
mod tests {
use super::*;
Stanislav Tkach
committed
use crate::columns;
use client_api::backend::{Backend as BTrait, BlockImportOperation as Op};
use client::blockchain::Backend as BLBTrait;
use sr_primitives::testing::{Header, Block as RawBlock, ExtrinsicWrapper};
use sr_primitives::traits::{Hash, BlakeTwo256};
use state_machine::{TrieMut, TrieDBMut, ChangesTrieRootsStorage, ChangesTrieStorage};
use header_metadata::{lowest_common_ancestor, tree_route};
type Block = RawBlock<ExtrinsicWrapper<u64>>;
fn prepare_changes(changes: Vec<(Vec<u8>, Vec<u8>)>) -> (H256, MemoryDB<Blake2Hasher>) {
let mut changes_root = H256::default();
let mut changes_trie_update = MemoryDB::<Blake2Hasher>::default();
let mut trie = TrieDBMut::<Blake2Hasher>::new(
&mut changes_trie_update,
&mut changes_root
);
for (key, value) in changes {
trie.insert(&key, &value).unwrap();
}
}
(changes_root, changes_trie_update)
}
/// Test helper: import a block with the given number, parent, changes-trie
/// changes and extrinsics root, returning the new header's hash.
fn insert_header(
	backend: &Backend<Block>,
	number: u64,
	parent_hash: H256,
	changes: Vec<(Vec<u8>, Vec<u8>)>,
	extrinsics_root: H256,
) -> H256 {
	// Build the changes trie for this block and put its root in the digest.
	let (root_of_changes, trie_update) = prepare_changes(changes);
	let digest = Digest {
		logs: vec![DigestItem::ChangesTrieRoot(root_of_changes)],
	};
	let header = Header {
		number,
		parent_hash,
		state_root: BlakeTwo256::trie_root(Vec::new()),
		digest,
		extrinsics_root,
	};
	let hash_of_header = header.hash();
	// Genesis imports against the all-zero block id; any other block
	// imports against its parent's number.
	let parent_id = match number {
		0 => BlockId::Hash(Default::default()),
		n => BlockId::Number(n - 1),
	};
	let mut op = backend.begin_operation().unwrap();
	backend.begin_state_operation(&mut op, parent_id).unwrap();
	op.set_block_data(header, Some(Vec::new()), None, NewBlockState::Best).unwrap();
	op.update_changes_trie((trie_update, ChangesTrieCacheAction::Clear)).unwrap();
	backend.commit_operation(op).unwrap();
	hash_of_header
}
#[test]
fn block_hash_inserted_correctly() {
let backing = {
let db = Backend::<Block>::new_test(1, 0);
for i in 0..10 {
assert!(db.blockchain().hash(i).unwrap().is_none());
{
let id = if i == 0 {
BlockId::Hash(Default::default())
BlockId::Number(i - 1)
};
let mut op = db.begin_operation().unwrap();
db.begin_state_operation(&mut op, id).unwrap();
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
let header = Header {
number: i,
parent_hash: if i == 0 {
Default::default()
} else {
db.blockchain.hash(i - 1).unwrap().unwrap()
},
state_root: Default::default(),
digest: Default::default(),
extrinsics_root: Default::default(),
};
op.set_block_data(
header,
Some(vec![]),
None,
NewBlockState::Best,
).unwrap();
db.commit_operation(op).unwrap();
}
assert!(db.blockchain().hash(i).unwrap().is_some())
}
db.storage.db.clone()
};
let backend = Backend::<Block>::new(DatabaseSettings {
state_cache_size: 16777216,
state_cache_child_ratio: Some((50, 100)),
pruning: PruningMode::keep_blocks(1),
source: DatabaseSettingsSrc::Custom(backing),
}, 0).unwrap();
assert_eq!(backend.blockchain().info().best_number, 9);
for i in 0..10 {
assert!(backend.blockchain().hash(i).unwrap().is_some())
}
}
#[test]
fn set_state_data() {
let db = Backend::<Block>::new_test(2, 0);
let mut op = db.begin_operation().unwrap();
db.begin_state_operation(&mut op, BlockId::Hash(Default::default())).unwrap();
number: 0,
parent_hash: Default::default(),
state_root: Default::default(),
digest: Default::default(),
extrinsics_root: Default::default(),
};
let storage = vec![
(vec![1, 3, 5], vec![2, 4, 6]),
(vec![1, 2, 3], vec![9, 9, 9]),
];
header.state_root = op.old_state.storage_root(storage
.iter()
.cloned()
.map(|(x, y)| (x, Some(y)))
).0.into();
let hash = header.hash();
op.reset_storage(storage.iter().cloned().collect(), Default::default()).unwrap();
NewBlockState::Best,
).unwrap();
db.commit_operation(op).unwrap();
let state = db.state_at(BlockId::Number(0)).unwrap();
assert_eq!(state.storage(&[1, 3, 5]).unwrap(), Some(vec![2, 4, 6]));
assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9]));
assert_eq!(state.storage(&[5, 5, 5]).unwrap(), None);
let mut op = db.begin_operation().unwrap();
db.begin_state_operation(&mut op, BlockId::Number(0)).unwrap();
state_root: Default::default(),
digest: Default::default(),
extrinsics_root: Default::default(),
};
let storage = vec![
(vec![1, 3, 5], None),
(vec![5, 5, 5], Some(vec![4, 5, 6])),
];
let (root, overlay) = op.old_state.storage_root(storage.iter().cloned());
op.update_db_storage(overlay).unwrap();
header.state_root = root.into();
op.set_block_data(
header,
Some(vec![]),
None,
NewBlockState::Best,
).unwrap();
db.commit_operation(op).unwrap();
let state = db.state_at(BlockId::Number(1)).unwrap();
assert_eq!(state.storage(&[1, 3, 5]).unwrap(), None);
assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9]));
assert_eq!(state.storage(&[5, 5, 5]).unwrap(), Some(vec![4, 5, 6]));
}
}
#[test]
fn delete_only_when_negative_rc() {
let _ = ::env_logger::try_init();
let key;
let backend = Backend::<Block>::new_test(1, 0);
let mut op = backend.begin_operation().unwrap();
backend.begin_state_operation(&mut op, BlockId::Hash(Default::default())).unwrap();
number: 0,
parent_hash: Default::default(),
state_root: Default::default(),
digest: Default::default(),
extrinsics_root: Default::default(),
};
let storage: Vec<(_, _)> = vec![];
header.state_root = op.old_state.storage_root(storage
.iter()
.cloned()
.map(|(x, y)| (x, Some(y)))
).0.into();
op.reset_storage(storage.iter().cloned().collect(), Default::default()).unwrap();
key = op.db_updates.insert(EMPTY_PREFIX, b"hello");
op.set_block_data(
header,
Some(vec![]),
None,
NewBlockState::Best,
).unwrap();
assert_eq!(backend.storage.db.get(
columns::STATE,
&trie::prefixed_key::<Blake2Hasher>(&key, EMPTY_PREFIX)
).unwrap().unwrap(), &b"hello"[..]);
let mut op = backend.begin_operation().unwrap();
backend.begin_state_operation(&mut op, BlockId::Number(0)).unwrap();
number: 1,
state_root: Default::default(),
digest: Default::default(),
extrinsics_root: Default::default(),
};
let storage: Vec<(_, _)> = vec![];
header.state_root = op.old_state.storage_root(storage
.iter()
.cloned()
.map(|(x, y)| (x, Some(y)))
).0.into();
op.db_updates.insert(EMPTY_PREFIX, b"hello");
op.db_updates.remove(&key, EMPTY_PREFIX);
op.set_block_data(
header,
Some(vec![]),
None,
NewBlockState::Best,
).unwrap();
assert_eq!(backend.storage.db.get(
columns::STATE,
&trie::prefixed_key::<Blake2Hasher>(&key, EMPTY_PREFIX)
).unwrap().unwrap(), &b"hello"[..]);
let hash = {
let mut op = backend.begin_operation().unwrap();
backend.begin_state_operation(&mut op, BlockId::Number(1)).unwrap();
state_root: Default::default(),
digest: Default::default(),
extrinsics_root: Default::default(),
};
let storage: Vec<(_, _)> = vec![];
header.state_root = op.old_state.storage_root(storage
.iter()
.cloned()
.map(|(x, y)| (x, Some(y)))
).0.into();
let hash = header.hash();
op.set_block_data(
header,
Some(vec![]),
None,
NewBlockState::Best,
).unwrap();
assert!(backend.storage.db.get(
columns::STATE,
&trie::prefixed_key::<Blake2Hasher>(&key, EMPTY_PREFIX)
).unwrap().is_some());
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
hash
};
{
let mut op = backend.begin_operation().unwrap();
backend.begin_state_operation(&mut op, BlockId::Number(2)).unwrap();
let mut header = Header {
number: 3,
parent_hash: hash,
state_root: Default::default(),
digest: Default::default(),
extrinsics_root: Default::default(),
};
let storage: Vec<(_, _)> = vec![];
header.state_root = op.old_state.storage_root(storage
.iter()
.cloned()
.map(|(x, y)| (x, Some(y)))
).0.into();
op.set_block_data(
header,
Some(vec![]),
None,
NewBlockState::Best,
).unwrap();
backend.commit_operation(op).unwrap();
assert!(backend.storage.db.get(
columns::STATE,
&trie::prefixed_key::<Blake2Hasher>(&key, EMPTY_PREFIX)
).unwrap().is_none());
backend.finalize_block(BlockId::Number(1), None).unwrap();
backend.finalize_block(BlockId::Number(2), None).unwrap();
backend.finalize_block(BlockId::Number(3), None).unwrap();
assert!(backend.storage.db.get(
columns::STATE,
&trie::prefixed_key::<Blake2Hasher>(&key, EMPTY_PREFIX)
).unwrap().is_none());
Svyatoslav Nikolsky
committed
#[test]
fn changes_trie_storage_works() {
let backend = Backend::<Block>::new_test(1000, 100);
backend.changes_tries_storage.meta.write().finalized_number = 1000;
Svyatoslav Nikolsky
committed
let check_changes = |backend: &Backend<Block>, block: u64, changes: Vec<(Vec<u8>, Vec<u8>)>| {
let (changes_root, mut changes_trie_update) = prepare_changes(changes);
let anchor = state_machine::ChangesTrieAnchorBlockId {
hash: backend.blockchain().header(BlockId::Number(block)).unwrap().unwrap().hash(),
number: block
};
assert_eq!(backend.changes_tries_storage.root(&anchor, block), Ok(Some(changes_root)));
Svyatoslav Nikolsky
committed
for (key, (val, _)) in changes_trie_update.drain() {
assert_eq!(backend.changes_trie_storage().unwrap().get(&key, EMPTY_PREFIX), Ok(Some(val)));
Svyatoslav Nikolsky
committed
}
};
let changes0 = vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())];
let changes1 = vec![
(b"key_at_1".to_vec(), b"val_at_1".to_vec()),
(b"another_key_at_1".to_vec(), b"another_val_at_1".to_vec()),
];
let changes2 = vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())];
let block0 = insert_header(&backend, 0, Default::default(), changes0.clone(), Default::default());
let block1 = insert_header(&backend, 1, block0, changes1.clone(), Default::default());
let _ = insert_header(&backend, 2, block1, changes2.clone(), Default::default());
Svyatoslav Nikolsky
committed
// check that the storage contains tries for all blocks
check_changes(&backend, 0, changes0);
check_changes(&backend, 1, changes1);
check_changes(&backend, 2, changes2);
}
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
#[test]
fn changes_trie_storage_works_with_forks() {
let backend = Backend::<Block>::new_test(1000, 100);
let changes0 = vec![(b"k0".to_vec(), b"v0".to_vec())];
let changes1 = vec![(b"k1".to_vec(), b"v1".to_vec())];
let changes2 = vec![(b"k2".to_vec(), b"v2".to_vec())];
let block0 = insert_header(&backend, 0, Default::default(), changes0.clone(), Default::default());
let block1 = insert_header(&backend, 1, block0, changes1.clone(), Default::default());
let block2 = insert_header(&backend, 2, block1, changes2.clone(), Default::default());
let changes2_1_0 = vec![(b"k3".to_vec(), b"v3".to_vec())];
let changes2_1_1 = vec![(b"k4".to_vec(), b"v4".to_vec())];
let block2_1_0 = insert_header(&backend, 3, block2, changes2_1_0.clone(), Default::default());
let block2_1_1 = insert_header(&backend, 4, block2_1_0, changes2_1_1.clone(), Default::default());
let changes2_2_0 = vec![(b"k5".to_vec(), b"v5".to_vec())];
let changes2_2_1 = vec![(b"k6".to_vec(), b"v6".to_vec())];
let block2_2_0 = insert_header(&backend, 3, block2, changes2_2_0.clone(), Default::default());
let block2_2_1 = insert_header(&backend, 4, block2_2_0, changes2_2_1.clone(), Default::default());
// finalize block1
backend.changes_tries_storage.meta.write().finalized_number = 1;
// branch1: when asking for finalized block hash
let (changes1_root, _) = prepare_changes(changes1);
let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 };
assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root)));
// branch2: when asking for finalized block hash
let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 };
assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root)));
// branch1: when asking for non-finalized block hash (search by traversal)
let (changes2_1_0_root, _) = prepare_changes(changes2_1_0);
let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 };
assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_1_0_root)));
// branch2: when asking for non-finalized block hash (search using canonicalized hint)
let (changes2_2_0_root, _) = prepare_changes(changes2_2_0);
let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 };
assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root)));
// finalize first block of branch2 (block2_2_0)
backend.changes_tries_storage.meta.write().finalized_number = 3;